Update and sync helpers.

This commit is contained in:
Adam Gandelman 2013-11-05 19:53:17 -08:00
parent fd14987261
commit 8d23efe7f5
8 changed files with 291 additions and 48 deletions

View File

@ -385,16 +385,33 @@ class NeutronContext(object):
def ovs_ctxt(self): def ovs_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver', driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager) self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
ovs_ctxt = { ovs_ctxt = {
'core_plugin': driver, 'core_plugin': driver,
'neutron_plugin': 'ovs', 'neutron_plugin': 'ovs',
'neutron_security_groups': self.neutron_security_groups, 'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(), 'local_ip': unit_private_ip(),
'config': config
} }
return ovs_ctxt return ovs_ctxt
def nvp_ctxt(self):
    """Build the template context for the Nicira NVP neutron plugin.

    Mirrors ovs_ctxt(): resolves the plugin's driver and config-file
    attributes for the active network manager and combines them with
    this unit's security-group setting and private address.
    """
    config = neutron_plugin_attribute(self.plugin, 'config',
                                      self.network_manager)
    driver = neutron_plugin_attribute(self.plugin, 'driver',
                                      self.network_manager)
    ctxt = {'neutron_plugin': 'nvp'}
    ctxt['core_plugin'] = driver
    ctxt['neutron_security_groups'] = self.neutron_security_groups
    ctxt['local_ip'] = unit_private_ip()
    ctxt['config'] = config
    return ctxt
def __call__(self): def __call__(self):
self._ensure_packages() self._ensure_packages()
@ -408,6 +425,8 @@ class NeutronContext(object):
if self.plugin == 'ovs': if self.plugin == 'ovs':
ctxt.update(self.ovs_ctxt()) ctxt.update(self.ovs_ctxt())
elif self.plugin == 'nvp':
ctxt.update(self.nvp_ctxt())
self._save_flag_file() self._save_flag_file()
return ctxt return ctxt

View File

@ -34,13 +34,23 @@ def quantum_plugins():
'services': ['quantum-plugin-openvswitch-agent'], 'services': ['quantum-plugin-openvswitch-agent'],
'packages': [[headers_package(), 'openvswitch-datapath-dkms'], 'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
['quantum-plugin-openvswitch-agent']], ['quantum-plugin-openvswitch-agent']],
'server_packages': ['quantum-server',
'quantum-plugin-openvswitch'],
'server_services': ['quantum-server']
}, },
'nvp': { 'nvp': {
'config': '/etc/quantum/plugins/nicira/nvp.ini', 'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2', 'QuantumPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron')],
'services': [], 'services': [],
'packages': [], 'packages': [],
'server_packages': ['quantum-server',
'quantum-plugin-nicira'],
'server_services': ['quantum-server']
} }
} }
@ -60,13 +70,23 @@ def neutron_plugins():
'services': ['neutron-plugin-openvswitch-agent'], 'services': ['neutron-plugin-openvswitch-agent'],
'packages': [[headers_package(), 'openvswitch-datapath-dkms'], 'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
['quantum-plugin-openvswitch-agent']], ['quantum-plugin-openvswitch-agent']],
'server_packages': ['neutron-server',
'neutron-plugin-openvswitch'],
'server_services': ['neutron-server']
}, },
'nvp': { 'nvp': {
'config': '/etc/neutron/plugins/nicira/nvp.ini', 'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2', 'NeutronPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron')],
'services': [], 'services': [],
'packages': [], 'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-nicira'],
'server_services': ['neutron-server']
} }
} }

View File

@ -13,19 +13,28 @@ from charmhelpers.core.hookenv import (
config, config,
log as juju_log, log as juju_log,
charm_dir, charm_dir,
ERROR,
INFO
) )
from charmhelpers.core.host import ( from charmhelpers.contrib.storage.linux.lvm import (
lsb_release, deactivate_lvm_volume_group,
is_lvm_physical_volume,
remove_lvm_physical_volume,
) )
from charmhelpers.fetch import ( from charmhelpers.core.host import lsb_release, mounts, umount
apt_install, from charmhelpers.fetch import apt_install
) from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
UBUNTU_OPENSTACK_RELEASE = OrderedDict([ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'), ('oneiric', 'diablo'),
('precise', 'essex'), ('precise', 'essex'),
@ -57,6 +66,8 @@ SWIFT_CODENAMES = OrderedDict([
('1.9.0', 'havana'), ('1.9.0', 'havana'),
]) ])
DEFAULT_LOOPBACK_SIZE = '5G'
def error_out(msg): def error_out(msg):
juju_log("FATAL ERROR: %s" % msg, level='ERROR') juju_log("FATAL ERROR: %s" % msg, level='ERROR')
@ -67,7 +78,7 @@ def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.''' '''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = '' rel = ''
if src == 'distro': if src in ['distro', 'distro-proposed']:
try: try:
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
except KeyError: except KeyError:
@ -202,6 +213,10 @@ def configure_installation_source(rel):
'''Configure apt installation source.''' '''Configure apt installation source.'''
if rel == 'distro': if rel == 'distro':
return return
elif rel == 'distro-proposed':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(DISTRO_PROPOSED % ubuntu_rel)
elif rel[:4] == "ppa:": elif rel[:4] == "ppa:":
src = rel src = rel
subprocess.check_call(["add-apt-repository", "-y", src]) subprocess.check_call(["add-apt-repository", "-y", src])
@ -299,6 +314,62 @@ def openstack_upgrade_available(package):
return apt.version_compare(available_vers, cur_vers) == 1 return apt.version_compare(available_vers, cur_vers) == 1
def ensure_block_device(block_device):
    '''
    Confirm block_device, create as loopback if necessary.

    :param block_device: str: Full path of block device to ensure.
        May be given as '/path|size' to request a loopback device of
        a specific size; bare relative names are resolved under /dev.

    :returns: str: Full path of ensured block device.
    '''
    _none = ['None', 'none', None]
    if (block_device in _none):
        # NOTE: error_out() accepts only a message and already logs at
        # ERROR level; passing level= here raised TypeError.
        error_out('prepare_storage(): Missing required input: '
                  'block_device=%s.' % block_device)

    if block_device.startswith('/dev/'):
        bdev = block_device
    elif block_device.startswith('/'):
        # A non-/dev path means a loopback backing file, optionally
        # suffixed with '|<size>'.
        _bd = block_device.split('|')
        if len(_bd) == 2:
            bdev, size = _bd
        else:
            bdev = block_device
            size = DEFAULT_LOOPBACK_SIZE
        bdev = ensure_loopback_device(bdev, size)
    else:
        bdev = '/dev/%s' % block_device

    if not is_block_device(bdev):
        error_out('Failed to locate valid block device at %s' % bdev)

    return bdev
def clean_storage(block_device):
    '''
    Ensures a block device is clean.  That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    # Unmount anything currently mounted from this device first.
    for mountpoint, device in mounts():
        if device == block_device:
            juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
                     (device, mountpoint), level=INFO)
            umount(mountpoint, persist=True)

    if not is_lvm_physical_volume(block_device):
        # No LVM signatures present; just wipe the partition table.
        zap_disk(block_device)
    else:
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
def is_ip(address): def is_ip(address):
""" """
Returns True if address is a valid IP address. Returns True if address is a valid IP address.

View File

@ -102,8 +102,12 @@ def get_osds(service):
Return a list of all Ceph Object Storage Daemons Return a list of all Ceph Object Storage Daemons
currently in the cluster currently in the cluster
''' '''
return json.loads(check_output(['ceph', '--id', service, version = ceph_version()
'osd', 'ls', '--format=json'])) if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls', '--format=json']))
else:
return None
def create_pool(service, name, replicas=2): def create_pool(service, name, replicas=2):
@ -114,7 +118,13 @@ def create_pool(service, name, replicas=2):
return return
# Calculate the number of placement groups based # Calculate the number of placement groups based
# on upstream recommended best practices. # on upstream recommended best practices.
pgnum = (len(get_osds(service)) * 100 / replicas) osds = get_osds(service)
if osds:
pgnum = (len(osds) * 100 / replicas)
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
pgnum = 200
cmd = [ cmd = [
'ceph', '--id', service, 'ceph', '--id', service,
'osd', 'pool', 'create', 'osd', 'pool', 'create',
@ -357,3 +367,17 @@ def ensure_ceph_keyring(service, user=None, group=None):
if user and group: if user and group:
check_call(['chown', '%s.%s' % (user, group), keyring]) check_call(['chown', '%s.%s' % (user, group), keyring])
return True return True
def ceph_version():
    ''' Retrieve the local version of ceph '''
    if not os.path.exists('/usr/bin/ceph'):
        return None
    # 'ceph -v' is expected to print something like
    # 'ceph version <x.y.z> (...)'; the version is the third field.
    fields = check_output(['ceph', '-v']).split()
    if len(fields) > 3:
        return fields[2]
    return None

View File

@ -9,6 +9,7 @@ import json
import yaml import yaml
import subprocess import subprocess
import UserDict import UserDict
from subprocess import CalledProcessError
CRITICAL = "CRITICAL" CRITICAL = "CRITICAL"
ERROR = "ERROR" ERROR = "ERROR"
@ -21,7 +22,7 @@ cache = {}
def cached(func): def cached(func):
''' Cache return values for multiple executions of func + args """Cache return values for multiple executions of func + args
For example: For example:
@ -32,7 +33,7 @@ def cached(func):
unit_get('test') unit_get('test')
will cache the result of unit_get + 'test' for future calls. will cache the result of unit_get + 'test' for future calls.
''' """
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
global cache global cache
key = str((func, args, kwargs)) key = str((func, args, kwargs))
@ -46,8 +47,8 @@ def cached(func):
def flush(key): def flush(key):
''' Flushes any entries from function cache where the """Flushes any entries from function cache where the
key is found in the function+args ''' key is found in the function+args """
flush_list = [] flush_list = []
for item in cache: for item in cache:
if key in item: if key in item:
@ -57,7 +58,7 @@ def flush(key):
def log(message, level=None): def log(message, level=None):
"Write a message to the juju log" """Write a message to the juju log"""
command = ['juju-log'] command = ['juju-log']
if level: if level:
command += ['-l', level] command += ['-l', level]
@ -66,7 +67,7 @@ def log(message, level=None):
class Serializable(UserDict.IterableUserDict): class Serializable(UserDict.IterableUserDict):
"Wrapper, an object that can be serialized to yaml or json" """Wrapper, an object that can be serialized to yaml or json"""
def __init__(self, obj): def __init__(self, obj):
# wrap the object # wrap the object
@ -96,11 +97,11 @@ class Serializable(UserDict.IterableUserDict):
self.data = state self.data = state
def json(self): def json(self):
"Serialize the object to json" """Serialize the object to json"""
return json.dumps(self.data) return json.dumps(self.data)
def yaml(self): def yaml(self):
"Serialize the object to yaml" """Serialize the object to yaml"""
return yaml.dump(self.data) return yaml.dump(self.data)
@ -119,38 +120,38 @@ def execution_environment():
def in_relation_hook(): def in_relation_hook():
"Determine whether we're running in a relation hook" """Determine whether we're running in a relation hook"""
return 'JUJU_RELATION' in os.environ return 'JUJU_RELATION' in os.environ
def relation_type(): def relation_type():
"The scope for the current relation hook" """The scope for the current relation hook"""
return os.environ.get('JUJU_RELATION', None) return os.environ.get('JUJU_RELATION', None)
def relation_id(): def relation_id():
"The relation ID for the current relation hook" """The relation ID for the current relation hook"""
return os.environ.get('JUJU_RELATION_ID', None) return os.environ.get('JUJU_RELATION_ID', None)
def local_unit(): def local_unit():
"Local unit ID" """Local unit ID"""
return os.environ['JUJU_UNIT_NAME'] return os.environ['JUJU_UNIT_NAME']
def remote_unit(): def remote_unit():
"The remote unit for the current relation hook" """The remote unit for the current relation hook"""
return os.environ['JUJU_REMOTE_UNIT'] return os.environ['JUJU_REMOTE_UNIT']
def service_name(): def service_name():
"The name service group this unit belongs to" """The name service group this unit belongs to"""
return local_unit().split('/')[0] return local_unit().split('/')[0]
@cached @cached
def config(scope=None): def config(scope=None):
"Juju charm configuration" """Juju charm configuration"""
config_cmd_line = ['config-get'] config_cmd_line = ['config-get']
if scope is not None: if scope is not None:
config_cmd_line.append(scope) config_cmd_line.append(scope)
@ -163,6 +164,7 @@ def config(scope=None):
@cached @cached
def relation_get(attribute=None, unit=None, rid=None): def relation_get(attribute=None, unit=None, rid=None):
"""Get relation information"""
_args = ['relation-get', '--format=json'] _args = ['relation-get', '--format=json']
if rid: if rid:
_args.append('-r') _args.append('-r')
@ -174,9 +176,14 @@ def relation_get(attribute=None, unit=None, rid=None):
return json.loads(subprocess.check_output(_args)) return json.loads(subprocess.check_output(_args))
except ValueError: except ValueError:
return None return None
except CalledProcessError, e:
if e.returncode == 2:
return None
raise
def relation_set(relation_id=None, relation_settings={}, **kwargs): def relation_set(relation_id=None, relation_settings={}, **kwargs):
"""Set relation information for the current unit"""
relation_cmd_line = ['relation-set'] relation_cmd_line = ['relation-set']
if relation_id is not None: if relation_id is not None:
relation_cmd_line.extend(('-r', relation_id)) relation_cmd_line.extend(('-r', relation_id))
@ -192,7 +199,7 @@ def relation_set(relation_id=None, relation_settings={}, **kwargs):
@cached @cached
def relation_ids(reltype=None): def relation_ids(reltype=None):
"A list of relation_ids" """A list of relation_ids"""
reltype = reltype or relation_type() reltype = reltype or relation_type()
relid_cmd_line = ['relation-ids', '--format=json'] relid_cmd_line = ['relation-ids', '--format=json']
if reltype is not None: if reltype is not None:
@ -203,7 +210,7 @@ def relation_ids(reltype=None):
@cached @cached
def related_units(relid=None): def related_units(relid=None):
"A list of related units" """A list of related units"""
relid = relid or relation_id() relid = relid or relation_id()
units_cmd_line = ['relation-list', '--format=json'] units_cmd_line = ['relation-list', '--format=json']
if relid is not None: if relid is not None:
@ -213,7 +220,7 @@ def related_units(relid=None):
@cached @cached
def relation_for_unit(unit=None, rid=None): def relation_for_unit(unit=None, rid=None):
"Get the json represenation of a unit's relation" """Get the json represenation of a unit's relation"""
unit = unit or remote_unit() unit = unit or remote_unit()
relation = relation_get(unit=unit, rid=rid) relation = relation_get(unit=unit, rid=rid)
for key in relation: for key in relation:
@ -225,7 +232,7 @@ def relation_for_unit(unit=None, rid=None):
@cached @cached
def relations_for_id(relid=None): def relations_for_id(relid=None):
"Get relations of a specific relation ID" """Get relations of a specific relation ID"""
relation_data = [] relation_data = []
relid = relid or relation_ids() relid = relid or relation_ids()
for unit in related_units(relid): for unit in related_units(relid):
@ -237,7 +244,7 @@ def relations_for_id(relid=None):
@cached @cached
def relations_of_type(reltype=None): def relations_of_type(reltype=None):
"Get relations of a specific type" """Get relations of a specific type"""
relation_data = [] relation_data = []
reltype = reltype or relation_type() reltype = reltype or relation_type()
for relid in relation_ids(reltype): for relid in relation_ids(reltype):
@ -249,7 +256,7 @@ def relations_of_type(reltype=None):
@cached @cached
def relation_types(): def relation_types():
"Get a list of relation types supported by this charm" """Get a list of relation types supported by this charm"""
charmdir = os.environ.get('CHARM_DIR', '') charmdir = os.environ.get('CHARM_DIR', '')
mdf = open(os.path.join(charmdir, 'metadata.yaml')) mdf = open(os.path.join(charmdir, 'metadata.yaml'))
md = yaml.safe_load(mdf) md = yaml.safe_load(mdf)
@ -264,6 +271,7 @@ def relation_types():
@cached @cached
def relations(): def relations():
"""Get a nested dictionary of relation data for all related units"""
rels = {} rels = {}
for reltype in relation_types(): for reltype in relation_types():
relids = {} relids = {}
@ -277,15 +285,35 @@ def relations():
return rels return rels
@cached
def is_relation_made(relation, keys='private-address'):
    '''
    Determine whether a relation is established by checking for
    presence of key(s).  If a list of keys is provided, they
    must all be present for the relation to be identified as made
    '''
    if isinstance(keys, str):
        keys = [keys]
    for r_id in relation_ids(relation):
        for unit in related_units(r_id):
            # The relation counts as made only if every requested key
            # is set on this remote unit.
            values = [relation_get(k, rid=r_id, unit=unit) for k in keys]
            if all(v is not None for v in values):
                return True
    return False
def open_port(port, protocol="TCP"): def open_port(port, protocol="TCP"):
"Open a service network port" """Open a service network port"""
_args = ['open-port'] _args = ['open-port']
_args.append('{}/{}'.format(port, protocol)) _args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args) subprocess.check_call(_args)
def close_port(port, protocol="TCP"): def close_port(port, protocol="TCP"):
"Close a service network port" """Close a service network port"""
_args = ['close-port'] _args = ['close-port']
_args.append('{}/{}'.format(port, protocol)) _args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args) subprocess.check_call(_args)
@ -293,6 +321,7 @@ def close_port(port, protocol="TCP"):
@cached @cached
def unit_get(attribute): def unit_get(attribute):
"""Get the unit ID for the remote unit"""
_args = ['unit-get', '--format=json', attribute] _args = ['unit-get', '--format=json', attribute]
try: try:
return json.loads(subprocess.check_output(_args)) return json.loads(subprocess.check_output(_args))
@ -301,22 +330,46 @@ def unit_get(attribute):
def unit_private_ip(): def unit_private_ip():
"""Get this unit's private IP address"""
return unit_get('private-address') return unit_get('private-address')
class UnregisteredHookError(Exception): class UnregisteredHookError(Exception):
"""Raised when an undefined hook is called"""
pass pass
class Hooks(object): class Hooks(object):
"""A convenient handler for hook functions.
Example:
hooks = Hooks()
# register a hook, taking its name from the function name
@hooks.hook()
def install():
...
# register a hook, providing a custom hook name
@hooks.hook("config-changed")
def config_changed():
...
if __name__ == "__main__":
# execute a hook based on the name the program is called by
hooks.execute(sys.argv)
"""
def __init__(self): def __init__(self):
super(Hooks, self).__init__() super(Hooks, self).__init__()
self._hooks = {} self._hooks = {}
def register(self, name, function): def register(self, name, function):
"""Register a hook"""
self._hooks[name] = function self._hooks[name] = function
def execute(self, args): def execute(self, args):
"""Execute a registered hook based on args[0]"""
hook_name = os.path.basename(args[0]) hook_name = os.path.basename(args[0])
if hook_name in self._hooks: if hook_name in self._hooks:
self._hooks[hook_name]() self._hooks[hook_name]()
@ -324,6 +377,7 @@ class Hooks(object):
raise UnregisteredHookError(hook_name) raise UnregisteredHookError(hook_name)
def hook(self, *hook_names): def hook(self, *hook_names):
"""Decorator, registering them as hooks"""
def wrapper(decorated): def wrapper(decorated):
for hook_name in hook_names: for hook_name in hook_names:
self.register(hook_name, decorated) self.register(hook_name, decorated)
@ -337,4 +391,5 @@ class Hooks(object):
def charm_dir(): def charm_dir():
"""Return the root directory of the current charm"""
return os.environ.get('CHARM_DIR') return os.environ.get('CHARM_DIR')

View File

@ -19,18 +19,22 @@ from hookenv import log
def service_start(service_name): def service_start(service_name):
"""Start a system service"""
return service('start', service_name) return service('start', service_name)
def service_stop(service_name): def service_stop(service_name):
"""Stop a system service"""
return service('stop', service_name) return service('stop', service_name)
def service_restart(service_name): def service_restart(service_name):
"""Restart a system service"""
return service('restart', service_name) return service('restart', service_name)
def service_reload(service_name, restart_on_failure=False): def service_reload(service_name, restart_on_failure=False):
"""Reload a system service, optionally falling back to restart if reload fails"""
service_result = service('reload', service_name) service_result = service('reload', service_name)
if not service_result and restart_on_failure: if not service_result and restart_on_failure:
service_result = service('restart', service_name) service_result = service('restart', service_name)
@ -38,11 +42,13 @@ def service_reload(service_name, restart_on_failure=False):
def service(action, service_name): def service(action, service_name):
"""Control a system service"""
cmd = ['service', service_name, action] cmd = ['service', service_name, action]
return subprocess.call(cmd) == 0 return subprocess.call(cmd) == 0
def service_running(service): def service_running(service):
"""Determine whether a system service is running"""
try: try:
output = subprocess.check_output(['service', service, 'status']) output = subprocess.check_output(['service', service, 'status'])
except subprocess.CalledProcessError: except subprocess.CalledProcessError:
@ -55,7 +61,7 @@ def service_running(service):
def adduser(username, password=None, shell='/bin/bash', system_user=False): def adduser(username, password=None, shell='/bin/bash', system_user=False):
"""Add a user""" """Add a user to the system"""
try: try:
user_info = pwd.getpwnam(username) user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username)) log('user {0} already exists!'.format(username))
@ -138,7 +144,7 @@ def write_file(path, content, owner='root', group='root', perms=0444):
def mount(device, mountpoint, options=None, persist=False): def mount(device, mountpoint, options=None, persist=False):
'''Mount a filesystem''' """Mount a filesystem at a particular mountpoint"""
cmd_args = ['mount'] cmd_args = ['mount']
if options is not None: if options is not None:
cmd_args.extend(['-o', options]) cmd_args.extend(['-o', options])
@ -155,7 +161,7 @@ def mount(device, mountpoint, options=None, persist=False):
def umount(mountpoint, persist=False): def umount(mountpoint, persist=False):
'''Unmount a filesystem''' """Unmount a filesystem"""
cmd_args = ['umount', mountpoint] cmd_args = ['umount', mountpoint]
try: try:
subprocess.check_output(cmd_args) subprocess.check_output(cmd_args)
@ -169,7 +175,7 @@ def umount(mountpoint, persist=False):
def mounts(): def mounts():
'''List of all mounted volumes as [[mountpoint,device],[...]]''' """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
with open('/proc/mounts') as f: with open('/proc/mounts') as f:
# [['/mount/point','/dev/path'],[...]] # [['/mount/point','/dev/path'],[...]]
system_mounts = [m[1::-1] for m in [l.strip().split() system_mounts = [m[1::-1] for m in [l.strip().split()
@ -178,7 +184,7 @@ def mounts():
def file_hash(path): def file_hash(path):
''' Generate a md5 hash of the contents of 'path' or None if not found ''' """Generate a md5 hash of the contents of 'path' or None if not found """
if os.path.exists(path): if os.path.exists(path):
h = hashlib.md5() h = hashlib.md5()
with open(path, 'r') as source: with open(path, 'r') as source:
@ -189,7 +195,7 @@ def file_hash(path):
def restart_on_change(restart_map): def restart_on_change(restart_map):
''' Restart services based on configuration files changing """Restart services based on configuration files changing
This function is used a decorator, for example This function is used a decorator, for example
@ -202,7 +208,7 @@ def restart_on_change(restart_map):
In this example, the cinder-api and cinder-volume services In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the would be restarted if /etc/ceph/ceph.conf is changed by the
ceph_client_changed function. ceph_client_changed function.
''' """
def wrap(f): def wrap(f):
def wrapped_f(*args): def wrapped_f(*args):
checksums = {} checksums = {}
@ -220,7 +226,7 @@ def restart_on_change(restart_map):
def lsb_release(): def lsb_release():
'''Return /etc/lsb-release in a dict''' """Return /etc/lsb-release in a dict"""
d = {} d = {}
with open('/etc/lsb-release', 'r') as lsb: with open('/etc/lsb-release', 'r') as lsb:
for l in lsb: for l in lsb:
@ -230,7 +236,7 @@ def lsb_release():
def pwgen(length=None): def pwgen(length=None):
'''Generate a random password.''' """Generate a random password."""
if length is None: if length is None:
length = random.choice(range(35, 45)) length = random.choice(range(35, 45))
alphanumeric_chars = [ alphanumeric_chars = [

View File

@ -20,6 +20,32 @@ deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
PROPOSED_POCKET = """# Proposed PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
""" """
# Map user-facing 'cloud:' source aliases to Ubuntu Cloud Archive
# pockets.  Several spellings are accepted per release; each resolves
# to either the -updates or -proposed pocket for that release.
CLOUD_ARCHIVE_POCKETS = {
    # Folsom
    'folsom': 'precise-updates/folsom',
    'precise-folsom': 'precise-updates/folsom',
    'precise-folsom/updates': 'precise-updates/folsom',
    'precise-updates/folsom': 'precise-updates/folsom',
    'folsom/proposed': 'precise-proposed/folsom',
    'precise-folsom/proposed': 'precise-proposed/folsom',
    'precise-proposed/folsom': 'precise-proposed/folsom',
    # Grizzly
    'grizzly': 'precise-updates/grizzly',
    'precise-grizzly': 'precise-updates/grizzly',
    'precise-grizzly/updates': 'precise-updates/grizzly',
    'precise-updates/grizzly': 'precise-updates/grizzly',
    'grizzly/proposed': 'precise-proposed/grizzly',
    'precise-grizzly/proposed': 'precise-proposed/grizzly',
    'precise-proposed/grizzly': 'precise-proposed/grizzly',
    # Havana
    'havana': 'precise-updates/havana',
    'precise-havana': 'precise-updates/havana',
    'precise-havana/updates': 'precise-updates/havana',
    'precise-updates/havana': 'precise-updates/havana',
    'havana/proposed': 'precise-proposed/havana',
    # Fixed typo: was 'precies-havana/proposed', which made the
    # documented spelling raise SourceConfigError in add_source().
    'precise-havana/proposed': 'precise-proposed/havana',
    'precise-proposed/havana': 'precise-proposed/havana',
}
def filter_installed_packages(packages): def filter_installed_packages(packages):
@ -79,16 +105,35 @@ def apt_purge(packages, fatal=False):
subprocess.call(cmd) subprocess.call(cmd)
def apt_hold(packages, fatal=False):
    """Hold one or more packages via 'apt-mark hold'.

    :param packages: str or sequence: package name(s) to hold.
    :param fatal: bool: raise if apt-mark exits non-zero.
    """
    if isinstance(packages, basestring):
        pkg_args = [packages]
    else:
        pkg_args = list(packages)
    cmd = ['apt-mark', 'hold'] + pkg_args
    log("Holding {}".format(packages))
    runner = subprocess.check_call if fatal else subprocess.call
    runner(cmd)
def add_source(source, key=None): def add_source(source, key=None):
if ((source.startswith('ppa:') or if (source.startswith('ppa:') or
source.startswith('http:'))): source.startswith('http:') or
source.startswith('deb ') or
source.startswith('cloud-archive:')):
subprocess.check_call(['add-apt-repository', '--yes', source]) subprocess.check_call(['add-apt-repository', '--yes', source])
elif source.startswith('cloud:'): elif source.startswith('cloud:'):
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
fatal=True) fatal=True)
pocket = source.split(':')[-1] pocket = source.split(':')[-1]
if pocket not in CLOUD_ARCHIVE_POCKETS:
raise SourceConfigError('Unsupported cloud: source option %s' % pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(pocket)) apt.write(CLOUD_ARCHIVE.format(actual_pocket))
elif source == 'proposed': elif source == 'proposed':
release = lsb_release()['DISTRIB_CODENAME'] release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
@ -118,8 +163,11 @@ def configure_sources(update=False,
Note that 'null' (a.k.a. None) should not be quoted. Note that 'null' (a.k.a. None) should not be quoted.
""" """
sources = safe_load(config(sources_var)) sources = safe_load(config(sources_var))
keys = safe_load(config(keys_var)) keys = config(keys_var)
if isinstance(sources, basestring) and isinstance(keys, basestring): if keys is not None:
keys = safe_load(keys)
if isinstance(sources, basestring) and (
keys is None or isinstance(keys, basestring)):
add_source(sources, keys) add_source(sources, keys)
else: else:
if not len(sources) == len(keys): if not len(sources) == len(keys):

View File

@ -12,6 +12,7 @@ except ImportError:
apt_install("python-bzrlib") apt_install("python-bzrlib")
from bzrlib.branch import Branch from bzrlib.branch import Branch
class BzrUrlFetchHandler(BaseFetchHandler): class BzrUrlFetchHandler(BaseFetchHandler):
"""Handler for bazaar branches via generic and lp URLs""" """Handler for bazaar branches via generic and lp URLs"""
def can_handle(self, source): def can_handle(self, source):
@ -46,4 +47,3 @@ class BzrUrlFetchHandler(BaseFetchHandler):
except OSError as e: except OSError as e:
raise UnhandledSource(e.strerror) raise UnhandledSource(e.strerror)
return dest_dir return dest_dir