Initial version of charm
commit 74e831c061
6 .coveragerc Normal file
@@ -0,0 +1,6 @@
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
    if __name__ == .__main__.:
include=
    hooks/cinder_*
17 .project Normal file
@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
	<name>cinder-ceph</name>
	<comment></comment>
	<projects>
	</projects>
	<buildSpec>
		<buildCommand>
			<name>org.python.pydev.PyDevBuilder</name>
			<arguments>
			</arguments>
		</buildCommand>
	</buildSpec>
	<natures>
		<nature>org.python.pydev.pythonNature</nature>
	</natures>
</projectDescription>
9 .pydevproject Normal file
@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?><pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/cinder-ceph/hooks</path>
<path>/cinder-ceph/unit_tests</path>
</pydev_pathproperty>
</pydev_project>
14 Makefile Normal file
@@ -0,0 +1,14 @@
#!/usr/bin/make
PYTHON := /usr/bin/env python

lint:
	@flake8 --exclude hooks/charmhelpers hooks
	@flake8 --exclude hooks/charmhelpers unit_tests
	@charm proof

test:
	@echo Starting tests...
	@$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests

sync:
	@charm-helper-sync -c charm-helpers.yaml
9 charm-helpers.yaml Normal file
@@ -0,0 +1,9 @@
branch: lp:charm-helpers
destination: hooks/charmhelpers
include:
    - core
    - fetch
    - contrib.openstack|inc=*
    - contrib.storage
    - contrib.hahelpers
    - payload.execd
12 config.yaml Normal file
@@ -0,0 +1,12 @@
options:
  ceph-osd-replication-count:
    default: 2
    type: int
    description: |
      This value dictates the number of replicas ceph must make of any
      object it stores within the cinder rbd pool. Of course, this only
      applies if using Ceph as a backend store. Note that once the cinder
      rbd pool has been created, changing this value will not have any
      effect (although it can be changed in ceph by manually configuring
      your ceph cluster).
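
A minimal sketch (not part of the commit) of reading this option from a hook via the vendored charmhelpers API; the wrapper function is hypothetical:

    from charmhelpers.core.hookenv import config

    def ceph_replicas():
        # config() exposes config.yaml options as a dict-like object;
        # 'ceph-osd-replication-count' defaults to 2 as declared above.
        return config('ceph-osd-replication-count')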
17 copyright Normal file
@@ -0,0 +1,17 @@
Format: http://dep.debian.net/deps/dep5/

Files: *
Copyright: Copyright 2012, Canonical Ltd., All Rights Reserved.
License: GPL-3
 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation, either version 3 of the License, or
 (at your option) any later version.
 .
 This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details.
 .
 You should have received a copy of the GNU General Public License
 along with this program.  If not, see <http://www.gnu.org/licenses/>.
0 hooks/__init__.py Normal file
1 hooks/ceph-relation-broken Symbolic link
@@ -0,0 +1 @@
cinder_hooks.py
1 hooks/ceph-relation-changed Symbolic link
@@ -0,0 +1 @@
cinder_hooks.py
1 hooks/ceph-relation-joined Symbolic link
@@ -0,0 +1 @@
cinder_hooks.py
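
All three hooks are symlinks to cinder_hooks.py (not shown in this excerpt); Juju invokes the symlink by hook name and the script dispatches on argv. A sketch of that dispatch pattern, with an illustrative hook body:

    import sys
    from charmhelpers.core.hookenv import Hooks, log

    hooks = Hooks()

    @hooks.hook('ceph-relation-joined', 'ceph-relation-changed',
                'ceph-relation-broken')
    def ceph_relation():
        log('ceph relation hook fired')

    if __name__ == '__main__':
        # Juju runs the symlink, so sys.argv[0] carries the hook name
        # and Hooks.execute() routes to the decorated function.
        hooks.execute(sys.argv)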
0 hooks/charmhelpers/__init__.py Normal file
0 hooks/charmhelpers/contrib/__init__.py Normal file
0 hooks/charmhelpers/contrib/hahelpers/__init__.py Normal file
58 hooks/charmhelpers/contrib/hahelpers/apache.py Normal file
@@ -0,0 +1,58 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
#  James Page <james.page@ubuntu.com>
#  Adam Gandelman <adamg@ubuntu.com>
#

import subprocess

from charmhelpers.core.hookenv import (
    config as config_get,
    relation_get,
    relation_ids,
    related_units as relation_list,
    log,
    INFO,
)


def get_cert():
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if not (cert and key):
        log("Inspecting identity-service relations for SSL certificate.",
            level=INFO)
        cert = key = None
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                if not cert:
                    cert = relation_get('ssl_cert',
                                        rid=r_id, unit=unit)
                if not key:
                    key = relation_get('ssl_key',
                                       rid=r_id, unit=unit)
    return (cert, key)


def get_ca_cert():
    ca_cert = None
    log("Inspecting identity-service relations for CA SSL certificate.",
        level=INFO)
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            if not ca_cert:
                ca_cert = relation_get('ca_cert',
                                       rid=r_id, unit=unit)
    return ca_cert


def install_ca_cert(ca_cert):
    if ca_cert:
        with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
                  'w') as crt:
            crt.write(ca_cert)
        subprocess.check_call(['update-ca-certificates', '--fresh'])
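
An illustrative use of the helpers above; the base64 handling mirrors what ApacheSSLContext in contrib.openstack.context (later in this commit) does with these relation values:

    from base64 import b64decode
    from charmhelpers.contrib.hahelpers.apache import (
        get_cert, get_ca_cert, install_ca_cert)

    cert, key = get_cert()           # from charm config or identity-service
    if cert and key:
        ssl_cert = b64decode(cert)   # relation values are base64-encoded
        ssl_key = b64decode(key)
    ca = get_ca_cert()
    if ca:
        install_ca_cert(b64decode(ca))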
183 hooks/charmhelpers/contrib/hahelpers/cluster.py Normal file
@@ -0,0 +1,183 @@
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
#  James Page <james.page@ubuntu.com>
#  Adam Gandelman <adamg@ubuntu.com>
#

import subprocess
import os

from socket import gethostname as get_unit_hostname

from charmhelpers.core.hookenv import (
    log,
    relation_ids,
    related_units as relation_list,
    relation_get,
    config as config_get,
    INFO,
    ERROR,
    unit_get,
)


class HAIncompleteConfig(Exception):
    pass


def is_clustered():
    for r_id in (relation_ids('ha') or []):
        for unit in (relation_list(r_id) or []):
            clustered = relation_get('clustered',
                                     rid=r_id,
                                     unit=unit)
            if clustered:
                return True
    return False


def is_leader(resource):
    cmd = [
        "crm", "resource",
        "show", resource
    ]
    try:
        status = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        return False
    else:
        if get_unit_hostname() in status:
            return True
        else:
            return False


def peer_units():
    peers = []
    for r_id in (relation_ids('cluster') or []):
        for unit in (relation_list(r_id) or []):
            peers.append(unit)
    return peers


def oldest_peer(peers):
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    for peer in peers:
        remote_unit_no = int(peer.split('/')[1])
        if remote_unit_no < local_unit_no:
            return False
    return True


def eligible_leader(resource):
    if is_clustered():
        if not is_leader(resource):
            log('Deferring action to CRM leader.', level=INFO)
            return False
    else:
        peers = peer_units()
        if peers and not oldest_peer(peers):
            log('Deferring action to oldest service unit.', level=INFO)
            return False
    return True


def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS.

    returns: boolean
    '''
    if config_get('use-https') == "yes":
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
                relation_get('ssl_cert', rid=r_id, unit=unit),
                relation_get('ssl_key', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)
            if (None not in rel_state) and ('' not in rel_state):
                return True
    return False


def determine_api_port(public_port):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the API service
    '''
    i = 0
    if len(peer_units()) > 0 or is_clustered():
        i += 1
    if https():
        i += 1
    return public_port - (i * 10)


def determine_haproxy_port(public_port):
    '''
    Description: Determine correct proxy listening port based on public IP +
    existence of HTTPS reverse proxy.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the HAProxy service
    '''
    i = 0
    if https():
        i += 1
    return public_port - (i * 10)


def get_hacluster_config():
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr

    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
    conf = {}
    for setting in settings:
        conf[setting] = config_get(setting)
    missing = []
    [missing.append(s) for s, v in conf.iteritems() if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf


def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs    : OSTemplateRenderer: A config templating object to inspect
                  for a complete https context.
    :vip_setting: str: Setting in charm config that specifies
                  VIP address.
    '''
    scheme = 'http'
    if 'https' in configs.complete_contexts():
        scheme = 'https'
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)
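
A worked example (values illustrative) of the port arithmetic in determine_api_port()/determine_haproxy_port(): each frontend layer shifts the listener down by 10 from the public port.

    public_port = 8776   # cinder-api's standard public port

    # standalone, no TLS:   8776 - 0*10 = 8776 (API listens directly)
    # peers or clustered:   8776 - 1*10 = 8766 (haproxy takes 8776)
    # clustered + HTTPS:    8776 - 2*10 = 8756 (Apache on 8776,
    #                       haproxy on 8766, API on 8756)
    assert public_port - (2 * 10) == 8756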
0 hooks/charmhelpers/contrib/openstack/__init__.py Normal file
17 hooks/charmhelpers/contrib/openstack/alternatives.py Normal file
@@ -0,0 +1,17 @@
''' Helper for managing alternatives for file conflict resolution '''

import subprocess
import shutil
import os


def install_alternative(name, target, source, priority=50):
    ''' Install alternative configuration '''
    if (os.path.exists(target) and not os.path.islink(target)):
        # Move existing file/directory away before installing
        shutil.move(target, '{}.bak'.format(target))
    cmd = [
        'update-alternatives', '--force', '--install',
        target, name, source, str(priority)
    ]
    subprocess.check_call(cmd)
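
Hypothetical usage (paths are illustrative): registering a charm-rendered ceph.conf as an alternative for the system path, so co-located charms can share /etc/ceph/ceph.conf without clobbering each other:

    from charmhelpers.contrib.openstack.alternatives import install_alternative

    install_alternative(name='ceph.conf',
                        target='/etc/ceph/ceph.conf',
                        source='/var/lib/charm/cinder-ceph/ceph.conf')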
577 hooks/charmhelpers/contrib/openstack/context.py Normal file
@@ -0,0 +1,577 @@
import json
import os

from base64 import b64decode

from subprocess import (
    check_call
)


from charmhelpers.fetch import (
    apt_install,
    filter_installed_packages,
)

from charmhelpers.core.hookenv import (
    config,
    local_unit,
    log,
    relation_get,
    relation_ids,
    related_units,
    unit_get,
    unit_private_ip,
    ERROR,
)

from charmhelpers.contrib.hahelpers.cluster import (
    determine_api_port,
    determine_haproxy_port,
    https,
    is_clustered,
    peer_units,
)

from charmhelpers.contrib.hahelpers.apache import (
    get_cert,
    get_ca_cert,
)

from charmhelpers.contrib.openstack.neutron import (
    neutron_plugin_attribute,
)

CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'


class OSContextError(Exception):
    pass


def ensure_packages(packages):
    '''Install but do not upgrade required plugin packages'''
    required = filter_installed_packages(packages)
    if required:
        apt_install(required, fatal=True)


def context_complete(ctxt):
    _missing = []
    for k, v in ctxt.iteritems():
        if v is None or v == '':
            _missing.append(k)
    if _missing:
        log('Missing required data: %s' % ' '.join(_missing), level='INFO')
        return False
    return True


class OSContextGenerator(object):
    interfaces = []

    def __call__(self):
        raise NotImplementedError


class SharedDBContext(OSContextGenerator):
    interfaces = ['shared-db']

    def __init__(self, database=None, user=None, relation_prefix=None):
        '''
        Allows inspecting relation for settings prefixed with relation_prefix.
        This is useful for parsing access for multiple databases returned via
        the shared-db interface (eg, nova_password, quantum_password)
        '''
        self.relation_prefix = relation_prefix
        self.database = database
        self.user = user

    def __call__(self):
        self.database = self.database or config('database')
        self.user = self.user or config('database-user')
        if None in [self.database, self.user]:
            log('Could not generate shared_db context. '
                'Missing required charm config options. '
                '(database name and user)')
            raise OSContextError
        ctxt = {}

        password_setting = 'password'
        if self.relation_prefix:
            password_setting = self.relation_prefix + '_password'

        for rid in relation_ids('shared-db'):
            for unit in related_units(rid):
                passwd = relation_get(password_setting, rid=rid, unit=unit)
                ctxt = {
                    'database_host': relation_get('db_host', rid=rid,
                                                  unit=unit),
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': passwd,
                }
                if context_complete(ctxt):
                    return ctxt
        return {}


class IdentityServiceContext(OSContextGenerator):
    interfaces = ['identity-service']

    def __call__(self):
        log('Generating template context for identity-service')
        ctxt = {}

        for rid in relation_ids('identity-service'):
            for unit in related_units(rid):
                ctxt = {
                    'service_port': relation_get('service_port', rid=rid,
                                                 unit=unit),
                    'service_host': relation_get('service_host', rid=rid,
                                                 unit=unit),
                    'auth_host': relation_get('auth_host', rid=rid, unit=unit),
                    'auth_port': relation_get('auth_port', rid=rid, unit=unit),
                    'admin_tenant_name': relation_get('service_tenant',
                                                      rid=rid, unit=unit),
                    'admin_user': relation_get('service_username', rid=rid,
                                               unit=unit),
                    'admin_password': relation_get('service_password', rid=rid,
                                                   unit=unit),
                    # XXX: Hard-coded http.
                    'service_protocol': 'http',
                    'auth_protocol': 'http',
                }
                if context_complete(ctxt):
                    return ctxt
        return {}


class AMQPContext(OSContextGenerator):
    interfaces = ['amqp']

    def __call__(self):
        log('Generating template context for amqp')
        conf = config()
        try:
            username = conf['rabbit-user']
            vhost = conf['rabbit-vhost']
        except KeyError as e:
            log('Could not generate shared_db context. '
                'Missing required charm config options: %s.' % e)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids('amqp'):
            for unit in related_units(rid):
                if relation_get('clustered', rid=rid, unit=unit):
                    ctxt['clustered'] = True
                    ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
                                                         unit=unit)
                else:
                    ctxt['rabbitmq_host'] = relation_get('private-address',
                                                         rid=rid, unit=unit)
                ctxt.update({
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'rabbitmq_virtual_host': vhost,
                })
                if context_complete(ctxt):
                    # Sufficient information found = break out!
                    break
            # Used for active/active rabbitmq >= grizzly
            if 'clustered' not in ctxt and len(related_units(rid)) > 1:
                rabbitmq_hosts = []
                for unit in related_units(rid):
                    rabbitmq_hosts.append(relation_get('private-address',
                                                       rid=rid, unit=unit))
                ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
        if not context_complete(ctxt):
            return {}
        else:
            return ctxt


class CephContext(OSContextGenerator):
    interfaces = ['ceph']

    def __call__(self):
        '''This generates context for /etc/ceph/ceph.conf templates'''
        if not relation_ids('ceph'):
            return {}
        log('Generating template context for ceph')
        mon_hosts = []
        auth = None
        key = None
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                mon_hosts.append(relation_get('private-address', rid=rid,
                                              unit=unit))
                auth = relation_get('auth', rid=rid, unit=unit)
                key = relation_get('key', rid=rid, unit=unit)

        ctxt = {
            'mon_hosts': ' '.join(mon_hosts),
            'auth': auth,
            'key': key,
        }

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])

        return ctxt


class HAProxyContext(OSContextGenerator):
    interfaces = ['cluster']

    def __call__(self):
        '''
        Builds half a context for the haproxy template, which describes
        all peers to be included in the cluster.  Each charm needs to include
        its own context generator that describes the port mapping.
        '''
        if not relation_ids('cluster'):
            return {}

        cluster_hosts = {}
        l_unit = local_unit().replace('/', '-')
        cluster_hosts[l_unit] = unit_get('private-address')

        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _unit = unit.replace('/', '-')
                addr = relation_get('private-address', rid=rid, unit=unit)
                cluster_hosts[_unit] = addr

        ctxt = {
            'units': cluster_hosts,
        }
        if len(cluster_hosts.keys()) > 1:
            # Enable haproxy when we have enough peers.
            log('Ensuring haproxy enabled in /etc/default/haproxy.')
            with open('/etc/default/haproxy', 'w') as out:
                out.write('ENABLED=1\n')
            return ctxt
        log('HAProxy context is incomplete, this unit has no peers.')
        return {}


class ImageServiceContext(OSContextGenerator):
    interfaces = ['image-service']

    def __call__(self):
        '''
        Obtains the glance API server from the image-service relation.
        Useful in nova and cinder (currently).
        '''
        log('Generating template context for image-service.')
        rids = relation_ids('image-service')
        if not rids:
            return {}
        for rid in rids:
            for unit in related_units(rid):
                api_server = relation_get('glance-api-server',
                                          rid=rid, unit=unit)
                if api_server:
                    return {'glance_api_servers': api_server}
        log('ImageService context is incomplete. '
            'Missing required relation data.')
        return {}


class ApacheSSLContext(OSContextGenerator):

    """
    Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints.  Generated context
    looks something like:
    {
        'namespace': 'cinder',
        'private_address': 'iscsi.mycinderhost.com',
        'endpoints': [(8776, 8766), (8777, 8767)]
    }

    The endpoints list consists of tuples mapping external ports
    to internal ports.
    """
    interfaces = ['https']

    # charms should inherit this context and set external ports
    # and service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
        check_call(cmd)

    def configure_cert(self):
        if not os.path.isdir('/etc/apache2/ssl'):
            os.mkdir('/etc/apache2/ssl')
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        if not os.path.isdir(ssl_dir):
            os.mkdir(ssl_dir)
        cert, key = get_cert()
        with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
            cert_out.write(b64decode(cert))
        with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
            key_out.write(b64decode(key))
        ca_cert = get_ca_cert()
        if ca_cert:
            with open(CA_CERT_PATH, 'w') as ca_out:
                ca_out.write(b64decode(ca_cert))
            check_call(['update-ca-certificates'])

    def __call__(self):
        if isinstance(self.external_ports, basestring):
            self.external_ports = [self.external_ports]
        if (not self.external_ports or not https()):
            return {}

        self.configure_cert()
        self.enable_modules()

        ctxt = {
            'namespace': self.service_namespace,
            'private_address': unit_get('private-address'),
            'endpoints': []
        }
        for ext_port in self.external_ports:
            if peer_units() or is_clustered():
                int_port = determine_haproxy_port(ext_port)
            else:
                int_port = determine_api_port(ext_port)
            portmap = (int(ext_port), int(int_port))
            ctxt['endpoints'].append(portmap)
        return ctxt


class NeutronContext(object):
    interfaces = []

    @property
    def plugin(self):
        return None

    @property
    def network_manager(self):
        return None

    @property
    def packages(self):
        return neutron_plugin_attribute(
            self.plugin, 'packages', self.network_manager)

    @property
    def neutron_security_groups(self):
        return None

    def _ensure_packages(self):
        [ensure_packages(pkgs) for pkgs in self.packages]

    def _save_flag_file(self):
        if self.network_manager == 'quantum':
            _file = '/etc/nova/quantum_plugin.conf'
        else:
            _file = '/etc/nova/neutron_plugin.conf'
        with open(_file, 'wb') as out:
            out.write(self.plugin + '\n')

    def ovs_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        ovs_ctxt = {
            'core_plugin': driver,
            'neutron_plugin': 'ovs',
            'neutron_security_groups': self.neutron_security_groups,
            'local_ip': unit_private_ip(),
            'config': config
        }

        return ovs_ctxt

    def nvp_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        nvp_ctxt = {
            'core_plugin': driver,
            'neutron_plugin': 'nvp',
            'neutron_security_groups': self.neutron_security_groups,
            'local_ip': unit_private_ip(),
            'config': config
        }

        return nvp_ctxt

    def __call__(self):
        self._ensure_packages()

        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        ctxt = {'network_manager': self.network_manager}

        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())
        elif self.plugin == 'nvp':
            ctxt.update(self.nvp_ctxt())

        self._save_flag_file()
        return ctxt


class OSConfigFlagContext(OSContextGenerator):

    """
    Responsible for adding user-defined config-flags in charm config to a
    template context.

    NOTE: the value of config-flags may be a comma-separated list of
          key=value pairs and some OpenStack config files support
          comma-separated lists as values.
    """

    def __call__(self):
        config_flags = config('config-flags')
        if not config_flags:
            return {}

        if config_flags.find('==') >= 0:
            log("config_flags is not in expected format (key=value)",
                level=ERROR)
            raise OSContextError

        # strip the following from each value.
        post_strippers = ' ,'
        # we strip any leading/trailing '=' or ' ' from the string then
        # split on '='.
        split = config_flags.strip(' =').split('=')
        limit = len(split)
        flags = {}
        for i in xrange(0, limit - 1):
            current = split[i]
            next = split[i + 1]
            vindex = next.rfind(',')
            if (i == limit - 2) or (vindex < 0):
                value = next
            else:
                value = next[:vindex]

            if i == 0:
                key = current
            else:
                # if this not the first entry, expect an embedded key.
                index = current.rfind(',')
                if index < 0:
                    log("invalid config value(s) at index %s" % (i),
                        level=ERROR)
                    raise OSContextError
                key = current[index + 1:]

            # Add to collection.
            flags[key.strip(post_strippers)] = value.rstrip(post_strippers)

        return {'user_config_flags': flags}


class SubordinateConfigContext(OSContextGenerator):

    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principal for multiple config
    files and multiple services.  Ie, a subordinate that has interfaces
    to both glance and nova may export the following yaml blob as json:

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                    MYSECTION:
                        - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]


    It is then up to the principal charms to subscribe this context to
    the service+config file it is interested in.  Configuration data will
    be available in the template context, in glance's case, as:
        ctxt = {
            ... other context ...
            'subordinate_config': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }

    """

    def __init__(self, service, config_file, interface):
        """
        :param service     : Service name key to query in any subordinate
                             data found
        :param config_file : Service's config file to query sections
        :param interface   : Subordinate interface to inspect
        """
        self.service = service
        self.config_file = config_file
        self.interface = interface

    def __call__(self):
        ctxt = {}
        for rid in relation_ids(self.interface):
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if sub_config and sub_config != '':
                    try:
                        sub_config = json.loads(sub_config)
                    except:
                        log('Could not parse JSON from subordinate_config '
                            'setting from %s' % rid, level=ERROR)
                        continue

                    if self.service not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s service' % (rid, self.service))
                        continue

                    sub_config = sub_config[self.service]
                    if self.config_file not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s' % (rid, self.config_file))
                        continue

                    sub_config = sub_config[self.config_file]
                    for k, v in sub_config.iteritems():
                        ctxt[k] = v

        if not ctxt:
            ctxt['sections'] = {}

        return ctxt
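
A hypothetical subclass showing the OSContextGenerator contract used throughout this module: declare the interfaces consumed, return a complete dict, or {} while relation data is still missing:

    from charmhelpers.contrib.openstack.context import (
        OSContextGenerator, context_complete)
    from charmhelpers.core.hookenv import (
        relation_ids, related_units, relation_get)

    class CephKeyContext(OSContextGenerator):
        interfaces = ['ceph']

        def __call__(self):
            for rid in relation_ids('ceph'):
                for unit in related_units(rid):
                    ctxt = {'key': relation_get('key', rid=rid, unit=unit)}
                    if context_complete(ctxt):
                        return ctxt
            return {}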
137 hooks/charmhelpers/contrib/openstack/neutron.py Normal file
@@ -0,0 +1,137 @@
# Various utilities for dealing with Neutron and the renaming from Quantum.

from subprocess import check_output

from charmhelpers.core.hookenv import (
    config,
    log,
    ERROR,
)

from charmhelpers.contrib.openstack.utils import os_release


def headers_package():
    """Ensures correct linux-headers for running kernel are installed,
    for building DKMS package"""
    kver = check_output(['uname', '-r']).strip()
    return 'linux-headers-%s' % kver


# legacy
def quantum_plugins():
    from charmhelpers.contrib.openstack import context
    return {
        'ovs': {
            'config': '/etc/quantum/plugins/openvswitch/'
                      'ovs_quantum_plugin.ini',
            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
                      'OVSQuantumPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron')],
            'services': ['quantum-plugin-openvswitch-agent'],
            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
                         ['quantum-plugin-openvswitch-agent']],
            'server_packages': ['quantum-server',
                                'quantum-plugin-openvswitch'],
            'server_services': ['quantum-server']
        },
        'nvp': {
            'config': '/etc/quantum/plugins/nicira/nvp.ini',
            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
                      'QuantumPlugin.NvpPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron')],
            'services': [],
            'packages': [],
            'server_packages': ['quantum-server',
                                'quantum-plugin-nicira'],
            'server_services': ['quantum-server']
        }
    }


def neutron_plugins():
    from charmhelpers.contrib.openstack import context
    return {
        'ovs': {
            'config': '/etc/neutron/plugins/openvswitch/'
                      'ovs_neutron_plugin.ini',
            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
                      'OVSNeutronPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron')],
            'services': ['neutron-plugin-openvswitch-agent'],
            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
                         ['quantum-plugin-openvswitch-agent']],
            'server_packages': ['neutron-server',
                                'neutron-plugin-openvswitch'],
            'server_services': ['neutron-server']
        },
        'nvp': {
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
                      'NeutronPlugin.NvpPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron')],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server',
                                'neutron-plugin-nicira'],
            'server_services': ['neutron-server']
        }
    }


def neutron_plugin_attribute(plugin, attr, net_manager=None):
    manager = net_manager or network_manager()
    if manager == 'quantum':
        plugins = quantum_plugins()
    elif manager == 'neutron':
        plugins = neutron_plugins()
    else:
        log('Error: Network manager does not support plugins.')
        raise Exception

    try:
        _plugin = plugins[plugin]
    except KeyError:
        log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
        raise Exception

    try:
        return _plugin[attr]
    except KeyError:
        return None


def network_manager():
    '''
    Deals with the renaming of Quantum to Neutron in H and any situations
    that require compatibility (eg, deploying H with network-manager=quantum,
    upgrading from G).
    '''
    release = os_release('nova-common')
    manager = config('network-manager').lower()

    if manager not in ['quantum', 'neutron']:
        return manager

    if release in ['essex']:
        # E does not support neutron
        log('Neutron networking not supported in Essex.', level=ERROR)
        raise Exception
    elif release in ['folsom', 'grizzly']:
        # neutron is named quantum in F and G
        return 'quantum'
    else:
        # ensure accurate naming for all releases post-H
        return 'neutron'
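
An illustrative lookup against the plugin maps above; pinning net_manager skips the network_manager() release lookup, though the maps themselves still call config(), so this runs inside a hook:

    from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

    conf = neutron_plugin_attribute('ovs', 'config', net_manager='neutron')
    # -> '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'
    pkgs = neutron_plugin_attribute('ovs', 'server_packages',
                                    net_manager='neutron')
    # -> ['neutron-server', 'neutron-plugin-openvswitch']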
@@ -0,0 +1,2 @@
# dummy __init__.py to fool syncer into thinking this is a syncable python
# module
11 hooks/charmhelpers/contrib/openstack/templates/ceph.conf Normal file
@@ -0,0 +1,11 @@
###############################################################################
# [ WARNING ]
# cinder configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
{% if auth -%}
[global]
 auth_supported = {{ auth }}
 keyring = /etc/ceph/$cluster.$name.keyring
 mon host = {{ mon_hosts }}
{% endif -%}
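
A quick sketch of how CephContext output feeds this template (context values made up for illustration):

    from jinja2 import Template

    tmpl = open('hooks/charmhelpers/contrib/openstack/'
                'templates/ceph.conf').read()
    ctxt = {'auth': 'cephx',
            'mon_hosts': '10.0.0.1 10.0.0.2 10.0.0.3',
            'key': 'AQB...'}  # made-up values
    print(Template(tmpl).render(ctxt))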
37 hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg Normal file
@@ -0,0 +1,37 @@
global
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    maxconn 20000
    user haproxy
    group haproxy
    spread-checks 0

defaults
    log global
    mode http
    option httplog
    option dontlognull
    retries 3
    timeout queue 1000
    timeout connect 1000
    timeout client 30000
    timeout server 30000

listen stats :8888
    mode http
    stats enable
    stats hide-version
    stats realm Haproxy\ Statistics
    stats uri /
    stats auth admin:password

{% if units -%}
{% for service, ports in service_ports.iteritems() -%}
listen {{ service }} 0.0.0.0:{{ ports[0] }}
    balance roundrobin
    option tcplog
    {% for unit, address in units.iteritems() -%}
    server {{ unit }} {{ address }}:{{ ports[1] }} check
    {% endfor %}
{% endfor -%}
{% endif -%}
@@ -0,0 +1,23 @@
{% if endpoints -%}
{% for ext, int in endpoints -%}
Listen {{ ext }}
NameVirtualHost *:{{ ext }}
<VirtualHost *:{{ ext }}>
    ServerName {{ private_address }}
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
    ProxyPass / http://localhost:{{ int }}/
    ProxyPassReverse / http://localhost:{{ int }}/
    ProxyPreserveHost on
</VirtualHost>
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
{% endfor -%}
{% endif -%}
@@ -0,0 +1,23 @@
{% if endpoints -%}
{% for ext, int in endpoints -%}
Listen {{ ext }}
NameVirtualHost *:{{ ext }}
<VirtualHost *:{{ ext }}>
    ServerName {{ private_address }}
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
    ProxyPass / http://localhost:{{ int }}/
    ProxyPassReverse / http://localhost:{{ int }}/
    ProxyPreserveHost on
</VirtualHost>
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
{% endfor -%}
{% endif -%}
280 hooks/charmhelpers/contrib/openstack/templating.py Normal file
@@ -0,0 +1,280 @@
import os

from charmhelpers.fetch import apt_install

from charmhelpers.core.hookenv import (
    log,
    ERROR,
    INFO
)

from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES

try:
    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
except ImportError:
    # python-jinja2 may not be installed yet, or we're running unittests.
    FileSystemLoader = ChoiceLoader = Environment = exceptions = None


class OSConfigException(Exception):
    pass


def get_loader(templates_dir, os_release):
    """
    Create a jinja2.ChoiceLoader containing template dirs up to
    and including os_release.  If a release-specific template directory
    is missing at templates_dir, it will be omitted from the loader.
    templates_dir is added to the bottom of the search list as a base
    loading dir.

    A charm may also ship a templates dir with this module
    and it will be appended to the bottom of the search list, eg:
    hooks/charmhelpers/contrib/openstack/templates.

    :param templates_dir: str: Base template directory containing release
                               sub-directories.
    :param os_release   : str: OpenStack release codename to construct
                               template loader.

    :returns            : jinja2.ChoiceLoader constructed with a list of
                          jinja2.FilesystemLoaders, ordered in descending
                          order by OpenStack release.
    """
    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
                 for rel in OPENSTACK_CODENAMES.itervalues()]

    if not os.path.isdir(templates_dir):
        log('Templates directory not found @ %s.' % templates_dir,
            level=ERROR)
        raise OSConfigException

    # the bottom contains templates_dir and possibly a common templates dir
    # shipped with the helper.
    loaders = [FileSystemLoader(templates_dir)]
    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
    if os.path.isdir(helper_templates):
        loaders.append(FileSystemLoader(helper_templates))

    for rel, tmpl_dir in tmpl_dirs:
        if os.path.isdir(tmpl_dir):
            loaders.insert(0, FileSystemLoader(tmpl_dir))
        if rel == os_release:
            break
    log('Creating choice loader with dirs: %s' %
        [l.searchpath for l in loaders], level=INFO)
    return ChoiceLoader(loaders)


class OSConfigTemplate(object):
    """
    Associates a config file template with a list of context generators.
    Responsible for constructing a template context based on those generators.
    """
    def __init__(self, config_file, contexts):
        self.config_file = config_file

        if hasattr(contexts, '__call__'):
            self.contexts = [contexts]
        else:
            self.contexts = contexts

        self._complete_contexts = []

    def context(self):
        ctxt = {}
        for context in self.contexts:
            _ctxt = context()
            if _ctxt:
                ctxt.update(_ctxt)
                # track interfaces for every complete context.
                [self._complete_contexts.append(interface)
                 for interface in context.interfaces
                 if interface not in self._complete_contexts]
        return ctxt

    def complete_contexts(self):
        '''
        Return a list of interfaces that have satisfied contexts.
        '''
        if self._complete_contexts:
            return self._complete_contexts
        self.context()
        return self._complete_contexts


class OSConfigRenderer(object):
    """
    This class provides a common templating system to be used by OpenStack
    charms.  It is intended to help charms share common code and templates,
    and ease the burden of managing config templates across multiple
    OpenStack releases.

    Basic usage:
        # import some common context generators from charmhelpers
        from charmhelpers.contrib.openstack import context

        # Create a renderer object for a specific OS release.
        configs = OSConfigRenderer(templates_dir='/tmp/templates',
                                   openstack_release='folsom')
        # register some config files with context generators.
        configs.register(config_file='/etc/nova/nova.conf',
                         contexts=[context.SharedDBContext(),
                                   context.AMQPContext()])
        configs.register(config_file='/etc/nova/api-paste.ini',
                         contexts=[context.IdentityServiceContext()])
        configs.register(config_file='/etc/haproxy/haproxy.conf',
                         contexts=[context.HAProxyContext()])
        # write out a single config
        configs.write('/etc/nova/nova.conf')
        # write out all registered configs
        configs.write_all()

    Details:

    OpenStack Releases and template loading
    ---------------------------------------
    When the object is instantiated, it is associated with a specific OS
    release.  This dictates how the template loader will be constructed.

    The constructed loader attempts to load the template from several places
    in the following order:
        - from the most recent OS release-specific template dir (if one exists)
        - the base templates_dir
        - a template directory shipped in the charm with this helper file.

    For the example above, '/tmp/templates' contains the following structure:
        /tmp/templates/nova.conf
        /tmp/templates/api-paste.ini
        /tmp/templates/grizzly/api-paste.ini
        /tmp/templates/havana/api-paste.ini

    Since it was registered with the grizzly release, it first searches
    the grizzly directory for nova.conf, then the templates dir.

    When writing api-paste.ini, it will find the template in the grizzly
    directory.

    If the object were created with folsom, it would fall back to the
    base templates dir for its api-paste.ini template.

    This system should help manage changes in config files through
    openstack releases, allowing charms to fall back to the most recently
    updated config template for a given release.

    The haproxy.conf, since it is not shipped in the templates dir, will
    be loaded from the module directory's template directory, eg
    $CHARM/hooks/charmhelpers/contrib/openstack/templates.  This allows
    us to ship common templates (haproxy, apache) with the helpers.

    Context generators
    ---------------------------------------
    Context generators are used to generate template contexts during hook
    execution.  Doing so may require inspecting service relations, charm
    config, etc.  When registered, a config file is associated with a list
    of generators.  When a template is rendered and written, all context
    generators are called in a chain to generate the context dictionary
    passed to the jinja2 template.  See context.py for more info.
    """
    def __init__(self, templates_dir, openstack_release):
        if not os.path.isdir(templates_dir):
            log('Could not locate templates dir %s' % templates_dir,
                level=ERROR)
            raise OSConfigException

        self.templates_dir = templates_dir
        self.openstack_release = openstack_release
        self.templates = {}
        self._tmpl_env = None

        if None in [Environment, ChoiceLoader, FileSystemLoader]:
            # if this code is running, the object is created pre-install hook.
            # jinja2 shouldn't get touched until the module is reloaded on
            # next hook execution, with proper jinja2 bits successfully
            # imported.
            apt_install('python-jinja2')

    def register(self, config_file, contexts):
        """
        Register a config file with a list of context generators to be called
        during rendering.
        """
        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
                                                       contexts=contexts)
        log('Registered config file: %s' % config_file, level=INFO)

    def _get_tmpl_env(self):
        if not self._tmpl_env:
            loader = get_loader(self.templates_dir, self.openstack_release)
            self._tmpl_env = Environment(loader=loader)

    def _get_template(self, template):
        self._get_tmpl_env()
        template = self._tmpl_env.get_template(template)
        log('Loaded template from %s' % template.filename, level=INFO)
        return template

    def render(self, config_file):
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException
        ctxt = self.templates[config_file].context()

        _tmpl = os.path.basename(config_file)
        try:
            template = self._get_template(_tmpl)
        except exceptions.TemplateNotFound:
            # if no template is found with basename, try looking for it
            # using a munged full path, eg:
            #   /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
            _tmpl = '_'.join(config_file.split('/')[1:])
            try:
                template = self._get_template(_tmpl)
            except exceptions.TemplateNotFound as e:
                log('Could not load template from %s by %s or %s.' %
                    (self.templates_dir, os.path.basename(config_file),
                     _tmpl),
                    level=ERROR)
                raise e

        log('Rendering from template: %s' % _tmpl, level=INFO)
        return template.render(ctxt)

    def write(self, config_file):
        """
        Write a single config file, raises if config file is not registered.
        """
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException

        _out = self.render(config_file)

        with open(config_file, 'wb') as out:
            out.write(_out)

        log('Wrote template %s.' % config_file, level=INFO)

    def write_all(self):
        """
        Write out all registered config files.
        """
        [self.write(k) for k in self.templates.iterkeys()]

    def set_release(self, openstack_release):
        """
        Resets the template environment and generates a new template loader
        based on the new openstack release.
        """
        self._tmpl_env = None
        self.openstack_release = openstack_release
        self._get_tmpl_env()

    def complete_contexts(self):
        '''
        Returns a list of context interfaces that yield a complete context.
        '''
        interfaces = []
        [interfaces.extend(i.complete_contexts())
         for i in self.templates.itervalues()]
        return interfaces
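
A sketch of the gating pattern charms build on top of complete_contexts(): only write a config once every interface it depends on has yielded a full context (file paths and release are illustrative):

    from charmhelpers.contrib.openstack.templating import OSConfigRenderer
    from charmhelpers.contrib.openstack import context

    configs = OSConfigRenderer(templates_dir='templates/',
                               openstack_release='grizzly')
    configs.register('/etc/cinder/cinder.conf', [context.CephContext()])

    if 'ceph' in configs.complete_contexts():
        configs.write('/etc/cinder/cinder.conf')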
444
hooks/charmhelpers/contrib/openstack/utils.py
Normal file
444
hooks/charmhelpers/contrib/openstack/utils.py
Normal file
@ -0,0 +1,444 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Common python helper functions used for OpenStack charms.
|
||||
from collections import OrderedDict
|
||||
|
||||
import apt_pkg as apt
|
||||
import subprocess
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
config,
|
||||
log as juju_log,
|
||||
charm_dir,
|
||||
ERROR,
|
||||
INFO
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.storage.linux.lvm import (
|
||||
deactivate_lvm_volume_group,
|
||||
is_lvm_physical_volume,
|
||||
remove_lvm_physical_volume,
|
||||
)
|
||||
|
||||
from charmhelpers.core.host import lsb_release, mounts, umount
|
||||
from charmhelpers.fetch import apt_install
|
||||
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
|
||||
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
|
||||
|
||||
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
|
||||
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
|
||||
|
||||
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
|
||||
'restricted main multiverse universe')
|
||||
|
||||
|
||||
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
|
||||
('oneiric', 'diablo'),
|
||||
('precise', 'essex'),
|
||||
('quantal', 'folsom'),
|
||||
('raring', 'grizzly'),
|
||||
('saucy', 'havana'),
|
||||
('trusty', 'icehouse')
|
||||
])
|
||||
|
||||
|
||||
OPENSTACK_CODENAMES = OrderedDict([
|
||||
('2011.2', 'diablo'),
|
||||
('2012.1', 'essex'),
|
||||
('2012.2', 'folsom'),
|
||||
('2013.1', 'grizzly'),
|
||||
('2013.2', 'havana'),
|
||||
('2014.1', 'icehouse'),
|
||||
])
|
||||
|
||||
# The ugly duckling
|
||||
SWIFT_CODENAMES = OrderedDict([
|
||||
('1.4.3', 'diablo'),
|
||||
('1.4.8', 'essex'),
|
||||
('1.7.4', 'folsom'),
|
||||
('1.8.0', 'grizzly'),
|
||||
('1.7.7', 'grizzly'),
|
||||
('1.7.6', 'grizzly'),
|
||||
('1.10.0', 'havana'),
|
||||
('1.9.1', 'havana'),
|
||||
('1.9.0', 'havana'),
|
||||
])
|
||||
|
||||
DEFAULT_LOOPBACK_SIZE = '5G'
|
||||
|
||||
|
||||
def error_out(msg):
|
||||
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def get_os_codename_install_source(src):
|
||||
'''Derive OpenStack release codename from a given installation source.'''
|
||||
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
||||
rel = ''
|
||||
if src in ['distro', 'distro-proposed']:
|
||||
try:
|
||||
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
|
||||
except KeyError:
|
||||
e = 'Could not derive openstack release for '\
|
||||
'this Ubuntu release: %s' % ubuntu_rel
|
||||
error_out(e)
|
||||
return rel
|
||||
|
||||
if src.startswith('cloud:'):
|
||||
ca_rel = src.split(':')[1]
|
||||
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
|
||||
return ca_rel
|
||||
|
||||
# Best guess match based on deb string provided
|
||||
if src.startswith('deb') or src.startswith('ppa'):
|
||||
for k, v in OPENSTACK_CODENAMES.iteritems():
|
||||
if v in src:
|
||||
return v
|
||||
|
||||
|
||||
def get_os_version_install_source(src):
|
||||
codename = get_os_codename_install_source(src)
|
||||
return get_os_version_codename(codename)
|
||||
|
||||
|
||||
def get_os_codename_version(vers):
|
||||
'''Determine OpenStack codename from version number.'''
|
||||
try:
|
||||
return OPENSTACK_CODENAMES[vers]
|
||||
except KeyError:
|
||||
e = 'Could not determine OpenStack codename for version %s' % vers
|
||||
error_out(e)
|
||||
|
||||
|
||||
def get_os_version_codename(codename):
|
||||
'''Determine OpenStack version number from codename.'''
|
||||
for k, v in OPENSTACK_CODENAMES.iteritems():
|
||||
if v == codename:
|
||||
return k
|
||||
e = 'Could not derive OpenStack version for '\
|
||||
'codename: %s' % codename
|
||||
error_out(e)
|
||||
|
||||
|
||||
def get_os_codename_package(package, fatal=True):
|
||||
'''Derive OpenStack release codename from an installed package.'''
|
||||
apt.init()
|
||||
cache = apt.Cache()
|
||||
|
||||
try:
|
||||
pkg = cache[package]
|
||||
except:
|
||||
if not fatal:
|
||||
return None
|
||||
# the package is unknown to the current apt cache.
|
||||
e = 'Could not determine version of package with no installation '\
|
||||
'candidate: %s' % package
|
||||
error_out(e)
|
||||
|
||||
if not pkg.current_ver:
|
||||
if not fatal:
|
||||
return None
|
||||
# package is known, but no version is currently installed.
|
||||
e = 'Could not determine version of uninstalled package: %s' % package
|
||||
error_out(e)
|
||||
|
||||
vers = apt.upstream_version(pkg.current_ver.ver_str)
|
||||
|
||||
try:
|
||||
if 'swift' in pkg.name:
|
||||
swift_vers = vers[:5]
|
||||
if swift_vers not in SWIFT_CODENAMES:
|
||||
# Deal with 1.10.0 upward
|
||||
swift_vers = vers[:6]
|
||||
return SWIFT_CODENAMES[swift_vers]
|
||||
else:
|
||||
vers = vers[:6]
|
||||
return OPENSTACK_CODENAMES[vers]
|
||||
except KeyError:
|
||||
e = 'Could not determine OpenStack codename for version %s' % vers
|
||||
error_out(e)
|
||||
|
||||
|
||||
def get_os_version_package(pkg, fatal=True):
|
||||
'''Derive OpenStack version number from an installed package.'''
|
||||
codename = get_os_codename_package(pkg, fatal=fatal)
|
||||
|
||||
if not codename:
|
||||
return None
|
||||
|
||||
if 'swift' in pkg:
|
||||
vers_map = SWIFT_CODENAMES
|
||||
else:
|
||||
vers_map = OPENSTACK_CODENAMES
|
||||
|
||||
for version, cname in vers_map.iteritems():
|
||||
if cname == codename:
|
||||
return version
|
||||
#e = "Could not determine OpenStack version for package: %s" % pkg
|
||||
#error_out(e)
|
||||
|
||||
|
||||
os_rel = None
|
||||
|
||||
|
||||
def os_release(package, base='essex'):
|
||||
'''
|
||||
Returns OpenStack release codename from a cached global.
|
||||
If the codename can not be determined from either an installed package or
|
||||
the installation source, the earliest release supported by the charm should
|
||||
be returned.
|
||||
'''
|
||||
global os_rel
|
||||
if os_rel:
|
||||
return os_rel
|
||||
os_rel = (get_os_codename_package(package, fatal=False) or
|
||||
get_os_codename_install_source(config('openstack-origin')) or
|
||||
base)
|
||||
return os_rel
|
||||
|
||||
|
||||
def import_key(keyid):
|
||||
cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
|
||||
"--recv-keys %s" % keyid
|
||||
try:
|
||||
subprocess.check_call(cmd.split(' '))
|
||||
except subprocess.CalledProcessError:
|
||||
error_out("Error importing repo key %s" % keyid)
|
||||
|
||||
|
||||
def configure_installation_source(rel):
|
||||
'''Configure apt installation source.'''
|
||||
if rel == 'distro':
|
||||
return
|
||||
elif rel == 'distro-proposed':
|
||||
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
||||
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
|
||||
f.write(DISTRO_PROPOSED % ubuntu_rel)
|
||||
elif rel[:4] == "ppa:":
|
||||
src = rel
|
||||
subprocess.check_call(["add-apt-repository", "-y", src])
|
||||
elif rel[:3] == "deb":
|
||||
l = len(rel.split('|'))
|
||||
if l == 2:
|
||||
src, key = rel.split('|')
|
||||
juju_log("Importing PPA key from keyserver for %s" % src)
|
||||
import_key(key)
|
||||
elif l == 1:
|
||||
src = rel
|
||||
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
|
||||
f.write(src)
|
||||
elif rel[:6] == 'cloud:':
|
||||
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
||||
rel = rel.split(':')[1]
|
||||
u_rel = rel.split('-')[0]
|
||||
ca_rel = rel.split('-')[1]
|
||||
|
||||
if u_rel != ubuntu_rel:
|
||||
e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
|
||||
'version (%s)' % (ca_rel, ubuntu_rel)
|
||||
error_out(e)
|
||||
|
||||
if 'staging' in ca_rel:
|
||||
# staging is just a regular PPA.
|
||||
os_rel = ca_rel.split('/')[0]
|
||||
ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
|
||||
cmd = 'add-apt-repository -y %s' % ppa
|
||||
subprocess.check_call(cmd.split(' '))
|
||||
return
|
||||
|
||||
# map charm config options to actual archive pockets.
|
||||
pockets = {
|
||||
'folsom': 'precise-updates/folsom',
|
||||
'folsom/updates': 'precise-updates/folsom',
|
||||
'folsom/proposed': 'precise-proposed/folsom',
|
||||
'grizzly': 'precise-updates/grizzly',
|
||||
'grizzly/updates': 'precise-updates/grizzly',
|
||||
'grizzly/proposed': 'precise-proposed/grizzly',
|
||||
'havana': 'precise-updates/havana',
|
||||
'havana/updates': 'precise-updates/havana',
|
||||
'havana/proposed': 'precise-proposed/havana',
|
||||
'icehouse': 'precise-updates/icehouse',
|
||||
'icehouse/updates': 'precise-updates/icehouse',
|
||||
'icehouse/proposed': 'precise-proposed/icehouse',
|
||||
}
|
||||
|
||||
try:
|
||||
pocket = pockets[ca_rel]
|
||||
except KeyError:
|
||||
e = 'Invalid Cloud Archive release specified: %s' % rel
|
||||
error_out(e)
|
||||
|
||||
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
|
||||
apt_install('ubuntu-cloud-keyring', fatal=True)
|
||||
|
||||
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
|
||||
f.write(src)
|
||||
else:
|
||||
error_out("Invalid openstack-release specified: %s" % rel)
|
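# Sketch of the source strings this function accepts (values illustrative,
# not part of the original commit):
#
#     configure_installation_source('distro')            # no-op
#     configure_installation_source('ppa:some-team/some-archive')
#     configure_installation_source('cloud:precise-havana')
#
# 'cloud:precise-havana' maps through the pockets dict above to
# 'precise-updates/havana' and is written to cloud-archive.list.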
||||
|
||||
|
||||
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
|
||||
"""
|
||||
Write an rc file in the charm-delivered directory containing
|
||||
exported environment variables provided by env_vars. Any charm scripts run
|
||||
outside the juju hook environment can source this scriptrc to obtain
|
||||
updated config information necessary to perform health checks or
|
||||
service changes.
|
||||
"""
|
||||
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
|
||||
if not os.path.exists(os.path.dirname(juju_rc_path)):
|
||||
os.mkdir(os.path.dirname(juju_rc_path))
|
||||
with open(juju_rc_path, 'wb') as rc_script:
|
||||
rc_script.write(
|
||||
"#!/bin/bash\n")
|
||||
[rc_script.write('export %s=%s\n' % (u, p))
|
||||
for u, p in env_vars.iteritems() if u != "script_path"]
|
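# Sketch (variable name hypothetical): the call below would write
# $CHARM_DIR/scripts/scriptrc containing '#!/bin/bash' followed by
# 'export OPENSTACK_PORT_MCASTPORT=5404'.
#
#     save_script_rc(OPENSTACK_PORT_MCASTPORT=5404)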
||||
|
||||
|
||||
def openstack_upgrade_available(package):
|
||||
"""
|
||||
Determines if an OpenStack upgrade is available from installation
|
||||
source, based on version of installed package.
|
||||
|
||||
:param package: str: Name of installed package.
|
||||
|
||||
:returns: bool: True if the configured installation source offers
|
||||
a newer version of package.
|
||||
|
||||
"""
|
||||
|
||||
src = config('openstack-origin')
|
||||
cur_vers = get_os_version_package(package)
|
||||
available_vers = get_os_version_install_source(src)
|
||||
apt.init()
|
||||
return apt.version_compare(available_vers, cur_vers) == 1
|
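# Note (editorial, not part of the original commit): apt_pkg's
# version_compare() returns any positive value when its first argument is
# newer, so comparing '== 1' is stricter than necessary; '> 0' would be the
# more defensive check. E.g. (sketch):
#
#     apt.version_compare('2013.2', '2013.1') > 0   # True: havana > grizzly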
||||
|
||||
|
||||
def ensure_block_device(block_device):
|
||||
'''
|
||||
Confirm block_device, create as loopback if necessary.
|
||||
|
||||
:param block_device: str: Full path of block device to ensure.
|
||||
|
||||
:returns: str: Full path of ensured block device.
|
||||
'''
|
||||
_none = ['None', 'none', None]
|
||||
if block_device in _none:
|
||||
error_out('ensure_block_device(): Missing required input: '
|
||||
'block_device=%s.' % block_device, level=ERROR)
|
||||
|
||||
if block_device.startswith('/dev/'):
|
||||
bdev = block_device
|
||||
elif block_device.startswith('/'):
|
||||
_bd = block_device.split('|')
|
||||
if len(_bd) == 2:
|
||||
bdev, size = _bd
|
||||
else:
|
||||
bdev = block_device
|
||||
size = DEFAULT_LOOPBACK_SIZE
|
||||
bdev = ensure_loopback_device(bdev, size)
|
||||
else:
|
||||
bdev = '/dev/%s' % block_device
|
||||
|
||||
if not is_block_device(bdev):
|
||||
error_out('Failed to locate valid block device at %s' % bdev,
|
||||
level=ERROR)
|
||||
|
||||
return bdev
|
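# Accepted block_device forms (sketch, device/file names hypothetical):
#
#     ensure_block_device('/dev/vdb')          # absolute device, used as-is
#     ensure_block_device('vdb')               # expanded to /dev/vdb
#     ensure_block_device('/srv/loop.img|5G')  # loopback file with a size
#     ensure_block_device('/srv/loop.img')     # loopback, DEFAULT_LOOPBACK_SIZE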
||||
|
||||
|
||||
def clean_storage(block_device):
|
||||
'''
|
||||
Ensures a block device is clean. That is:
|
||||
- unmounted
|
||||
- any lvm volume groups are deactivated
|
||||
- any lvm physical device signatures removed
|
||||
- partition table wiped
|
||||
|
||||
:param block_device: str: Full path to block device to clean.
|
||||
'''
|
||||
for mp, d in mounts():
|
||||
if d == block_device:
|
||||
juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
|
||||
(d, mp), level=INFO)
|
||||
umount(mp, persist=True)
|
||||
|
||||
if is_lvm_physical_volume(block_device):
|
||||
deactivate_lvm_volume_group(block_device)
|
||||
remove_lvm_physical_volume(block_device)
|
||||
else:
|
||||
zap_disk(block_device)
|
||||
|
||||
|
||||
def is_ip(address):
|
||||
"""
|
||||
Returns True if address is a valid IP address.
|
||||
"""
|
||||
try:
|
||||
# Test to see if already an IPv4 address
|
||||
socket.inet_aton(address)
|
||||
return True
|
||||
except socket.error:
|
||||
return False
|
||||
|
||||
|
||||
def ns_query(address):
|
||||
try:
|
||||
import dns.resolver
|
||||
except ImportError:
|
||||
apt_install('python-dnspython')
|
||||
import dns.resolver
|
||||
|
||||
if isinstance(address, dns.name.Name):
|
||||
rtype = 'PTR'
|
||||
elif isinstance(address, basestring):
|
||||
rtype = 'A'
|
||||
|
||||
answers = dns.resolver.query(address, rtype)
|
||||
if answers:
|
||||
return str(answers[0])
|
||||
return None
|
||||
|
||||
|
||||
def get_host_ip(hostname):
|
||||
"""
|
||||
Resolves the IP for a given hostname, or returns
|
||||
the input if it is already an IP.
|
||||
"""
|
||||
if is_ip(hostname):
|
||||
return hostname
|
||||
|
||||
return ns_query(hostname)
|
||||
|
||||
|
||||
def get_hostname(address, fqdn=True):
|
||||
"""
|
||||
Resolves hostname for given IP, or returns the input
|
||||
if it is already a hostname.
|
||||
"""
|
||||
if not is_ip(address):
|
||||
return address
|
||||
|
||||
try:
|
||||
import dns.reversename
|
||||
except ImportError:
|
||||
apt_install('python-dnspython')
|
||||
import dns.reversename
|
||||
|
||||
rev = dns.reversename.from_address(address)
|
||||
result = ns_query(rev)
|
||||
if not result:
|
||||
return None
|
||||
|
||||
if fqdn:
|
||||
# strip trailing .
|
||||
if result.endswith('.'):
|
||||
return result[:-1]
|
||||
else:
|
||||
return result
|
||||
else:
|
||||
return result.split('.')[0]
|
0
hooks/charmhelpers/contrib/storage/__init__.py
Normal file
383
hooks/charmhelpers/contrib/storage/linux/ceph.py
Normal file
@ -0,0 +1,383 @@
|
||||
#
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# This file is sourced from lp:openstack-charm-helpers
|
||||
#
|
||||
# Authors:
|
||||
# James Page <james.page@ubuntu.com>
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import json
|
||||
import time
|
||||
|
||||
from subprocess import (
|
||||
check_call,
|
||||
check_output,
|
||||
CalledProcessError
|
||||
)
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
relation_get,
|
||||
relation_ids,
|
||||
related_units,
|
||||
log,
|
||||
INFO,
|
||||
WARNING,
|
||||
ERROR
|
||||
)
|
||||
|
||||
from charmhelpers.core.host import (
|
||||
mount,
|
||||
mounts,
|
||||
service_start,
|
||||
service_stop,
|
||||
service_running,
|
||||
umount,
|
||||
)
|
||||
|
||||
from charmhelpers.fetch import (
|
||||
apt_install,
|
||||
)
|
||||
|
||||
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
|
||||
KEYFILE = '/etc/ceph/ceph.client.{}.key'
|
||||
|
||||
CEPH_CONF = """[global]
|
||||
auth supported = {auth}
|
||||
keyring = {keyring}
|
||||
mon host = {mon_hosts}
|
||||
"""
|
||||
|
||||
|
||||
def install():
|
||||
''' Basic Ceph client installation '''
|
||||
ceph_dir = "/etc/ceph"
|
||||
if not os.path.exists(ceph_dir):
|
||||
os.mkdir(ceph_dir)
|
||||
apt_install('ceph-common', fatal=True)
|
||||
|
||||
|
||||
def rbd_exists(service, pool, rbd_img):
|
||||
''' Check to see if a RADOS block device exists '''
|
||||
try:
|
||||
out = check_output(['rbd', 'list', '--id', service,
|
||||
'--pool', pool])
|
||||
except CalledProcessError:
|
||||
return False
|
||||
else:
|
||||
return rbd_img in out
|
||||
|
||||
|
||||
def create_rbd_image(service, pool, image, sizemb):
|
||||
''' Create a new RADOS block device '''
|
||||
cmd = [
|
||||
'rbd',
|
||||
'create',
|
||||
image,
|
||||
'--size',
|
||||
str(sizemb),
|
||||
'--id',
|
||||
service,
|
||||
'--pool',
|
||||
pool
|
||||
]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def pool_exists(service, name):
|
||||
''' Check to see if a RADOS pool already exists '''
|
||||
try:
|
||||
out = check_output(['rados', '--id', service, 'lspools'])
|
||||
except CalledProcessError:
|
||||
return False
|
||||
else:
|
||||
return name in out
|
||||
|
||||
|
||||
def get_osds(service):
|
||||
'''
|
||||
Return a list of all Ceph Object Storage Daemons
|
||||
currently in the cluster
|
||||
'''
|
||||
version = ceph_version()
|
||||
if version and version >= '0.56':
|
||||
return json.loads(check_output(['ceph', '--id', service,
|
||||
'osd', 'ls', '--format=json']))
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def create_pool(service, name, replicas=2):
|
||||
''' Create a new RADOS pool '''
|
||||
if pool_exists(service, name):
|
||||
log("Ceph pool {} already exists, skipping creation".format(name),
|
||||
level=WARNING)
|
||||
return
|
||||
# Calculate the number of placement groups based
|
||||
# on upstream recommended best practices.
|
||||
osds = get_osds(service)
|
||||
if osds:
|
||||
pgnum = (len(osds) * 100 / replicas)
|
||||
else:
|
||||
# NOTE(james-page): Default to 200 for older ceph versions
|
||||
# which don't support OSD query from cli
|
||||
pgnum = 200
|
||||
cmd = [
|
||||
'ceph', '--id', service,
|
||||
'osd', 'pool', 'create',
|
||||
name, str(pgnum)
|
||||
]
|
||||
check_call(cmd)
|
||||
cmd = [
|
||||
'ceph', '--id', service,
|
||||
'osd', 'pool', 'set', name,
|
||||
'size', str(replicas)
|
||||
]
|
||||
check_call(cmd)
|
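# Placement-group sizing follows the upstream (OSDs * 100) / replicas rule:
# e.g. 6 OSDs with 2 replicas gives 300 PGs; when the OSD count cannot be
# queried (pre-0.56 clusters), the fixed fallback of 200 is used instead.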
||||
|
||||
|
||||
def delete_pool(service, name):
|
||||
''' Delete a RADOS pool from ceph '''
|
||||
cmd = [
|
||||
'ceph', '--id', service,
|
||||
'osd', 'pool', 'delete',
|
||||
name, '--yes-i-really-really-mean-it'
|
||||
]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def _keyfile_path(service):
|
||||
return KEYFILE.format(service)
|
||||
|
||||
|
||||
def _keyring_path(service):
|
||||
return KEYRING.format(service)
|
||||
|
||||
|
||||
def create_keyring(service, key):
|
||||
''' Create a new Ceph keyring containing key'''
|
||||
keyring = _keyring_path(service)
|
||||
if os.path.exists(keyring):
|
||||
log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
|
||||
return
|
||||
cmd = [
|
||||
'ceph-authtool',
|
||||
keyring,
|
||||
'--create-keyring',
|
||||
'--name=client.{}'.format(service),
|
||||
'--add-key={}'.format(key)
|
||||
]
|
||||
check_call(cmd)
|
||||
log('ceph: Created new keyring at %s.' % keyring, level=INFO)
|
||||
|
||||
|
||||
def create_key_file(service, key):
|
||||
''' Create a file containing key '''
|
||||
keyfile = _keyfile_path(service)
|
||||
if os.path.exists(keyfile):
|
||||
log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
|
||||
return
|
||||
with open(keyfile, 'w') as fd:
|
||||
fd.write(key)
|
||||
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
|
||||
|
||||
|
||||
def get_ceph_nodes():
|
||||
''' Query named relation 'ceph' to determine current nodes '''
|
||||
hosts = []
|
||||
for r_id in relation_ids('ceph'):
|
||||
for unit in related_units(r_id):
|
||||
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
|
||||
return hosts
|
||||
|
||||
|
||||
def configure(service, key, auth):
|
||||
''' Perform basic configuration of Ceph '''
|
||||
create_keyring(service, key)
|
||||
create_key_file(service, key)
|
||||
hosts = get_ceph_nodes()
|
||||
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
|
||||
ceph_conf.write(CEPH_CONF.format(auth=auth,
|
||||
keyring=_keyring_path(service),
|
||||
mon_hosts=",".join(map(str, hosts))))
|
||||
modprobe('rbd')
|
||||
|
||||
|
||||
def image_mapped(name):
|
||||
''' Determine whether a RADOS block device is mapped locally '''
|
||||
try:
|
||||
out = check_output(['rbd', 'showmapped'])
|
||||
except CalledProcessError:
|
||||
return False
|
||||
else:
|
||||
return name in out
|
||||
|
||||
|
||||
def map_block_storage(service, pool, image):
|
||||
''' Map a RADOS block device for local use '''
|
||||
cmd = [
|
||||
'rbd',
|
||||
'map',
|
||||
'{}/{}'.format(pool, image),
|
||||
'--user',
|
||||
service,
|
||||
'--secret',
|
||||
_keyfile_path(service),
|
||||
]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def filesystem_mounted(fs):
|
||||
''' Determine whether a filesystem is already mounted '''
|
||||
return fs in [f for f, m in mounts()]
|
||||
|
||||
|
||||
def make_filesystem(blk_device, fstype='ext4', timeout=10):
|
||||
''' Make a new filesystem on the specified block device '''
|
||||
count = 0
|
||||
e_noent = os.errno.ENOENT
|
||||
while not os.path.exists(blk_device):
|
||||
if count >= timeout:
|
||||
log('ceph: gave up waiting on block device %s' % blk_device,
|
||||
level=ERROR)
|
||||
raise IOError(e_noent, os.strerror(e_noent), blk_device)
|
||||
log('ceph: waiting for block device %s to appear' % blk_device,
|
||||
level=INFO)
|
||||
count += 1
|
||||
time.sleep(1)
|
||||
else:
|
||||
log('ceph: Formatting block device %s as filesystem %s.' %
|
||||
(blk_device, fstype), level=INFO)
|
||||
check_call(['mkfs', '-t', fstype, blk_device])
|
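# Note (editorial): the while loop's 'else' clause runs only when the loop
# exits because the device appeared (the timeout path raises IOError before
# that), so mkfs is never attempted against a missing device node.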
||||
|
||||
|
||||
def place_data_on_block_device(blk_device, data_src_dst):
|
||||
''' Migrate data in data_src_dst to blk_device and then remount '''
|
||||
# mount block device into /mnt
|
||||
mount(blk_device, '/mnt')
|
||||
# copy data to /mnt
|
||||
copy_files(data_src_dst, '/mnt')
|
||||
# umount block device
|
||||
umount('/mnt')
|
||||
# Grab user/group ID's from original source
|
||||
_dir = os.stat(data_src_dst)
|
||||
uid = _dir.st_uid
|
||||
gid = _dir.st_gid
|
||||
# re-mount where the data should originally be
|
||||
# TODO: persist is currently a NO-OP in core.host
|
||||
mount(blk_device, data_src_dst, persist=True)
|
||||
# ensure original ownership of new mount.
|
||||
os.chown(data_src_dst, uid, gid)
|
||||
|
||||
|
||||
# TODO: re-use
|
||||
def modprobe(module):
|
||||
''' Load a kernel module and configure for auto-load on reboot '''
|
||||
log('ceph: Loading kernel module', level=INFO)
|
||||
cmd = ['modprobe', module]
|
||||
check_call(cmd)
|
||||
with open('/etc/modules', 'r+') as modules:
|
||||
if module not in modules.read():
|
||||
modules.write(module + '\n')
|
||||
|
||||
|
||||
def copy_files(src, dst, symlinks=False, ignore=None):
|
||||
''' Copy files from src to dst '''
|
||||
for item in os.listdir(src):
|
||||
s = os.path.join(src, item)
|
||||
d = os.path.join(dst, item)
|
||||
if os.path.isdir(s):
|
||||
shutil.copytree(s, d, symlinks, ignore)
|
||||
else:
|
||||
shutil.copy2(s, d)
|
||||
|
||||
|
||||
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
|
||||
blk_device, fstype, system_services=[]):
|
||||
"""
|
||||
NOTE: This function must only be called from a single service unit for
|
||||
the same rbd_img, otherwise data loss will occur.
|
||||
|
||||
Ensures given pool and RBD image exists, is mapped to a block device,
|
||||
and the device is formatted and mounted at the given mount_point.
|
||||
|
||||
If formatting a device for the first time, data existing at mount_point
|
||||
will be migrated to the RBD device before being re-mounted.
|
||||
|
||||
All services listed in system_services will be stopped prior to data
|
||||
migration and restarted when complete.
|
||||
"""
|
||||
# Ensure pool, RBD image, RBD mappings are in place.
|
||||
if not pool_exists(service, pool):
|
||||
log('ceph: Creating new pool {}.'.format(pool))
|
||||
create_pool(service, pool)
|
||||
|
||||
if not rbd_exists(service, pool, rbd_img):
|
||||
log('ceph: Creating RBD image ({}).'.format(rbd_img))
|
||||
create_rbd_image(service, pool, rbd_img, sizemb)
|
||||
|
||||
if not image_mapped(rbd_img):
|
||||
log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
|
||||
map_block_storage(service, pool, rbd_img)
|
||||
|
||||
# make file system
|
||||
# TODO: What happens if for whatever reason this is run again and
|
||||
# the data is already in the rbd device and/or is mounted??
|
||||
# When it is mounted already, it will fail to make the fs
|
||||
# XXX: This is really sketchy! Need to at least add an fstab entry
|
||||
# otherwise this hook will blow away existing data if its executed
|
||||
# after a reboot.
|
||||
if not filesystem_mounted(mount_point):
|
||||
make_filesystem(blk_device, fstype)
|
||||
|
||||
for svc in system_services:
|
||||
if service_running(svc):
|
||||
log('ceph: Stopping services {} prior to migrating data.'
|
||||
.format(svc))
|
||||
service_stop(svc)
|
||||
|
||||
place_data_on_block_device(blk_device, mount_point)
|
||||
|
||||
for svc in system_services:
|
||||
log('ceph: Starting service {} after migrating data.'
|
||||
.format(svc))
|
||||
service_start(svc)
|
||||
|
||||
|
||||
def ensure_ceph_keyring(service, user=None, group=None):
|
||||
'''
|
||||
Ensures a ceph keyring is created for a named service
|
||||
and optionally ensures user and group ownership.
|
||||
|
||||
Returns False if no ceph key is available in relation state.
|
||||
'''
|
||||
key = None
|
||||
for rid in relation_ids('ceph'):
|
||||
for unit in related_units(rid):
|
||||
key = relation_get('key', rid=rid, unit=unit)
|
||||
if key:
|
||||
break
|
||||
if not key:
|
||||
return False
|
||||
create_keyring(service=service, key=key)
|
||||
keyring = _keyring_path(service)
|
||||
if user and group:
|
||||
check_call(['chown', '%s.%s' % (user, group), keyring])
|
||||
return True
|
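# Typical hook usage (service/user names illustrative, not part of this
# commit):
#
#     if not ensure_ceph_keyring(service='cinder', user='cinder',
#                                group='cinder'):
#         log('ceph relation incomplete, deferring configuration')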
||||
|
||||
|
||||
def ceph_version():
|
||||
''' Retrieve the local version of ceph '''
|
||||
if os.path.exists('/usr/bin/ceph'):
|
||||
cmd = ['ceph', '-v']
|
||||
output = check_output(cmd)
|
||||
output = output.split()
|
||||
if len(output) > 3:
|
||||
return output[2]
|
||||
else:
|
||||
return None
|
||||
else:
|
||||
return None
|
62
hooks/charmhelpers/contrib/storage/linux/loopback.py
Normal file
@ -0,0 +1,62 @@
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
from subprocess import (
|
||||
check_call,
|
||||
check_output,
|
||||
)
|
||||
|
||||
|
||||
##################################################
|
||||
# loopback device helpers.
|
||||
##################################################
|
||||
def loopback_devices():
|
||||
'''
|
||||
Parse through 'losetup -a' output to determine currently mapped
|
||||
loopback devices. Output is expected to look like:
|
||||
|
||||
/dev/loop0: [0807]:961814 (/tmp/my.img)
|
||||
|
||||
:returns: dict: a dict mapping {loopback_dev: backing_file}
|
||||
'''
|
||||
loopbacks = {}
|
||||
cmd = ['losetup', '-a']
|
||||
devs = [d.strip().split(' ') for d in
|
||||
check_output(cmd).splitlines() if d != '']
|
||||
for dev, _, f in devs:
|
||||
loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
|
||||
return loopbacks
|
||||
|
||||
|
||||
def create_loopback(file_path):
|
||||
'''
|
||||
Create a loopback device for a given backing file.
|
||||
|
||||
:returns: str: Full path to new loopback device (eg, /dev/loop0)
|
||||
'''
|
||||
file_path = os.path.abspath(file_path)
|
||||
check_call(['losetup', '--find', file_path])
|
||||
for d, f in loopback_devices().iteritems():
|
||||
if f == file_path:
|
||||
return d
|
||||
|
||||
|
||||
def ensure_loopback_device(path, size):
|
||||
'''
|
||||
Ensure a loopback device exists for a given backing file path and size.
|
||||
If a loopback device is not already mapped to the file, a new one will be created.
|
||||
|
||||
TODO: Confirm size of found loopback device.
|
||||
|
||||
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
|
||||
'''
|
||||
for d, f in loopback_devices().iteritems():
|
||||
if f == path:
|
||||
return d
|
||||
|
||||
if not os.path.exists(path):
|
||||
cmd = ['truncate', '--size', size, path]
|
||||
check_call(cmd)
|
||||
|
||||
return create_loopback(path)
|
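# Sketch (paths hypothetical): size strings pass straight through to
# 'truncate --size', so suffixes like '5G' work. Returns e.g. '/dev/loop0'.
#
#     dev = ensure_loopback_device('/srv/cinder-sdb.img', '5G')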
88
hooks/charmhelpers/contrib/storage/linux/lvm.py
Normal file
@ -0,0 +1,88 @@
|
||||
from subprocess import (
|
||||
CalledProcessError,
|
||||
check_call,
|
||||
check_output,
|
||||
Popen,
|
||||
PIPE,
|
||||
)
|
||||
|
||||
|
||||
##################################################
|
||||
# LVM helpers.
|
||||
##################################################
|
||||
def deactivate_lvm_volume_group(block_device):
|
||||
'''
|
||||
Deactivate any volume group associated with an LVM physical volume.
|
||||
|
||||
:param block_device: str: Full path to LVM physical volume
|
||||
'''
|
||||
vg = list_lvm_volume_group(block_device)
|
||||
if vg:
|
||||
cmd = ['vgchange', '-an', vg]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def is_lvm_physical_volume(block_device):
|
||||
'''
|
||||
Determine whether a block device is initialized as an LVM PV.
|
||||
|
||||
:param block_device: str: Full path of block device to inspect.
|
||||
|
||||
:returns: boolean: True if block device is a PV, False if not.
|
||||
'''
|
||||
try:
|
||||
check_output(['pvdisplay', block_device])
|
||||
return True
|
||||
except CalledProcessError:
|
||||
return False
|
||||
|
||||
|
||||
def remove_lvm_physical_volume(block_device):
|
||||
'''
|
||||
Remove LVM PV signatures from a given block device.
|
||||
|
||||
:param block_device: str: Full path of block device to scrub.
|
||||
'''
|
||||
p = Popen(['pvremove', '-ff', block_device],
|
||||
stdin=PIPE)
|
||||
p.communicate(input='y\n')
|
||||
|
||||
|
||||
def list_lvm_volume_group(block_device):
|
||||
'''
|
||||
List LVM volume group associated with a given block device.
|
||||
|
||||
Assumes block device is a valid LVM PV.
|
||||
|
||||
:param block_device: str: Full path of block device to inspect.
|
||||
|
||||
:returns: str: Name of volume group associated with block device or None
|
||||
'''
|
||||
vg = None
|
||||
pvd = check_output(['pvdisplay', block_device]).splitlines()
|
||||
for l in pvd:
|
||||
if l.strip().startswith('VG Name'):
|
||||
vg = ' '.join(l.split()).split(' ').pop()
|
||||
return vg
|
||||
|
||||
|
||||
def create_lvm_physical_volume(block_device):
|
||||
'''
|
||||
Initialize a block device as an LVM physical volume.
|
||||
|
||||
:param block_device: str: Full path of block device to initialize.
|
||||
|
||||
'''
|
||||
check_call(['pvcreate', block_device])
|
||||
|
||||
|
||||
def create_lvm_volume_group(volume_group, block_device):
|
||||
'''
|
||||
Create an LVM volume group backed by a given block device.
|
||||
|
||||
Assumes block device has already been initialized as an LVM PV.
|
||||
|
||||
:param volume_group: str: Name of volume group to create.
|
||||
:param block_device: str: Full path of PV-initialized block device.
|
||||
'''
|
||||
check_call(['vgcreate', volume_group, block_device])
|
25
hooks/charmhelpers/contrib/storage/linux/utils.py
Normal file
@ -0,0 +1,25 @@
|
||||
from os import stat
|
||||
from stat import S_ISBLK
|
||||
|
||||
from subprocess import (
|
||||
check_call
|
||||
)
|
||||
|
||||
|
||||
def is_block_device(path):
|
||||
'''
|
||||
Confirm device at path is a valid block device node.
|
||||
|
||||
:returns: boolean: True if path is a block device, False if not.
|
||||
'''
|
||||
return S_ISBLK(stat(path).st_mode)
|
||||
|
||||
|
||||
def zap_disk(block_device):
|
||||
'''
|
||||
Clear a block device of its partition table. Relies on sgdisk, which is
|
||||
installed as part of the 'gdisk' package in Ubuntu.
|
||||
|
||||
:param block_device: str: Full path of block device to clean.
|
||||
'''
|
||||
check_call(['sgdisk', '--zap-all', '--mbrtogpt', block_device])
|
0
hooks/charmhelpers/core/__init__.py
Normal file
395
hooks/charmhelpers/core/hookenv.py
Normal file
@ -0,0 +1,395 @@
|
||||
"Interactions with the Juju environment"
|
||||
# Copyright 2013 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||
|
||||
import os
|
||||
import json
|
||||
import yaml
|
||||
import subprocess
|
||||
import UserDict
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
CRITICAL = "CRITICAL"
|
||||
ERROR = "ERROR"
|
||||
WARNING = "WARNING"
|
||||
INFO = "INFO"
|
||||
DEBUG = "DEBUG"
|
||||
MARKER = object()
|
||||
|
||||
cache = {}
|
||||
|
||||
|
||||
def cached(func):
|
||||
"""Cache return values for multiple executions of func + args
|
||||
|
||||
For example:
|
||||
|
||||
@cached
|
||||
def unit_get(attribute):
|
||||
pass
|
||||
|
||||
unit_get('test')
|
||||
|
||||
will cache the result of unit_get + 'test' for future calls.
|
||||
"""
|
||||
def wrapper(*args, **kwargs):
|
||||
global cache
|
||||
key = str((func, args, kwargs))
|
||||
try:
|
||||
return cache[key]
|
||||
except KeyError:
|
||||
res = func(*args, **kwargs)
|
||||
cache[key] = res
|
||||
return res
|
||||
return wrapper
|
||||
|
||||
|
||||
def flush(key):
|
||||
"""Flushes any entries from function cache where the
|
||||
key is found in the function+args """
|
||||
flush_list = []
|
||||
for item in cache:
|
||||
if key in item:
|
||||
flush_list.append(item)
|
||||
for item in flush_list:
|
||||
del cache[item]
|
||||
|
||||
|
||||
def log(message, level=None):
|
||||
"""Write a message to the juju log"""
|
||||
command = ['juju-log']
|
||||
if level:
|
||||
command += ['-l', level]
|
||||
command += [message]
|
||||
subprocess.call(command)
|
||||
|
||||
|
||||
class Serializable(UserDict.IterableUserDict):
|
||||
"""Wrapper, an object that can be serialized to yaml or json"""
|
||||
|
||||
def __init__(self, obj):
|
||||
# wrap the object
|
||||
UserDict.IterableUserDict.__init__(self)
|
||||
self.data = obj
|
||||
|
||||
def __getattr__(self, attr):
|
||||
# See if this object has attribute.
|
||||
if attr in ("json", "yaml", "data"):
|
||||
return self.__dict__[attr]
|
||||
# Check for attribute in wrapped object.
|
||||
got = getattr(self.data, attr, MARKER)
|
||||
if got is not MARKER:
|
||||
return got
|
||||
# Proxy to the wrapped object via dict interface.
|
||||
try:
|
||||
return self.data[attr]
|
||||
except KeyError:
|
||||
raise AttributeError(attr)
|
||||
|
||||
def __getstate__(self):
|
||||
# Pickle as a standard dictionary.
|
||||
return self.data
|
||||
|
||||
def __setstate__(self, state):
|
||||
# Unpickle into our wrapper.
|
||||
self.data = state
|
||||
|
||||
def json(self):
|
||||
"""Serialize the object to json"""
|
||||
return json.dumps(self.data)
|
||||
|
||||
def yaml(self):
|
||||
"""Serialize the object to yaml"""
|
||||
return yaml.dump(self.data)
|
||||
|
||||
|
||||
def execution_environment():
|
||||
"""A convenient bundling of the current execution context"""
|
||||
context = {}
|
||||
context['conf'] = config()
|
||||
if relation_id():
|
||||
context['reltype'] = relation_type()
|
||||
context['relid'] = relation_id()
|
||||
context['rel'] = relation_get()
|
||||
context['unit'] = local_unit()
|
||||
context['rels'] = relations()
|
||||
context['env'] = os.environ
|
||||
return context
|
||||
|
||||
|
||||
def in_relation_hook():
|
||||
"""Determine whether we're running in a relation hook"""
|
||||
return 'JUJU_RELATION' in os.environ
|
||||
|
||||
|
||||
def relation_type():
|
||||
"""The scope for the current relation hook"""
|
||||
return os.environ.get('JUJU_RELATION', None)
|
||||
|
||||
|
||||
def relation_id():
|
||||
"""The relation ID for the current relation hook"""
|
||||
return os.environ.get('JUJU_RELATION_ID', None)
|
||||
|
||||
|
||||
def local_unit():
|
||||
"""Local unit ID"""
|
||||
return os.environ['JUJU_UNIT_NAME']
|
||||
|
||||
|
||||
def remote_unit():
|
||||
"""The remote unit for the current relation hook"""
|
||||
return os.environ['JUJU_REMOTE_UNIT']
|
||||
|
||||
|
||||
def service_name():
|
||||
"""The name service group this unit belongs to"""
|
||||
return local_unit().split('/')[0]
|
||||
|
||||
|
||||
@cached
|
||||
def config(scope=None):
|
||||
"""Juju charm configuration"""
|
||||
config_cmd_line = ['config-get']
|
||||
if scope is not None:
|
||||
config_cmd_line.append(scope)
|
||||
config_cmd_line.append('--format=json')
|
||||
try:
|
||||
return json.loads(subprocess.check_output(config_cmd_line))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
@cached
|
||||
def relation_get(attribute=None, unit=None, rid=None):
|
||||
"""Get relation information"""
|
||||
_args = ['relation-get', '--format=json']
|
||||
if rid:
|
||||
_args.append('-r')
|
||||
_args.append(rid)
|
||||
_args.append(attribute or '-')
|
||||
if unit:
|
||||
_args.append(unit)
|
||||
try:
|
||||
return json.loads(subprocess.check_output(_args))
|
||||
except ValueError:
|
||||
return None
|
||||
except CalledProcessError, e:
|
||||
if e.returncode == 2:
|
||||
return None
|
||||
raise
|
||||
|
||||
|
||||
def relation_set(relation_id=None, relation_settings={}, **kwargs):
|
||||
"""Set relation information for the current unit"""
|
||||
relation_cmd_line = ['relation-set']
|
||||
if relation_id is not None:
|
||||
relation_cmd_line.extend(('-r', relation_id))
|
||||
for k, v in (relation_settings.items() + kwargs.items()):
|
||||
if v is None:
|
||||
relation_cmd_line.append('{}='.format(k))
|
||||
else:
|
||||
relation_cmd_line.append('{}={}'.format(k, v))
|
||||
subprocess.check_call(relation_cmd_line)
|
||||
# Flush cache of any relation-gets for local unit
|
||||
flush(local_unit())
|
||||
|
||||
|
||||
@cached
|
||||
def relation_ids(reltype=None):
|
||||
"""A list of relation_ids"""
|
||||
reltype = reltype or relation_type()
|
||||
relid_cmd_line = ['relation-ids', '--format=json']
|
||||
if reltype is not None:
|
||||
relid_cmd_line.append(reltype)
|
||||
return json.loads(subprocess.check_output(relid_cmd_line)) or []
|
||||
|
||||
|
||||
@cached
|
||||
def related_units(relid=None):
|
||||
"""A list of related units"""
|
||||
relid = relid or relation_id()
|
||||
units_cmd_line = ['relation-list', '--format=json']
|
||||
if relid is not None:
|
||||
units_cmd_line.extend(('-r', relid))
|
||||
return json.loads(subprocess.check_output(units_cmd_line)) or []
|
||||
|
||||
|
||||
@cached
|
||||
def relation_for_unit(unit=None, rid=None):
|
||||
"""Get the json represenation of a unit's relation"""
|
||||
unit = unit or remote_unit()
|
||||
relation = relation_get(unit=unit, rid=rid)
|
||||
for key in relation:
|
||||
if key.endswith('-list'):
|
||||
relation[key] = relation[key].split()
|
||||
relation['__unit__'] = unit
|
||||
return relation
|
||||
|
||||
|
||||
@cached
|
||||
def relations_for_id(relid=None):
|
||||
"""Get relations of a specific relation ID"""
|
||||
relation_data = []
|
||||
relid = relid or relation_ids()
|
||||
for unit in related_units(relid):
|
||||
unit_data = relation_for_unit(unit, relid)
|
||||
unit_data['__relid__'] = relid
|
||||
relation_data.append(unit_data)
|
||||
return relation_data
|
||||
|
||||
|
||||
@cached
|
||||
def relations_of_type(reltype=None):
|
||||
"""Get relations of a specific type"""
|
||||
relation_data = []
|
||||
reltype = reltype or relation_type()
|
||||
for relid in relation_ids(reltype):
|
||||
for relation in relations_for_id(relid):
|
||||
relation['__relid__'] = relid
|
||||
relation_data.append(relation)
|
||||
return relation_data
|
||||
|
||||
|
||||
@cached
|
||||
def relation_types():
|
||||
"""Get a list of relation types supported by this charm"""
|
||||
charmdir = os.environ.get('CHARM_DIR', '')
|
||||
mdf = open(os.path.join(charmdir, 'metadata.yaml'))
|
||||
md = yaml.safe_load(mdf)
|
||||
rel_types = []
|
||||
for key in ('provides', 'requires', 'peers'):
|
||||
section = md.get(key)
|
||||
if section:
|
||||
rel_types.extend(section.keys())
|
||||
mdf.close()
|
||||
return rel_types
|
||||
|
||||
|
||||
@cached
|
||||
def relations():
|
||||
"""Get a nested dictionary of relation data for all related units"""
|
||||
rels = {}
|
||||
for reltype in relation_types():
|
||||
relids = {}
|
||||
for relid in relation_ids(reltype):
|
||||
units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
|
||||
for unit in related_units(relid):
|
||||
reldata = relation_get(unit=unit, rid=relid)
|
||||
units[unit] = reldata
|
||||
relids[relid] = units
|
||||
rels[reltype] = relids
|
||||
return rels
|
||||
|
||||
|
||||
@cached
|
||||
def is_relation_made(relation, keys='private-address'):
|
||||
'''
|
||||
Determine whether a relation is established by checking for
|
||||
presence of key(s). If a list of keys is provided, they
|
||||
must all be present for the relation to be identified as made
|
||||
'''
|
||||
if isinstance(keys, str):
|
||||
keys = [keys]
|
||||
for r_id in relation_ids(relation):
|
||||
for unit in related_units(r_id):
|
||||
context = {}
|
||||
for k in keys:
|
||||
context[k] = relation_get(k, rid=r_id,
|
||||
unit=unit)
|
||||
if None not in context.values():
|
||||
return True
|
||||
return False
|
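# Sketch (relation/key names illustrative): treat the ceph relation as
# complete only once both the key and auth settings are present:
#
#     if is_relation_made('ceph', keys=['key', 'auth']):
#         pass  # safe to render ceph.conf, etc.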
||||
|
||||
|
||||
def open_port(port, protocol="TCP"):
|
||||
"""Open a service network port"""
|
||||
_args = ['open-port']
|
||||
_args.append('{}/{}'.format(port, protocol))
|
||||
subprocess.check_call(_args)
|
||||
|
||||
|
||||
def close_port(port, protocol="TCP"):
|
||||
"""Close a service network port"""
|
||||
_args = ['close-port']
|
||||
_args.append('{}/{}'.format(port, protocol))
|
||||
subprocess.check_call(_args)
|
||||
|
||||
|
||||
@cached
|
||||
def unit_get(attribute):
|
||||
"""Get the unit ID for the remote unit"""
|
||||
_args = ['unit-get', '--format=json', attribute]
|
||||
try:
|
||||
return json.loads(subprocess.check_output(_args))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def unit_private_ip():
|
||||
"""Get this unit's private IP address"""
|
||||
return unit_get('private-address')
|
||||
|
||||
|
||||
class UnregisteredHookError(Exception):
|
||||
"""Raised when an undefined hook is called"""
|
||||
pass
|
||||
|
||||
|
||||
class Hooks(object):
|
||||
"""A convenient handler for hook functions.
|
||||
|
||||
Example:
|
||||
hooks = Hooks()
|
||||
|
||||
# register a hook, taking its name from the function name
|
||||
@hooks.hook()
|
||||
def install():
|
||||
...
|
||||
|
||||
# register a hook, providing a custom hook name
|
||||
@hooks.hook("config-changed")
|
||||
def config_changed():
|
||||
...
|
||||
|
||||
if __name__ == "__main__":
|
||||
# execute a hook based on the name the program is called by
|
||||
hooks.execute(sys.argv)
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super(Hooks, self).__init__()
|
||||
self._hooks = {}
|
||||
|
||||
def register(self, name, function):
|
||||
"""Register a hook"""
|
||||
self._hooks[name] = function
|
||||
|
||||
def execute(self, args):
|
||||
"""Execute a registered hook based on args[0]"""
|
||||
hook_name = os.path.basename(args[0])
|
||||
if hook_name in self._hooks:
|
||||
self._hooks[hook_name]()
|
||||
else:
|
||||
raise UnregisteredHookError(hook_name)
|
||||
|
||||
def hook(self, *hook_names):
|
||||
"""Decorator, registering them as hooks"""
|
||||
def wrapper(decorated):
|
||||
for hook_name in hook_names:
|
||||
self.register(hook_name, decorated)
|
||||
else:
|
||||
self.register(decorated.__name__, decorated)
|
||||
if '_' in decorated.__name__:
|
||||
self.register(
|
||||
decorated.__name__.replace('_', '-'), decorated)
|
||||
return decorated
|
||||
return wrapper
|
||||
|
||||
|
||||
def charm_dir():
|
||||
"""Return the root directory of the current charm"""
|
||||
return os.environ.get('CHARM_DIR')
|
291
hooks/charmhelpers/core/host.py
Normal file
@ -0,0 +1,291 @@
|
||||
"""Tools for working with the host system"""
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Nick Moffitt <nick.moffitt@canonical.com>
|
||||
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
||||
|
||||
import os
|
||||
import pwd
|
||||
import grp
|
||||
import random
|
||||
import string
|
||||
import subprocess
|
||||
import hashlib
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
from hookenv import log
|
||||
|
||||
|
||||
def service_start(service_name):
|
||||
"""Start a system service"""
|
||||
return service('start', service_name)
|
||||
|
||||
|
||||
def service_stop(service_name):
|
||||
"""Stop a system service"""
|
||||
return service('stop', service_name)
|
||||
|
||||
|
||||
def service_restart(service_name):
|
||||
"""Restart a system service"""
|
||||
return service('restart', service_name)
|
||||
|
||||
|
||||
def service_reload(service_name, restart_on_failure=False):
|
||||
"""Reload a system service, optionally falling back to restart if reload fails"""
|
||||
service_result = service('reload', service_name)
|
||||
if not service_result and restart_on_failure:
|
||||
service_result = service('restart', service_name)
|
||||
return service_result
|
||||
|
||||
|
||||
def service(action, service_name):
|
||||
"""Control a system service"""
|
||||
cmd = ['service', service_name, action]
|
||||
return subprocess.call(cmd) == 0
|
||||
|
||||
|
||||
def service_running(service):
|
||||
"""Determine whether a system service is running"""
|
||||
try:
|
||||
output = subprocess.check_output(['service', service, 'status'])
|
||||
except subprocess.CalledProcessError:
|
||||
return False
|
||||
else:
|
||||
if ("start/running" in output or "is running" in output):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def adduser(username, password=None, shell='/bin/bash', system_user=False):
|
||||
"""Add a user to the system"""
|
||||
try:
|
||||
user_info = pwd.getpwnam(username)
|
||||
log('user {0} already exists!'.format(username))
|
||||
except KeyError:
|
||||
log('creating user {0}'.format(username))
|
||||
cmd = ['useradd']
|
||||
if system_user or password is None:
|
||||
cmd.append('--system')
|
||||
else:
|
||||
cmd.extend([
|
||||
'--create-home',
|
||||
'--shell', shell,
|
||||
'--password', password,
|
||||
])
|
||||
cmd.append(username)
|
||||
subprocess.check_call(cmd)
|
||||
user_info = pwd.getpwnam(username)
|
||||
return user_info
|
||||
|
||||
|
||||
def add_user_to_group(username, group):
|
||||
"""Add a user to a group"""
|
||||
cmd = [
|
||||
'gpasswd', '-a',
|
||||
username,
|
||||
group
|
||||
]
|
||||
log("Adding user {} to group {}".format(username, group))
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def rsync(from_path, to_path, flags='-r', options=None):
|
||||
"""Replicate the contents of a path"""
|
||||
options = options or ['--delete', '--executability']
|
||||
cmd = ['/usr/bin/rsync', flags]
|
||||
cmd.extend(options)
|
||||
cmd.append(from_path)
|
||||
cmd.append(to_path)
|
||||
log(" ".join(cmd))
|
||||
return subprocess.check_output(cmd).strip()
|
||||
|
||||
|
||||
def symlink(source, destination):
|
||||
"""Create a symbolic link"""
|
||||
log("Symlinking {} as {}".format(source, destination))
|
||||
cmd = [
|
||||
'ln',
|
||||
'-sf',
|
||||
source,
|
||||
destination,
|
||||
]
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def mkdir(path, owner='root', group='root', perms=0555, force=False):
|
||||
"""Create a directory"""
|
||||
log("Making dir {} {}:{} {:o}".format(path, owner, group,
|
||||
perms))
|
||||
uid = pwd.getpwnam(owner).pw_uid
|
||||
gid = grp.getgrnam(group).gr_gid
|
||||
realpath = os.path.abspath(path)
|
||||
if os.path.exists(realpath):
|
||||
if force and not os.path.isdir(realpath):
|
||||
log("Removing non-directory file {} prior to mkdir()".format(path))
|
||||
os.unlink(realpath)
|
||||
else:
|
||||
os.makedirs(realpath, perms)
|
||||
os.chown(realpath, uid, gid)
|
||||
|
||||
|
||||
def write_file(path, content, owner='root', group='root', perms=0444):
|
||||
"""Create or overwrite a file with the contents of a string"""
|
||||
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
|
||||
uid = pwd.getpwnam(owner).pw_uid
|
||||
gid = grp.getgrnam(group).gr_gid
|
||||
with open(path, 'w') as target:
|
||||
os.fchown(target.fileno(), uid, gid)
|
||||
os.fchmod(target.fileno(), perms)
|
||||
target.write(content)
|
||||
|
||||
|
||||
def mount(device, mountpoint, options=None, persist=False):
|
||||
"""Mount a filesystem at a particular mountpoint"""
|
||||
cmd_args = ['mount']
|
||||
if options is not None:
|
||||
cmd_args.extend(['-o', options])
|
||||
cmd_args.extend([device, mountpoint])
|
||||
try:
|
||||
subprocess.check_output(cmd_args)
|
||||
except subprocess.CalledProcessError, e:
|
||||
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
|
||||
return False
|
||||
if persist:
|
||||
# TODO: update fstab
|
||||
pass
|
||||
return True
|
||||
|
||||
|
||||
def umount(mountpoint, persist=False):
|
||||
"""Unmount a filesystem"""
|
||||
cmd_args = ['umount', mountpoint]
|
||||
try:
|
||||
subprocess.check_output(cmd_args)
|
||||
except subprocess.CalledProcessError, e:
|
||||
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
|
||||
return False
|
||||
if persist:
|
||||
# TODO: update fstab
|
||||
pass
|
||||
return True
|
||||
|
||||
|
||||
def mounts():
|
||||
"""Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
|
||||
with open('/proc/mounts') as f:
|
||||
# [['/mount/point','/dev/path'],[...]]
|
||||
system_mounts = [m[1::-1] for m in [l.strip().split()
|
||||
for l in f.readlines()]]
|
||||
return system_mounts
|
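# Example return value (illustrative):
#
#     [['/', '/dev/vda1'], ['/mnt', '/dev/vdb']]
#
# i.e. [mountpoint, device] pairs in /proc/mounts order.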
||||
|
||||
|
||||
def file_hash(path):
|
||||
"""Generate a md5 hash of the contents of 'path' or None if not found """
|
||||
if os.path.exists(path):
|
||||
h = hashlib.md5()
|
||||
with open(path, 'r') as source:
|
||||
h.update(source.read()) # IGNORE:E1101 - it does have update
|
||||
return h.hexdigest()
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def restart_on_change(restart_map):
|
||||
"""Restart services based on configuration files changing
|
||||
|
||||
This function is used a decorator, for example
|
||||
|
||||
@restart_on_change({
|
||||
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
|
||||
})
|
||||
def ceph_client_changed():
|
||||
...
|
||||
|
||||
In this example, the cinder-api and cinder-volume services
|
||||
would be restarted if /etc/ceph/ceph.conf is changed by the
|
||||
ceph_client_changed function.
|
||||
"""
|
||||
def wrap(f):
|
||||
def wrapped_f(*args):
|
||||
checksums = {}
|
||||
for path in restart_map:
|
||||
checksums[path] = file_hash(path)
|
||||
f(*args)
|
||||
restarts = []
|
||||
for path in restart_map:
|
||||
if checksums[path] != file_hash(path):
|
||||
restarts += restart_map[path]
|
||||
for service_name in list(OrderedDict.fromkeys(restarts)):
|
||||
service('restart', service_name)
|
||||
return wrapped_f
|
||||
return wrap
|
||||
|
||||
|
||||
def lsb_release():
|
||||
"""Return /etc/lsb-release in a dict"""
|
||||
d = {}
|
||||
with open('/etc/lsb-release', 'r') as lsb:
|
||||
for l in lsb:
|
||||
k, v = l.split('=')
|
||||
d[k.strip()] = v.strip()
|
||||
return d
|
||||
|
||||
|
||||
def pwgen(length=None):
|
||||
"""Generate a random pasword."""
|
||||
if length is None:
|
||||
length = random.choice(range(35, 45))
|
||||
alphanumeric_chars = [
|
||||
l for l in (string.letters + string.digits)
|
||||
if l not in 'l0QD1vAEIOUaeiou']
|
||||
random_chars = [
|
||||
random.choice(alphanumeric_chars) for _ in range(length)]
|
||||
return(''.join(random_chars))
|
||||
|
||||
|
||||
def list_nics(nic_type):
|
||||
'''Return a list of nics of given type(s)'''
|
||||
if isinstance(nic_type, basestring):
|
||||
int_types = [nic_type]
|
||||
else:
|
||||
int_types = nic_type
|
||||
interfaces = []
|
||||
for int_type in int_types:
|
||||
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
|
||||
ip_output = subprocess.check_output(cmd).split('\n')
|
||||
ip_output = (line for line in ip_output if line)
|
||||
for line in ip_output:
|
||||
if line.split()[1].startswith(int_type):
|
||||
interfaces.append(line.split()[1].replace(":", ""))
|
||||
return interfaces
|
||||
|
||||
|
||||
def set_nic_mtu(nic, mtu):
|
||||
'''Set MTU on a network interface'''
|
||||
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def get_nic_mtu(nic):
|
||||
cmd = ['ip', 'addr', 'show', nic]
|
||||
ip_output = subprocess.check_output(cmd).split('\n')
|
||||
mtu = ""
|
||||
for line in ip_output:
|
||||
words = line.split()
|
||||
if 'mtu' in words:
|
||||
mtu = words[words.index("mtu") + 1]
|
||||
return mtu
|
||||
|
||||
|
||||
def get_nic_hwaddr(nic):
|
||||
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
|
||||
ip_output = subprocess.check_output(cmd)
|
||||
hwaddr = ""
|
||||
words = ip_output.split()
|
||||
if 'link/ether' in words:
|
||||
hwaddr = words[words.index('link/ether') + 1]
|
||||
return hwaddr
|
279
hooks/charmhelpers/fetch/__init__.py
Normal file
@ -0,0 +1,279 @@
|
||||
import importlib
|
||||
from yaml import safe_load
|
||||
from charmhelpers.core.host import (
|
||||
lsb_release
|
||||
)
|
||||
from urlparse import (
|
||||
urlparse,
|
||||
urlunparse,
|
||||
)
|
||||
import subprocess
|
||||
from charmhelpers.core.hookenv import (
|
||||
config,
|
||||
log,
|
||||
)
|
||||
import apt_pkg
|
||||
import os
|
||||
|
||||
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
|
||||
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
|
||||
"""
|
||||
PROPOSED_POCKET = """# Proposed
|
||||
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
|
||||
"""
|
||||
CLOUD_ARCHIVE_POCKETS = {
|
||||
# Folsom
|
||||
'folsom': 'precise-updates/folsom',
|
||||
'precise-folsom': 'precise-updates/folsom',
|
||||
'precise-folsom/updates': 'precise-updates/folsom',
|
||||
'precise-updates/folsom': 'precise-updates/folsom',
|
||||
'folsom/proposed': 'precise-proposed/folsom',
|
||||
'precise-folsom/proposed': 'precise-proposed/folsom',
|
||||
'precise-proposed/folsom': 'precise-proposed/folsom',
|
||||
# Grizzly
|
||||
'grizzly': 'precise-updates/grizzly',
|
||||
'precise-grizzly': 'precise-updates/grizzly',
|
||||
'precise-grizzly/updates': 'precise-updates/grizzly',
|
||||
'precise-updates/grizzly': 'precise-updates/grizzly',
|
||||
'grizzly/proposed': 'precise-proposed/grizzly',
|
||||
'precise-grizzly/proposed': 'precise-proposed/grizzly',
|
||||
'precise-proposed/grizzly': 'precise-proposed/grizzly',
|
||||
# Havana
|
||||
'havana': 'precise-updates/havana',
|
||||
'precise-havana': 'precise-updates/havana',
|
||||
'precise-havana/updates': 'precise-updates/havana',
|
||||
'precise-updates/havana': 'precise-updates/havana',
|
||||
'havana/proposed': 'precise-proposed/havana',
|
||||
'precise-havana/proposed': 'precise-proposed/havana',
|
||||
'precise-proposed/havana': 'precise-proposed/havana',
|
||||
# Icehouse
|
||||
'icehouse': 'precise-updates/icehouse',
|
||||
'precise-icehouse': 'precise-updates/icehouse',
|
||||
'precise-icehouse/updates': 'precise-updates/icehouse',
|
||||
'precise-updates/icehouse': 'precise-updates/icehouse',
|
||||
'icehouse/proposed': 'precise-proposed/icehouse',
|
||||
'precise-icehouse/proposed': 'precise-proposed/icehouse',
|
||||
'precise-proposed/icehouse': 'precise-proposed/icehouse',
|
||||
}
|
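# Both short and fully-qualified keys resolve to the same pocket, so e.g.
# add_source('cloud:havana') and add_source('cloud:precise-havana/updates')
# both end up writing the precise-updates/havana archive entry.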
||||
|
||||
|
||||
def filter_installed_packages(packages):
|
||||
"""Returns a list of packages that require installation"""
|
||||
apt_pkg.init()
|
||||
cache = apt_pkg.Cache()
|
||||
_pkgs = []
|
||||
for package in packages:
|
||||
try:
|
||||
p = cache[package]
|
||||
p.current_ver or _pkgs.append(package)
|
||||
except KeyError:
|
||||
log('Package {} has no installation candidate.'.format(package),
|
||||
level='WARNING')
|
||||
_pkgs.append(package)
|
||||
return _pkgs
|
||||
|
||||
|
||||
def apt_install(packages, options=None, fatal=False):
|
||||
"""Install one or more packages"""
|
||||
if options is None:
|
||||
options = ['--option=Dpkg::Options::=--force-confold']
|
||||
|
||||
cmd = ['apt-get', '--assume-yes']
|
||||
cmd.extend(options)
|
||||
cmd.append('install')
|
||||
if isinstance(packages, basestring):
|
||||
cmd.append(packages)
|
||||
else:
|
||||
cmd.extend(packages)
|
||||
log("Installing {} with options: {}".format(packages,
|
||||
options))
|
||||
env = os.environ.copy()
|
||||
if 'DEBIAN_FRONTEND' not in env:
|
||||
env['DEBIAN_FRONTEND'] = 'noninteractive'
|
||||
|
||||
if fatal:
|
||||
subprocess.check_call(cmd, env=env)
|
||||
else:
|
||||
subprocess.call(cmd, env=env)
|
||||
|
||||
|
||||
def apt_update(fatal=False):
|
||||
"""Update local apt cache"""
|
||||
cmd = ['apt-get', 'update']
|
||||
if fatal:
|
||||
subprocess.check_call(cmd)
|
||||
else:
|
||||
subprocess.call(cmd)
|
||||
|
||||
|
||||
def apt_purge(packages, fatal=False):
|
||||
"""Purge one or more packages"""
|
||||
cmd = ['apt-get', '--assume-yes', 'purge']
|
||||
if isinstance(packages, basestring):
|
||||
cmd.append(packages)
|
||||
else:
|
||||
cmd.extend(packages)
|
||||
log("Purging {}".format(packages))
|
||||
if fatal:
|
||||
subprocess.check_call(cmd)
|
||||
else:
|
||||
subprocess.call(cmd)
|
||||
|
||||
|
||||
def apt_hold(packages, fatal=False):
|
||||
"""Hold one or more packages"""
|
||||
cmd = ['apt-mark', 'hold']
|
||||
if isinstance(packages, basestring):
|
||||
cmd.append(packages)
|
||||
else:
|
||||
cmd.extend(packages)
|
||||
log("Holding {}".format(packages))
|
||||
if fatal:
|
||||
subprocess.check_call(cmd)
|
||||
else:
|
||||
subprocess.call(cmd)
|
||||
|
||||
|
||||
def add_source(source, key=None):
|
||||
if (source.startswith('ppa:') or
|
||||
source.startswith('http:') or
|
||||
source.startswith('deb ') or
|
||||
source.startswith('cloud-archive:')):
|
||||
subprocess.check_call(['add-apt-repository', '--yes', source])
|
||||
elif source.startswith('cloud:'):
|
||||
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
|
||||
fatal=True)
|
||||
pocket = source.split(':')[-1]
|
||||
if pocket not in CLOUD_ARCHIVE_POCKETS:
|
||||
raise SourceConfigError(
|
||||
'Unsupported cloud: source option %s' %
|
||||
pocket)
|
||||
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
|
||||
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
|
||||
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
|
||||
elif source == 'proposed':
|
||||
release = lsb_release()['DISTRIB_CODENAME']
|
||||
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
|
||||
apt.write(PROPOSED_POCKET.format(release))
|
||||
if key:
|
||||
subprocess.check_call(['apt-key', 'import', key])
|
||||
|
||||
|
||||
class SourceConfigError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def configure_sources(update=False,
|
||||
sources_var='install_sources',
|
||||
keys_var='install_keys'):
|
||||
"""
|
||||
Configure multiple sources from charm configuration
|
||||
|
||||
Example config:
|
||||
install_sources:
|
||||
- "ppa:foo"
|
||||
- "http://example.com/repo precise main"
|
||||
install_keys:
|
||||
- null
|
||||
- "a1b2c3d4"
|
||||
|
||||
Note that 'null' (a.k.a. None) should not be quoted.
|
||||
"""
|
||||
sources = safe_load(config(sources_var))
|
||||
keys = config(keys_var)
|
||||
if keys is not None:
|
||||
keys = safe_load(keys)
|
||||
if isinstance(sources, basestring) and (
|
||||
keys is None or isinstance(keys, basestring)):
|
||||
add_source(sources, keys)
|
||||
else:
|
||||
if len(sources) != len(keys):
|
||||
msg = 'Install sources and keys lists are different lengths'
|
||||
raise SourceConfigError(msg)
|
||||
for src_num in range(len(sources)):
|
||||
add_source(sources[src_num], keys[src_num])
|
||||
if update:
|
||||
apt_update(fatal=True)
|
||||
|
||||
# The order of this list is very important. Handlers should be listed in from
|
||||
# least- to most-specific URL matching.
|
||||
FETCH_HANDLERS = (
|
||||
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
|
||||
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
|
||||
)
|
||||
|
||||
|
||||
class UnhandledSource(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def install_remote(source):
|
||||
"""
|
||||
Install a file tree from a remote source
|
||||
|
||||
The specified source should be a url of the form:
|
||||
scheme://[host]/path[#[option=value][&...]]
|
||||
|
||||
Schemes supported are based on this module's submodules
|
||||
Options supported are submodule-specific"""
|
||||
# We ONLY check for True here because can_handle may return a string
|
||||
# explaining why it can't handle a given source.
|
||||
handlers = [h for h in plugins() if h.can_handle(source) is True]
|
||||
installed_to = None
|
||||
for handler in handlers:
|
||||
try:
|
||||
installed_to = handler.install(source)
|
||||
except UnhandledSource:
|
||||
pass
|
||||
if not installed_to:
|
||||
raise UnhandledSource("No handler found for source {}".format(source))
|
||||
return installed_to
|
||||
|
||||
|
||||
def install_from_config(config_var_name):
|
||||
charm_config = config()
|
||||
source = charm_config[config_var_name]
|
||||
return install_remote(source)
|
||||
|
||||
|
||||
class BaseFetchHandler(object):
|
||||
|
||||
"""Base class for FetchHandler implementations in fetch plugins"""
|
||||
|
||||
def can_handle(self, source):
|
||||
"""Returns True if the source can be handled. Otherwise returns
|
||||
a string explaining why it cannot"""
|
||||
return "Wrong source type"
|
||||
|
||||
def install(self, source):
|
||||
"""Try to download and unpack the source. Return the path to the
|
||||
unpacked files or raise UnhandledSource."""
|
||||
raise UnhandledSource("Wrong source type {}".format(source))
|
||||
|
||||
def parse_url(self, url):
|
||||
return urlparse(url)
|
||||
|
||||
def base_url(self, url):
|
||||
"""Return url without querystring or fragment"""
|
||||
parts = list(self.parse_url(url))
|
||||
parts[4:] = ['' for i in parts[4:]]
|
||||
return urlunparse(parts)
|
||||
|
||||
|
||||
def plugins(fetch_handlers=None):
|
||||
if not fetch_handlers:
|
||||
fetch_handlers = FETCH_HANDLERS
|
||||
plugin_list = []
|
||||
for handler_name in fetch_handlers:
|
||||
package, classname = handler_name.rsplit('.', 1)
|
||||
try:
|
||||
handler_class = getattr(
|
||||
importlib.import_module(package),
|
||||
classname)
|
||||
plugin_list.append(handler_class())
|
||||
except (ImportError, AttributeError):
|
||||
# Skip missing plugins so that they can be omitted from
|
||||
# installation if desired
|
||||
log("FetchHandler {} not found, skipping plugin".format(
|
||||
handler_name))
|
||||
return plugin_list
|
48
hooks/charmhelpers/fetch/archiveurl.py
Normal file
@ -0,0 +1,48 @@
|
||||
import os
|
||||
import urllib2
|
||||
from charmhelpers.fetch import (
|
||||
BaseFetchHandler,
|
||||
UnhandledSource
|
||||
)
|
||||
from charmhelpers.payload.archive import (
|
||||
get_archive_handler,
|
||||
extract,
|
||||
)
|
||||
from charmhelpers.core.host import mkdir
|
||||
|
||||
|
||||
class ArchiveUrlFetchHandler(BaseFetchHandler):
|
||||
"""Handler for archives via generic URLs"""
|
||||
def can_handle(self, source):
|
||||
url_parts = self.parse_url(source)
|
||||
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
|
||||
return "Wrong source type"
|
||||
if get_archive_handler(self.base_url(source)):
|
||||
return True
|
||||
return False
|
||||
|
||||
def download(self, source, dest):
|
||||
# propagate all exceptions
|
||||
# URLError, OSError, etc
|
||||
response = urllib2.urlopen(source)
|
||||
try:
|
||||
with open(dest, 'w') as dest_file:
|
||||
dest_file.write(response.read())
|
||||
except Exception as e:
|
||||
if os.path.isfile(dest):
|
||||
os.unlink(dest)
|
||||
raise e
|
||||
|
||||
def install(self, source):
|
||||
url_parts = self.parse_url(source)
|
||||
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
|
||||
if not os.path.exists(dest_dir):
|
||||
mkdir(dest_dir, perms=0755)
|
||||
dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
|
||||
try:
|
||||
self.download(source, dld_file)
|
||||
except urllib2.URLError as e:
|
||||
raise UnhandledSource(e.reason)
|
||||
except OSError as e:
|
||||
raise UnhandledSource(e.strerror)
|
||||
return extract(dld_file)
|
49
hooks/charmhelpers/fetch/bzrurl.py
Normal file
@ -0,0 +1,49 @@
import os
from charmhelpers.fetch import (
    BaseFetchHandler,
    UnhandledSource
)
from charmhelpers.core.host import mkdir

try:
    from bzrlib.branch import Branch
except ImportError:
    from charmhelpers.fetch import apt_install
    apt_install("python-bzrlib")
    from bzrlib.branch import Branch


class BzrUrlFetchHandler(BaseFetchHandler):
    """Handler for bazaar branches via generic and lp URLs"""
    def can_handle(self, source):
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('bzr+ssh', 'lp'):
            return False
        else:
            return True

    def branch(self, source, dest):
        url_parts = self.parse_url(source)
        # If we use lp:branchname scheme we need to load plugins
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))
        if url_parts.scheme == "lp":
            from bzrlib.plugin import load_plugins
            load_plugins()
        try:
            remote_branch = Branch.open(source)
            remote_branch.bzrdir.sprout(dest).open_branch()
        except Exception as e:
            raise e

    def install(self, source):
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0755)
        try:
            self.branch(source, dest_dir)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
1
hooks/charmhelpers/payload/__init__.py
Normal file
@ -0,0 +1 @@
"Tools for working with files injected into a charm just before deployment."
50
hooks/charmhelpers/payload/execd.py
Normal file
@ -0,0 +1,50 @@
#!/usr/bin/env python

import os
import sys
import subprocess
from charmhelpers.core import hookenv


def default_execd_dir():
    return os.path.join(os.environ['CHARM_DIR'], 'exec.d')


def execd_module_paths(execd_dir=None):
    """Generate a list of full paths to modules within execd_dir."""
    if not execd_dir:
        execd_dir = default_execd_dir()

    if not os.path.exists(execd_dir):
        return

    for subpath in os.listdir(execd_dir):
        module = os.path.join(execd_dir, subpath)
        if os.path.isdir(module):
            yield module


def execd_submodule_paths(command, execd_dir=None):
    """Generate a list of full paths to the specified command within
    execd_dir.
    """
    for module_path in execd_module_paths(execd_dir):
        path = os.path.join(module_path, command)
        if os.access(path, os.X_OK) and os.path.isfile(path):
            yield path


def execd_run(command, execd_dir=None, die_on_error=False, stderr=None):
    """Run command for each module within execd_dir which defines it."""
    for submodule_path in execd_submodule_paths(command, execd_dir):
        try:
            subprocess.check_call(submodule_path, shell=True, stderr=stderr)
        except subprocess.CalledProcessError as e:
            hookenv.log("Error ({}) running {}. Output: {}".format(
                e.returncode, e.cmd, e.output))
            if die_on_error:
                sys.exit(e.returncode)


def execd_preinstall(execd_dir=None):
    """Run charm-pre-install for each module within execd_dir."""
    execd_run('charm-pre-install', execd_dir=execd_dir)
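The contract here is worth spelling out: any executable named charm-pre-install in a directory one level under $CHARM_DIR/exec.d gets run by execd_preinstall() before packages are installed. A self-contained sketch of that layout (the temp directory and module name are scratch values, not charm conventions beyond exec.d itself):

# Build a throwaway exec.d tree and run it, mirroring execd_preinstall().
import os
import stat
import tempfile

from charmhelpers.payload.execd import execd_run

execd_dir = tempfile.mkdtemp()
module = os.path.join(execd_dir, 'example-module')
os.mkdir(module)
script = os.path.join(module, 'charm-pre-install')
with open(script, 'w') as f:
    f.write('#!/bin/sh\necho pre-install payload ran\n')
os.chmod(script, os.stat(script).st_mode | stat.S_IXUSR)

# Runs every executable 'charm-pre-install' found one level down.
execd_run('charm-pre-install', execd_dir=execd_dir)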
36
hooks/cinder_contexts.py
Normal file
@ -0,0 +1,36 @@
from charmhelpers.core.hookenv import (
    service_name,
    is_relation_made
)

from charmhelpers.contrib.openstack.context import (
    OSContextGenerator,
)


class CephSubordinateContext(OSContextGenerator):
    interfaces = ['ceph-cinder']

    def __call__(self):
        """
        Used to generate template context to be added to cinder.conf in the
        presence of a ceph relation.
        """
        if not is_relation_made('ceph', 'key'):
            return {}
        service = service_name()
        return {
            "cinder": {
                "/etc/cinder/cinder.conf": {
                    "sections": {
                        service: [
                            ('volume_backend_name', service),
                            ('volume_driver',
                             'cinder.volume.driver.RBDDriver'),
                            ('rbd_pool', service),
                            ('rbd_user', service),
                        ]
                    }
                }
            }
        }
101
hooks/cinder_hooks.py
Executable file
@ -0,0 +1,101 @@
#!/usr/bin/python

import os
import sys

from cinder_utils import (
    ensure_ceph_pool,
    register_configs,
    restart_map,
    set_ceph_env_variables,
    CEPH_CONF,
    PACKAGES
)

from charmhelpers.core.hookenv import (
    Hooks,
    UnregisteredHookError,
    config,
    service_name,
    relation_set,
    relation_ids
)

from cinder_contexts import CephSubordinateContext

from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.host import restart_on_change, log

from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring
from charmhelpers.contrib.hahelpers.cluster import eligible_leader

from charmhelpers.payload.execd import execd_preinstall

hooks = Hooks()

CONFIGS = register_configs()


@hooks.hook('install')
def install():
    execd_preinstall()
    apt_update()
    apt_install(PACKAGES, fatal=True)


@hooks.hook('config-changed')
def config_changed():
    CONFIGS.write_all()


@hooks.hook('ceph-relation-joined')
def ceph_joined():
    if not os.path.isdir('/etc/ceph'):
        os.mkdir('/etc/ceph')


@hooks.hook('ceph-relation-changed')
@restart_on_change(restart_map())
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
    else:
        svc = service_name()
        if not ensure_ceph_keyring(service=svc,
                                   user='cinder', group='cinder'):
            log('Could not create ceph keyring: peer not ready?')
        else:
            CONFIGS.write(CEPH_CONF)
            set_ceph_env_variables(service=svc)
            if eligible_leader(None):
                ensure_ceph_pool(service=svc,
                                 replicas=config('ceph-osd-replication-count'))
            for rid in relation_ids('storage-backend'):
                storage_backend(rid)


@hooks.hook('ceph-relation-broken')
@restart_on_change(restart_map())
def relation_broken():
    CONFIGS.write_all()


@hooks.hook('upgrade-charm')
def upgrade_charm():
    pass


@hooks.hook('storage-backend-relation-joined')
def storage_backend(rel_id=None):
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
    else:
        relation_set(relation_id=rel_id,
                     subordinate_configuration=CephSubordinateContext())


if __name__ == '__main__':
    try:
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log('Unknown hook {} - skipping.'.format(e))
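Each hook entry point above is reached through a symlink (the hooks/* symlinks listed after cinder_utils.py below): juju executes hooks/<hook-name>, and hooks.execute(sys.argv) keys the lookup off the basename of argv[0]. A simplified re-implementation of that dispatch idea, for illustration only (this is not the charm-helpers code):

# Sketch of the dispatch trick the symlinked hooks rely on.
import os


class MiniHooks(object):

    def __init__(self):
        self._registry = {}

    def hook(self, *names):
        def wrapper(f):
            for name in names:
                self._registry[name] = f
            return f
        return wrapper

    def execute(self, args):
        # juju invokes hooks/<hook-name>; pick the handler by basename
        hook_name = os.path.basename(args[0])
        if hook_name in self._registry:
            self._registry[hook_name]()


minihooks = MiniHooks()


@minihooks.hook('install')
def install():
    print 'install hook ran'


minihooks.execute(['hooks/install'])  # -> install hook ran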
103
hooks/cinder_utils.py
Normal file
@ -0,0 +1,103 @@
import os
from collections import OrderedDict

from charmhelpers.core.hookenv import (
    relation_ids,
)

from charmhelpers.contrib.storage.linux.ceph import (
    create_pool as ceph_create_pool,
    pool_exists as ceph_pool_exists,
)

from charmhelpers.contrib.openstack import (
    templating,
    context,
)

from charmhelpers.contrib.openstack.utils import (
    get_os_codename_package,
)


PACKAGES = [
    'ceph-common',
]

CEPH_CONF = '/etc/ceph/ceph.conf'

TEMPLATES = 'templates/'

# Map config files to hook contexts and services that will be associated
# with file in restart_on_changes()'s service map.
CONFIG_FILES = OrderedDict([
    (CEPH_CONF, {
        'hook_contexts': [context.CephContext()],
        'services': ['cinder-volume'],
    }),
])


def register_configs():
    """
    Register config files with their respective contexts.
    Registration of some configs may not be required depending on
    existence of certain relations.
    """
    # if called without anything installed (eg during install hook)
    # just default to earliest supported release. configs dont get touched
    # till post-install, anyway.
    release = get_os_codename_package('cinder-common', fatal=False) or 'folsom'
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    confs = []

    if relation_ids('ceph'):
        # need to create this early, new peers will have a relation during
        # registration before they've run the ceph hooks to create the
        # directory.
        if not os.path.isdir(os.path.dirname(CEPH_CONF)):
            os.mkdir(os.path.dirname(CEPH_CONF))
        confs.append(CEPH_CONF)

    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

    return configs


def restart_map():
    '''
    Determine the correct resource map to be passed to
    charmhelpers.core.restart_on_change() based on the services configured.

    :returns: dict: A dictionary mapping config file to lists of services
                    that should be restarted when file changes.
    '''
    _map = []
    for f, ctxt in CONFIG_FILES.iteritems():
        svcs = []
        for svc in ctxt['services']:
            svcs.append(svc)
        if svcs:
            _map.append((f, svcs))
    return OrderedDict(_map)


def ensure_ceph_pool(service, replicas):
    '''Creates a ceph pool for service if one does not exist'''
    # TODO: Ditto about moving somewhere sharable.
    if not ceph_pool_exists(service=service, name=service):
        ceph_create_pool(service=service, name=service, replicas=replicas)


def set_ceph_env_variables(service):
    # XXX: Horrid kludge to make cinder-volume use
    # a different ceph username than admin
    env = open('/etc/environment', 'r').read()
    if 'CEPH_ARGS' not in env:
        with open('/etc/environment', 'a') as out:
            out.write('CEPH_ARGS="--id %s"\n' % service)
    with open('/etc/init/cinder-volume.override', 'w') as out:
        out.write('env CEPH_ARGS="--id %s"\n' % service)
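With the single CONFIG_FILES entry above, restart_map() reduces to one file mapped to one service. A quick illustrative check (assuming the module imports cleanly outside a hook environment):

from cinder_utils import restart_map

# /etc/ceph/ceph.conf -> the services to bounce when it changes
assert dict(restart_map()) == {'/etc/ceph/ceph.conf': ['cinder-volume']}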
1
hooks/config-changed
Symbolic link
@ -0,0 +1 @@
cinder_hooks.py
1
hooks/install
Symbolic link
@ -0,0 +1 @@
cinder_hooks.py
1
hooks/start
Symbolic link
@ -0,0 +1 @@
cinder_hooks.py
1
hooks/stop
Symbolic link
@ -0,0 +1 @@
cinder_hooks.py
1
hooks/storage-backend-relation-broken
Symbolic link
@ -0,0 +1 @@
cinder_hooks.py
1
hooks/storage-backend-relation-changed
Symbolic link
@ -0,0 +1 @@
cinder_hooks.py
1
hooks/storage-backend-relation-joined
Symbolic link
@ -0,0 +1 @@
cinder_hooks.py
636
icon.svg
Normal file
@ -0,0 +1,636 @@
(icon.svg source omitted here: a 636-line, 96x96 Inkscape-generated SVG of the OpenStack Cinder icon, 32 KiB.)
16
metadata.yaml
Normal file
@ -0,0 +1,16 @@
name: cinder-ceph
summary: Ceph integration for OpenStack Block Storage
maintainer: James Page <james.page@ubuntu.com>
description: |
  Cinder is the block storage service for the OpenStack project.
  .
  This charm provides a Ceph storage backend for Cinder.
categories:
  - miscellaneous
provides:
  storage-backend:
    interface: cinder-backend
    scope: container
requires:
  ceph:
    interface: ceph-client
6
setup.cfg
Normal file
@ -0,0 +1,6 @@
[nosetests]
verbosity=1
with-coverage=1
cover-erase=1
cover-package=hooks
2
unit_tests/__init__.py
Normal file
@ -0,0 +1,2 @@
import sys
sys.path.append('hooks')
40
unit_tests/test_cinder_contexts.py
Normal file
@ -0,0 +1,40 @@
import cinder_contexts as contexts

from test_utils import (
    CharmTestCase
)

TO_PATCH = [
    'is_relation_made',
    'service_name',
]


class TestCinderContext(CharmTestCase):

    def setUp(self):
        super(TestCinderContext, self).setUp(contexts, TO_PATCH)

    def test_ceph_not_related(self):
        self.is_relation_made.return_value = False
        self.assertEquals(contexts.CephSubordinateContext()(), {})

    def test_ceph_related(self):
        self.is_relation_made.return_value = True
        service = 'mycinder'
        self.service_name.return_value = service
        self.assertEquals(
            contexts.CephSubordinateContext()(),
            {"cinder": {
                "/etc/cinder/cinder.conf": {
                    "sections": {
                        service: [
                            ('volume_backend_name', service),
                            ('volume_driver',
                             'cinder.volume.driver.RBDDriver'),
                            ('rbd_pool', service),
                            ('rbd_user', service),
                        ]
                    }
                }
            }})
110
unit_tests/test_cinder_hooks.py
Normal file
@ -0,0 +1,110 @@
from mock import MagicMock, patch, call

import cinder_utils as utils

from test_utils import (
    CharmTestCase,
)

# Need to do some early patching to get the module loaded.
_register_configs = utils.register_configs
utils.register_configs = MagicMock()
import cinder_hooks as hooks
utils.register_configs = _register_configs

TO_PATCH = [
    # cinder_utils
    'ensure_ceph_pool',
    'ensure_ceph_keyring',
    'register_configs',
    'restart_map',
    'set_ceph_env_variables',
    'CONFIGS',
    # charmhelpers.core.hookenv
    'config',
    'relation_ids',
    'relation_set',
    'service_name',
    'log',
    # charmhelpers.core.host
    'apt_install',
    'apt_update',
    # charmhelpers.contrib.hahelpers.cluster_utils
    'eligible_leader',
    'execd_preinstall'
]


class TestInstallHook(CharmTestCase):

    def setUp(self):
        super(TestInstallHook, self).setUp(hooks, TO_PATCH)
        self.config.side_effect = self.test_config.get

    def test_correct_install_packages(self):
        hooks.hooks.execute(['hooks/install'])
        self.apt_install.assert_called_with(['ceph-common'], fatal=True)


class TestChangedHooks(CharmTestCase):

    def setUp(self):
        super(TestChangedHooks, self).setUp(hooks, TO_PATCH)
        self.config.side_effect = self.test_config.get


class TestJoinedHooks(CharmTestCase):

    def setUp(self):
        super(TestJoinedHooks, self).setUp(hooks, TO_PATCH)
        self.config.side_effect = self.test_config.get

    @patch('os.mkdir')
    def test_ceph_joined(self, mkdir):
        '''It correctly prepares for a ceph changed hook'''
        with patch('os.path.isdir') as isdir:
            isdir.return_value = False
            hooks.hooks.execute(['hooks/ceph-relation-joined'])
            mkdir.assert_called_with('/etc/ceph')

    def test_ceph_changed_no_key(self):
        '''It does nothing when ceph key is not available'''
        self.CONFIGS.complete_contexts.return_value = ['']
        hooks.hooks.execute(['hooks/ceph-relation-changed'])
        m = 'ceph relation incomplete. Peer not ready?'
        self.log.assert_called_with(m)

    def test_ceph_changed(self):
        '''It ensures ceph assets created on ceph changed'''
        self.CONFIGS.complete_contexts.return_value = ['ceph']
        self.service_name.return_value = 'cinder'
        self.ensure_ceph_keyring.return_value = True
        hooks.hooks.execute(['hooks/ceph-relation-changed'])
        self.ensure_ceph_keyring.assert_called_with(service='cinder',
                                                    user='cinder',
                                                    group='cinder')
        self.ensure_ceph_pool.assert_called_with(service='cinder', replicas=2)
        for c in [call('/etc/ceph/ceph.conf')]:
            self.assertIn(c, self.CONFIGS.write.call_args_list)
        self.set_ceph_env_variables.assert_called_with(service='cinder')

    def test_ceph_changed_no_keys(self):
        '''It does not write ceph config when the keyring is unavailable'''
        self.CONFIGS.complete_contexts.return_value = ['ceph']
        self.service_name.return_value = 'cinder'
        self.ensure_ceph_keyring.return_value = False
        hooks.hooks.execute(['hooks/ceph-relation-changed'])
        # NOTE(jamespage): If ensure_ceph_keyring fails, then
        # the hook should just exit 0 and return.
        self.assertTrue(self.log.called)
        self.assertFalse(self.CONFIGS.write.called)

    def test_ceph_changed_no_leadership(self):
        '''It does not attempt to create ceph pool if not leader'''
        self.eligible_leader.return_value = False
        self.service_name.return_value = 'cinder'
        self.ensure_ceph_keyring.return_value = True
        hooks.hooks.execute(['hooks/ceph-relation-changed'])
        self.assertFalse(self.ensure_ceph_pool.called)
72
unit_tests/test_cinder_utils.py
Normal file
@ -0,0 +1,72 @@
from mock import patch, call
import cinder_utils as cinder_utils

from test_utils import (
    CharmTestCase,
)

TO_PATCH = [
    # helpers.core.hookenv
    'relation_ids',
    # ceph utils
    'ceph_create_pool',
    'ceph_pool_exists',
    # storage_utils
    'get_os_codename_package',
    'templating',
]


MOUNTS = [
    ['/mnt', '/dev/vdb']
]


class TestCinderUtils(CharmTestCase):

    def setUp(self):
        super(TestCinderUtils, self).setUp(cinder_utils, TO_PATCH)

    def test_ensure_ceph_pool(self):
        self.ceph_pool_exists.return_value = False
        cinder_utils.ensure_ceph_pool(service='cinder', replicas=3)
        self.ceph_create_pool.assert_called_with(service='cinder',
                                                 name='cinder',
                                                 replicas=3)

    def test_ensure_ceph_pool_already_exists(self):
        self.ceph_pool_exists.return_value = True
        cinder_utils.ensure_ceph_pool(service='cinder', replicas=3)
        self.assertFalse(self.ceph_create_pool.called)

    @patch('os.mkdir')
    @patch('os.path.isdir')
    @patch('os.path.exists')
    def test_register_configs_ceph(self, exists, isdir, mkdir):
        exists.return_value = False
        isdir.return_value = False
        self.get_os_codename_package.return_value = 'grizzly'
        self.relation_ids.return_value = ['ceph:0']
        configs = cinder_utils.register_configs()
        calls = []
        for conf in [cinder_utils.CEPH_CONF]:
            calls.append(
                call(conf,
                     cinder_utils.CONFIG_FILES[conf]['hook_contexts'])
            )
        configs.register.assert_has_calls(calls, any_order=True)
        self.assertTrue(mkdir.called)

    def test_set_ceph_kludge(self):
        pass
        """
        def set_ceph_env_variables(service):
            # XXX: Horrid kludge to make cinder-volume use
            # a different ceph username than admin
            env = open('/etc/environment', 'r').read()
            if 'CEPH_ARGS' not in env:
                with open('/etc/environment', 'a') as out:
                    out.write('CEPH_ARGS="--id %s"\n' % service)
            with open('/etc/init/cinder-volume.override', 'w') as out:
                out.write('env CEPH_ARGS="--id %s"\n' % service)
        """
100
unit_tests/test_utils.py
Normal file
@ -0,0 +1,100 @@
import logging
import unittest
import os
import yaml

from mock import patch


def load_config():
    '''
    Walk backwards from __file__ looking for config.yaml, load and return
    the 'options' section.
    '''
    config = None
    f = __file__
    while config is None:
        d = os.path.dirname(f)
        if os.path.isfile(os.path.join(d, 'config.yaml')):
            config = os.path.join(d, 'config.yaml')
            break
        f = d

    if not config:
        logging.error('Could not find config.yaml in any parent directory '
                      'of %s. ' % __file__)
        raise Exception

    return yaml.safe_load(open(config).read())['options']


def get_default_config():
    '''
    Load default charm config from config.yaml and return as a dict.
    If no default is set in config.yaml, its value is None.
    '''
    default_config = {}
    config = load_config()
    for k, v in config.iteritems():
        if 'default' in v:
            default_config[k] = v['default']
        else:
            default_config[k] = None
    return default_config


class CharmTestCase(unittest.TestCase):

    def setUp(self, obj, patches):
        super(CharmTestCase, self).setUp()
        self.patches = patches
        self.obj = obj
        self.test_config = TestConfig()
        self.test_relation = TestRelation()
        self.patch_all()

    def patch(self, method):
        _m = patch.object(self.obj, method)
        mock = _m.start()
        self.addCleanup(_m.stop)
        return mock

    def patch_all(self):
        for method in self.patches:
            setattr(self, method, self.patch(method))


class TestConfig(object):

    def __init__(self):
        self.config = get_default_config()

    def get(self, attr):
        try:
            return self.config[attr]
        except KeyError:
            return None

    def get_all(self):
        return self.config

    def set(self, attr, value):
        if attr not in self.config:
            raise KeyError
        self.config[attr] = value


class TestRelation(object):

    def __init__(self, relation_data={}):
        self.relation_data = relation_data

    def set(self, relation_data):
        self.relation_data = relation_data

    def get(self, attr=None, unit=None, rid=None):
        if attr is None:
            return self.relation_data
        elif attr in self.relation_data:
            return self.relation_data[attr]
        return None
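A hypothetical sketch of how a new test module would lean on these helpers: patch the module under test with CharmTestCase, then drive config() through TestConfig. (The class and test names here are arbitrary, and importing cinder_hooks in practice needs the early register_configs patching shown at the top of test_cinder_hooks.py.)

# Hypothetical example only.
import cinder_hooks as hooks
from test_utils import CharmTestCase


class ExampleConfigTest(CharmTestCase):

    def setUp(self):
        super(ExampleConfigTest, self).setUp(hooks, ['config'])
        self.config.side_effect = self.test_config.get

    def test_replication_count_override(self):
        # TestConfig.set() only accepts keys that exist in config.yaml
        self.test_config.set('ceph-osd-replication-count', 3)
        self.assertEqual(hooks.config('ceph-osd-replication-count'), 3)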