Merging python-redux and havana work.
This commit is contained in:
7
.coveragerc
Normal file
7
.coveragerc
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
[report]
|
||||||
|
# Regexes for lines to exclude from consideration
|
||||||
|
exclude_lines =
|
||||||
|
if __name__ == .__main__.:
|
||||||
|
include=
|
||||||
|
hooks/cinder_*
|
||||||
|
|
||||||
17
.project
Normal file
17
.project
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<projectDescription>
|
||||||
|
<name>cinder</name>
|
||||||
|
<comment></comment>
|
||||||
|
<projects>
|
||||||
|
</projects>
|
||||||
|
<buildSpec>
|
||||||
|
<buildCommand>
|
||||||
|
<name>org.python.pydev.PyDevBuilder</name>
|
||||||
|
<arguments>
|
||||||
|
</arguments>
|
||||||
|
</buildCommand>
|
||||||
|
</buildSpec>
|
||||||
|
<natures>
|
||||||
|
<nature>org.python.pydev.pythonNature</nature>
|
||||||
|
</natures>
|
||||||
|
</projectDescription>
|
||||||
9
.pydevproject
Normal file
9
.pydevproject
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||||
|
<?eclipse-pydev version="1.0"?><pydev_project>
|
||||||
|
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
|
||||||
|
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
|
||||||
|
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
|
||||||
|
<path>/cinder/hooks</path>
|
||||||
|
<path>/cinder/unit_tests</path>
|
||||||
|
</pydev_pathproperty>
|
||||||
|
</pydev_project>
|
||||||
14
Makefile
Normal file
14
Makefile
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
#!/usr/bin/make
|
||||||
|
PYTHON := /usr/bin/env python
|
||||||
|
|
||||||
|
lint:
|
||||||
|
@flake8 --exclude hooks/charmhelpers hooks
|
||||||
|
@flake8 --exclude hooks/charmhelpers unit_tests
|
||||||
|
@charm proof
|
||||||
|
|
||||||
|
test:
|
||||||
|
@echo Starting tests...
|
||||||
|
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
|
||||||
|
|
||||||
|
sync:
|
||||||
|
@charm-helper-sync -c charm-helpers.yaml
|
||||||
@@ -24,7 +24,8 @@ services deployed via Juju charms, specifically: mysql, rabbitmq-server,
|
|||||||
keystone and nova-cloud-controller. The following assumes these services
|
keystone and nova-cloud-controller. The following assumes these services
|
||||||
have already been deployed.
|
have already been deployed.
|
||||||
|
|
||||||
a. Basic, all-in-one using local storage and iSCSI.
|
Basic, all-in-one using local storage and iSCSI
|
||||||
|
===============================================
|
||||||
|
|
||||||
The api server, scheduler and volume service are all deployed into the same
|
The api server, scheduler and volume service are all deployed into the same
|
||||||
unit. Local storage will be initialized as a LVM phsyical device, and a volume
|
unit. Local storage will be initialized as a LVM phsyical device, and a volume
|
||||||
@@ -32,66 +33,68 @@ group initialized. Instance volumes will be created locally as logical volumes
|
|||||||
and exported to instances via iSCSI. This is ideal for small-scale deployments
|
and exported to instances via iSCSI. This is ideal for small-scale deployments
|
||||||
or testing:
|
or testing:
|
||||||
|
|
||||||
$ cat >cinder.cfg <<END
|
cat >cinder.cfg <<END
|
||||||
cinder:
|
cinder:
|
||||||
block-device: sdc
|
block-device: sdc
|
||||||
overwrite: true
|
overwrite: true
|
||||||
END
|
END
|
||||||
$ juju deploy --config=cinder.cfg cinder
|
juju deploy --config=cinder.cfg cinder
|
||||||
$ juju add-relation cinder keystone
|
juju add-relation cinder keystone
|
||||||
$ juju add-relation cinder mysql
|
juju add-relation cinder mysql
|
||||||
$ juju add-relation cinder rabbitmq-server
|
juju add-relation cinder rabbitmq-server
|
||||||
$ juju add-relation cinder nova-cloud-controller
|
juju add-relation cinder nova-cloud-controller
|
||||||
|
|
||||||
b. Separate volume units for scale out, using local storage and iSCSI.
|
Separate volume units for scale out, using local storage and iSCSI
|
||||||
|
==================================================================
|
||||||
|
|
||||||
Separating the volume service from the API service allows the storage pool
|
Separating the volume service from the API service allows the storage pool
|
||||||
to easily scale without the added complexity that accompanies load-balancing
|
to easily scale without the added complexity that accompanies load-balancing
|
||||||
the API server. When we've exhausted local storage on volume serve, we can
|
the API server. When we've exhausted local storage on volume server, we can
|
||||||
simply add-unit to expand our capacity. Future requests to allocate volumes
|
simply add-unit to expand our capacity. Future requests to allocate volumes
|
||||||
will be distributed across the pool for volume servers according to the
|
will be distributed across the pool of volume servers according to the
|
||||||
availability of storage space.
|
availability of storage space.
|
||||||
|
|
||||||
$ cat >cinder.cfg <<END
|
cat >cinder.cfg <<END
|
||||||
cinder-api:
|
cinder-api:
|
||||||
enabled-services: api, scheduler
|
enabled-services: api, scheduler
|
||||||
cinder-volume:
|
cinder-volume:
|
||||||
enabled-serfvices: volume
|
enabled-services: volume
|
||||||
block-device: sdc
|
block-device: sdc
|
||||||
overwrite: true
|
overwrite: true
|
||||||
END
|
END
|
||||||
$ juju deploy --config=cinder.cfg cinder cinder-api
|
juju deploy --config=cinder.cfg cinder cinder-api
|
||||||
$ juju deploy --config=cinder.cfg cinder cinder-api
|
juju deploy --config=cinder.cfg cinder cinder-volume
|
||||||
$ juju add-relation cinder-api mysql
|
juju add-relation cinder-api mysql
|
||||||
$ juju add-relation cinder-api rabbitmq-server
|
juju add-relation cinder-api rabbitmq-server
|
||||||
$ juju add-relation cinder-api keystone
|
juju add-relation cinder-api keystone
|
||||||
$ juju add-relation cinder-api nova-cloud-controller
|
juju add-relation cinder-api nova-cloud-controller
|
||||||
$ juju add-relation cinder-volume mysql
|
juju add-relation cinder-volume mysql
|
||||||
$ juju add-relation cinder-volume rabbitmq-server
|
juju add-relation cinder-volume rabbitmq-server
|
||||||
|
|
||||||
# When more storage is needed, simply add more volume servers.
|
# When more storage is needed, simply add more volume servers.
|
||||||
$ juju add-unit cinder-volume
|
juju add-unit cinder-volume
|
||||||
|
|
||||||
c. All-in-one using Ceph-backed RBD volumes.
|
All-in-one using Ceph-backed RBD volumes
|
||||||
|
========================================
|
||||||
|
|
||||||
All 3 services can be deployed to the same unit, but instead of relying
|
All 3 services can be deployed to the same unit, but instead of relying
|
||||||
on local storage to back volumes an external Ceph cluster is used. This
|
on local storage to back volumes an external Ceph cluster is used. This
|
||||||
allows scalability and redundancy needs to be satisified and Cinder's RBD
|
allows scalability and redundancy needs to be satisified and Cinder's RBD
|
||||||
driver used to create, export and connect volumes to instances. This assumes
|
driver used to create, export and connect volumes to instances. This assumes
|
||||||
a functioning Ceph cluster has already been deployed using the official Ceph
|
a functioning Ceph cluster has already been deployed using the official Ceph
|
||||||
charm and a relation exists between the Ceph service and nova-compute.
|
charm and a relation exists between the Ceph service and the nova-compute
|
||||||
service.
|
service.
|
||||||
|
|
||||||
$ cat >cinder.cfg <<END
|
cat >cinder.cfg <<END
|
||||||
cinder:
|
cinder:
|
||||||
block-device: None
|
block-device: None
|
||||||
END
|
END
|
||||||
$ juju deploy --config=cinder.cfg cinder
|
juju deploy --config=cinder.cfg cinder
|
||||||
$ juju add-relation cinder ceph
|
juju add-relation cinder ceph
|
||||||
$ juju add-relation cinder keystone
|
juju add-relation cinder keystone
|
||||||
$ juju add-relation cinder mysql
|
juju add-relation cinder mysql
|
||||||
$ juju add-relation cinder rabbitmq-server
|
juju add-relation cinder rabbitmq-server
|
||||||
$ juju add-relation cinder nova-cloud-controller
|
juju add-relation cinder nova-cloud-controller
|
||||||
|
|
||||||
|
|
||||||
Configuration
|
Configuration
|
||||||
12
charm-helpers.yaml
Normal file
12
charm-helpers.yaml
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
branch: lp:charm-helpers
|
||||||
|
destination: hooks/charmhelpers
|
||||||
|
include:
|
||||||
|
- core
|
||||||
|
- fetch
|
||||||
|
- contrib.openstack|inc=*
|
||||||
|
- contrib.storage
|
||||||
|
- contrib.hahelpers:
|
||||||
|
- apache
|
||||||
|
- cluster
|
||||||
|
- fetch
|
||||||
|
- payload.execd
|
||||||
12
config.yaml
12
config.yaml
@@ -48,11 +48,11 @@ options:
|
|||||||
description: |
|
description: |
|
||||||
If true, charm will attempt to overwrite block devices containin
|
If true, charm will attempt to overwrite block devices containin
|
||||||
previous filesystems or LVM, assuming it is not in use.
|
previous filesystems or LVM, assuming it is not in use.
|
||||||
db-user:
|
database-user:
|
||||||
default: cinder
|
default: cinder
|
||||||
type: string
|
type: string
|
||||||
description: Username to request database access.
|
description: Username to request database access.
|
||||||
cinder-db:
|
database:
|
||||||
default: cinder
|
default: cinder
|
||||||
type: string
|
type: string
|
||||||
description: Database to request access.
|
description: Database to request access.
|
||||||
@@ -72,6 +72,14 @@ options:
|
|||||||
default: RegionOne
|
default: RegionOne
|
||||||
type: string
|
type: string
|
||||||
description: OpenStack Region
|
description: OpenStack Region
|
||||||
|
glance-api-version:
|
||||||
|
default: 1
|
||||||
|
type: int
|
||||||
|
description: |
|
||||||
|
Newer storage drivers may require the v2 Glance API to perform certain
|
||||||
|
actions e.g. the RBD driver requires requires this to support COW
|
||||||
|
cloning of images. This option will default to v1 for backwards
|
||||||
|
compatibility older glance services.
|
||||||
# HA configuration settings
|
# HA configuration settings
|
||||||
vip:
|
vip:
|
||||||
type: string
|
type: string
|
||||||
|
|||||||
0
hooks/__init__.py
Normal file
0
hooks/__init__.py
Normal file
1
hooks/amqp-relation-broken
Symbolic link
1
hooks/amqp-relation-broken
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
1
hooks/ceph-relation-broken
Symbolic link
1
hooks/ceph-relation-broken
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
0
hooks/charmhelpers/__init__.py
Normal file
0
hooks/charmhelpers/__init__.py
Normal file
0
hooks/charmhelpers/contrib/__init__.py
Normal file
0
hooks/charmhelpers/contrib/__init__.py
Normal file
0
hooks/charmhelpers/contrib/hahelpers/__init__.py
Normal file
0
hooks/charmhelpers/contrib/hahelpers/__init__.py
Normal file
58
hooks/charmhelpers/contrib/hahelpers/apache.py
Normal file
58
hooks/charmhelpers/contrib/hahelpers/apache.py
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2012 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# This file is sourced from lp:openstack-charm-helpers
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# James Page <james.page@ubuntu.com>
|
||||||
|
# Adam Gandelman <adamg@ubuntu.com>
|
||||||
|
#
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
config as config_get,
|
||||||
|
relation_get,
|
||||||
|
relation_ids,
|
||||||
|
related_units as relation_list,
|
||||||
|
log,
|
||||||
|
INFO,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def get_cert():
|
||||||
|
cert = config_get('ssl_cert')
|
||||||
|
key = config_get('ssl_key')
|
||||||
|
if not (cert and key):
|
||||||
|
log("Inspecting identity-service relations for SSL certificate.",
|
||||||
|
level=INFO)
|
||||||
|
cert = key = None
|
||||||
|
for r_id in relation_ids('identity-service'):
|
||||||
|
for unit in relation_list(r_id):
|
||||||
|
if not cert:
|
||||||
|
cert = relation_get('ssl_cert',
|
||||||
|
rid=r_id, unit=unit)
|
||||||
|
if not key:
|
||||||
|
key = relation_get('ssl_key',
|
||||||
|
rid=r_id, unit=unit)
|
||||||
|
return (cert, key)
|
||||||
|
|
||||||
|
|
||||||
|
def get_ca_cert():
|
||||||
|
ca_cert = None
|
||||||
|
log("Inspecting identity-service relations for CA SSL certificate.",
|
||||||
|
level=INFO)
|
||||||
|
for r_id in relation_ids('identity-service'):
|
||||||
|
for unit in relation_list(r_id):
|
||||||
|
if not ca_cert:
|
||||||
|
ca_cert = relation_get('ca_cert',
|
||||||
|
rid=r_id, unit=unit)
|
||||||
|
return ca_cert
|
||||||
|
|
||||||
|
|
||||||
|
def install_ca_cert(ca_cert):
|
||||||
|
if ca_cert:
|
||||||
|
with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
|
||||||
|
'w') as crt:
|
||||||
|
crt.write(ca_cert)
|
||||||
|
subprocess.check_call(['update-ca-certificates', '--fresh'])
|
||||||
183
hooks/charmhelpers/contrib/hahelpers/cluster.py
Normal file
183
hooks/charmhelpers/contrib/hahelpers/cluster.py
Normal file
@@ -0,0 +1,183 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2012 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# James Page <james.page@ubuntu.com>
|
||||||
|
# Adam Gandelman <adamg@ubuntu.com>
|
||||||
|
#
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
import os
|
||||||
|
|
||||||
|
from socket import gethostname as get_unit_hostname
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
log,
|
||||||
|
relation_ids,
|
||||||
|
related_units as relation_list,
|
||||||
|
relation_get,
|
||||||
|
config as config_get,
|
||||||
|
INFO,
|
||||||
|
ERROR,
|
||||||
|
unit_get,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class HAIncompleteConfig(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def is_clustered():
|
||||||
|
for r_id in (relation_ids('ha') or []):
|
||||||
|
for unit in (relation_list(r_id) or []):
|
||||||
|
clustered = relation_get('clustered',
|
||||||
|
rid=r_id,
|
||||||
|
unit=unit)
|
||||||
|
if clustered:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def is_leader(resource):
|
||||||
|
cmd = [
|
||||||
|
"crm", "resource",
|
||||||
|
"show", resource
|
||||||
|
]
|
||||||
|
try:
|
||||||
|
status = subprocess.check_output(cmd)
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
if get_unit_hostname() in status:
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def peer_units():
|
||||||
|
peers = []
|
||||||
|
for r_id in (relation_ids('cluster') or []):
|
||||||
|
for unit in (relation_list(r_id) or []):
|
||||||
|
peers.append(unit)
|
||||||
|
return peers
|
||||||
|
|
||||||
|
|
||||||
|
def oldest_peer(peers):
|
||||||
|
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
|
||||||
|
for peer in peers:
|
||||||
|
remote_unit_no = int(peer.split('/')[1])
|
||||||
|
if remote_unit_no < local_unit_no:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def eligible_leader(resource):
|
||||||
|
if is_clustered():
|
||||||
|
if not is_leader(resource):
|
||||||
|
log('Deferring action to CRM leader.', level=INFO)
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
peers = peer_units()
|
||||||
|
if peers and not oldest_peer(peers):
|
||||||
|
log('Deferring action to oldest service unit.', level=INFO)
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def https():
|
||||||
|
'''
|
||||||
|
Determines whether enough data has been provided in configuration
|
||||||
|
or relation data to configure HTTPS
|
||||||
|
.
|
||||||
|
returns: boolean
|
||||||
|
'''
|
||||||
|
if config_get('use-https') == "yes":
|
||||||
|
return True
|
||||||
|
if config_get('ssl_cert') and config_get('ssl_key'):
|
||||||
|
return True
|
||||||
|
for r_id in relation_ids('identity-service'):
|
||||||
|
for unit in relation_list(r_id):
|
||||||
|
rel_state = [
|
||||||
|
relation_get('https_keystone', rid=r_id, unit=unit),
|
||||||
|
relation_get('ssl_cert', rid=r_id, unit=unit),
|
||||||
|
relation_get('ssl_key', rid=r_id, unit=unit),
|
||||||
|
relation_get('ca_cert', rid=r_id, unit=unit),
|
||||||
|
]
|
||||||
|
# NOTE: works around (LP: #1203241)
|
||||||
|
if (None not in rel_state) and ('' not in rel_state):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def determine_api_port(public_port):
|
||||||
|
'''
|
||||||
|
Determine correct API server listening port based on
|
||||||
|
existence of HTTPS reverse proxy and/or haproxy.
|
||||||
|
|
||||||
|
public_port: int: standard public port for given service
|
||||||
|
|
||||||
|
returns: int: the correct listening port for the API service
|
||||||
|
'''
|
||||||
|
i = 0
|
||||||
|
if len(peer_units()) > 0 or is_clustered():
|
||||||
|
i += 1
|
||||||
|
if https():
|
||||||
|
i += 1
|
||||||
|
return public_port - (i * 10)
|
||||||
|
|
||||||
|
|
||||||
|
def determine_haproxy_port(public_port):
|
||||||
|
'''
|
||||||
|
Description: Determine correct proxy listening port based on public IP +
|
||||||
|
existence of HTTPS reverse proxy.
|
||||||
|
|
||||||
|
public_port: int: standard public port for given service
|
||||||
|
|
||||||
|
returns: int: the correct listening port for the HAProxy service
|
||||||
|
'''
|
||||||
|
i = 0
|
||||||
|
if https():
|
||||||
|
i += 1
|
||||||
|
return public_port - (i * 10)
|
||||||
|
|
||||||
|
|
||||||
|
def get_hacluster_config():
|
||||||
|
'''
|
||||||
|
Obtains all relevant configuration from charm configuration required
|
||||||
|
for initiating a relation to hacluster:
|
||||||
|
|
||||||
|
ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
|
||||||
|
|
||||||
|
returns: dict: A dict containing settings keyed by setting name.
|
||||||
|
raises: HAIncompleteConfig if settings are missing.
|
||||||
|
'''
|
||||||
|
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
|
||||||
|
conf = {}
|
||||||
|
for setting in settings:
|
||||||
|
conf[setting] = config_get(setting)
|
||||||
|
missing = []
|
||||||
|
[missing.append(s) for s, v in conf.iteritems() if v is None]
|
||||||
|
if missing:
|
||||||
|
log('Insufficient config data to configure hacluster.', level=ERROR)
|
||||||
|
raise HAIncompleteConfig
|
||||||
|
return conf
|
||||||
|
|
||||||
|
|
||||||
|
def canonical_url(configs, vip_setting='vip'):
|
||||||
|
'''
|
||||||
|
Returns the correct HTTP URL to this host given the state of HTTPS
|
||||||
|
configuration and hacluster.
|
||||||
|
|
||||||
|
:configs : OSTemplateRenderer: A config tempating object to inspect for
|
||||||
|
a complete https context.
|
||||||
|
:vip_setting: str: Setting in charm config that specifies
|
||||||
|
VIP address.
|
||||||
|
'''
|
||||||
|
scheme = 'http'
|
||||||
|
if 'https' in configs.complete_contexts():
|
||||||
|
scheme = 'https'
|
||||||
|
if is_clustered():
|
||||||
|
addr = config_get(vip_setting)
|
||||||
|
else:
|
||||||
|
addr = unit_get('private-address')
|
||||||
|
return '%s://%s' % (scheme, addr)
|
||||||
0
hooks/charmhelpers/contrib/openstack/__init__.py
Normal file
0
hooks/charmhelpers/contrib/openstack/__init__.py
Normal file
522
hooks/charmhelpers/contrib/openstack/context.py
Normal file
522
hooks/charmhelpers/contrib/openstack/context.py
Normal file
@@ -0,0 +1,522 @@
|
|||||||
|
import json
|
||||||
|
import os
|
||||||
|
|
||||||
|
from base64 import b64decode
|
||||||
|
|
||||||
|
from subprocess import (
|
||||||
|
check_call
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
from charmhelpers.fetch import (
|
||||||
|
apt_install,
|
||||||
|
filter_installed_packages,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
config,
|
||||||
|
local_unit,
|
||||||
|
log,
|
||||||
|
relation_get,
|
||||||
|
relation_ids,
|
||||||
|
related_units,
|
||||||
|
unit_get,
|
||||||
|
unit_private_ip,
|
||||||
|
ERROR,
|
||||||
|
WARNING,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.hahelpers.cluster import (
|
||||||
|
determine_api_port,
|
||||||
|
determine_haproxy_port,
|
||||||
|
https,
|
||||||
|
is_clustered,
|
||||||
|
peer_units,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.hahelpers.apache import (
|
||||||
|
get_cert,
|
||||||
|
get_ca_cert,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.openstack.neutron import (
|
||||||
|
neutron_plugin_attribute,
|
||||||
|
)
|
||||||
|
|
||||||
|
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
|
||||||
|
|
||||||
|
|
||||||
|
class OSContextError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_packages(packages):
|
||||||
|
'''Install but do not upgrade required plugin packages'''
|
||||||
|
required = filter_installed_packages(packages)
|
||||||
|
if required:
|
||||||
|
apt_install(required, fatal=True)
|
||||||
|
|
||||||
|
|
||||||
|
def context_complete(ctxt):
|
||||||
|
_missing = []
|
||||||
|
for k, v in ctxt.iteritems():
|
||||||
|
if v is None or v == '':
|
||||||
|
_missing.append(k)
|
||||||
|
if _missing:
|
||||||
|
log('Missing required data: %s' % ' '.join(_missing), level='INFO')
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
class OSContextGenerator(object):
|
||||||
|
interfaces = []
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
raise NotImplementedError
|
||||||
|
|
||||||
|
|
||||||
|
class SharedDBContext(OSContextGenerator):
|
||||||
|
interfaces = ['shared-db']
|
||||||
|
|
||||||
|
def __init__(self, database=None, user=None, relation_prefix=None):
|
||||||
|
'''
|
||||||
|
Allows inspecting relation for settings prefixed with relation_prefix.
|
||||||
|
This is useful for parsing access for multiple databases returned via
|
||||||
|
the shared-db interface (eg, nova_password, quantum_password)
|
||||||
|
'''
|
||||||
|
self.relation_prefix = relation_prefix
|
||||||
|
self.database = database
|
||||||
|
self.user = user
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
self.database = self.database or config('database')
|
||||||
|
self.user = self.user or config('database-user')
|
||||||
|
if None in [self.database, self.user]:
|
||||||
|
log('Could not generate shared_db context. '
|
||||||
|
'Missing required charm config options. '
|
||||||
|
'(database name and user)')
|
||||||
|
raise OSContextError
|
||||||
|
ctxt = {}
|
||||||
|
|
||||||
|
password_setting = 'password'
|
||||||
|
if self.relation_prefix:
|
||||||
|
password_setting = self.relation_prefix + '_password'
|
||||||
|
|
||||||
|
for rid in relation_ids('shared-db'):
|
||||||
|
for unit in related_units(rid):
|
||||||
|
passwd = relation_get(password_setting, rid=rid, unit=unit)
|
||||||
|
ctxt = {
|
||||||
|
'database_host': relation_get('db_host', rid=rid,
|
||||||
|
unit=unit),
|
||||||
|
'database': self.database,
|
||||||
|
'database_user': self.user,
|
||||||
|
'database_password': passwd,
|
||||||
|
}
|
||||||
|
if context_complete(ctxt):
|
||||||
|
return ctxt
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
class IdentityServiceContext(OSContextGenerator):
|
||||||
|
interfaces = ['identity-service']
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
log('Generating template context for identity-service')
|
||||||
|
ctxt = {}
|
||||||
|
|
||||||
|
for rid in relation_ids('identity-service'):
|
||||||
|
for unit in related_units(rid):
|
||||||
|
ctxt = {
|
||||||
|
'service_port': relation_get('service_port', rid=rid,
|
||||||
|
unit=unit),
|
||||||
|
'service_host': relation_get('service_host', rid=rid,
|
||||||
|
unit=unit),
|
||||||
|
'auth_host': relation_get('auth_host', rid=rid, unit=unit),
|
||||||
|
'auth_port': relation_get('auth_port', rid=rid, unit=unit),
|
||||||
|
'admin_tenant_name': relation_get('service_tenant',
|
||||||
|
rid=rid, unit=unit),
|
||||||
|
'admin_user': relation_get('service_username', rid=rid,
|
||||||
|
unit=unit),
|
||||||
|
'admin_password': relation_get('service_password', rid=rid,
|
||||||
|
unit=unit),
|
||||||
|
# XXX: Hard-coded http.
|
||||||
|
'service_protocol': 'http',
|
||||||
|
'auth_protocol': 'http',
|
||||||
|
}
|
||||||
|
if context_complete(ctxt):
|
||||||
|
return ctxt
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
class AMQPContext(OSContextGenerator):
|
||||||
|
interfaces = ['amqp']
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
log('Generating template context for amqp')
|
||||||
|
conf = config()
|
||||||
|
try:
|
||||||
|
username = conf['rabbit-user']
|
||||||
|
vhost = conf['rabbit-vhost']
|
||||||
|
except KeyError as e:
|
||||||
|
log('Could not generate shared_db context. '
|
||||||
|
'Missing required charm config options: %s.' % e)
|
||||||
|
raise OSContextError
|
||||||
|
|
||||||
|
ctxt = {}
|
||||||
|
for rid in relation_ids('amqp'):
|
||||||
|
for unit in related_units(rid):
|
||||||
|
if relation_get('clustered', rid=rid, unit=unit):
|
||||||
|
ctxt['clustered'] = True
|
||||||
|
ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
|
||||||
|
unit=unit)
|
||||||
|
else:
|
||||||
|
ctxt['rabbitmq_host'] = relation_get('private-address',
|
||||||
|
rid=rid, unit=unit)
|
||||||
|
ctxt.update({
|
||||||
|
'rabbitmq_user': username,
|
||||||
|
'rabbitmq_password': relation_get('password', rid=rid,
|
||||||
|
unit=unit),
|
||||||
|
'rabbitmq_virtual_host': vhost,
|
||||||
|
})
|
||||||
|
if context_complete(ctxt):
|
||||||
|
# Sufficient information found = break out!
|
||||||
|
break
|
||||||
|
# Used for active/active rabbitmq >= grizzly
|
||||||
|
ctxt['rabbitmq_hosts'] = []
|
||||||
|
for unit in related_units(rid):
|
||||||
|
ctxt['rabbitmq_hosts'].append(relation_get('private-address',
|
||||||
|
rid=rid, unit=unit))
|
||||||
|
if not context_complete(ctxt):
|
||||||
|
return {}
|
||||||
|
else:
|
||||||
|
return ctxt
|
||||||
|
|
||||||
|
|
||||||
|
class CephContext(OSContextGenerator):
|
||||||
|
interfaces = ['ceph']
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
'''This generates context for /etc/ceph/ceph.conf templates'''
|
||||||
|
if not relation_ids('ceph'):
|
||||||
|
return {}
|
||||||
|
log('Generating template context for ceph')
|
||||||
|
mon_hosts = []
|
||||||
|
auth = None
|
||||||
|
key = None
|
||||||
|
for rid in relation_ids('ceph'):
|
||||||
|
for unit in related_units(rid):
|
||||||
|
mon_hosts.append(relation_get('private-address', rid=rid,
|
||||||
|
unit=unit))
|
||||||
|
auth = relation_get('auth', rid=rid, unit=unit)
|
||||||
|
key = relation_get('key', rid=rid, unit=unit)
|
||||||
|
|
||||||
|
ctxt = {
|
||||||
|
'mon_hosts': ' '.join(mon_hosts),
|
||||||
|
'auth': auth,
|
||||||
|
'key': key,
|
||||||
|
}
|
||||||
|
|
||||||
|
if not os.path.isdir('/etc/ceph'):
|
||||||
|
os.mkdir('/etc/ceph')
|
||||||
|
|
||||||
|
if not context_complete(ctxt):
|
||||||
|
return {}
|
||||||
|
|
||||||
|
ensure_packages(['ceph-common'])
|
||||||
|
|
||||||
|
return ctxt
|
||||||
|
|
||||||
|
|
||||||
|
class HAProxyContext(OSContextGenerator):
|
||||||
|
interfaces = ['cluster']
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
'''
|
||||||
|
Builds half a context for the haproxy template, which describes
|
||||||
|
all peers to be included in the cluster. Each charm needs to include
|
||||||
|
its own context generator that describes the port mapping.
|
||||||
|
'''
|
||||||
|
if not relation_ids('cluster'):
|
||||||
|
return {}
|
||||||
|
|
||||||
|
cluster_hosts = {}
|
||||||
|
l_unit = local_unit().replace('/', '-')
|
||||||
|
cluster_hosts[l_unit] = unit_get('private-address')
|
||||||
|
|
||||||
|
for rid in relation_ids('cluster'):
|
||||||
|
for unit in related_units(rid):
|
||||||
|
_unit = unit.replace('/', '-')
|
||||||
|
addr = relation_get('private-address', rid=rid, unit=unit)
|
||||||
|
cluster_hosts[_unit] = addr
|
||||||
|
|
||||||
|
ctxt = {
|
||||||
|
'units': cluster_hosts,
|
||||||
|
}
|
||||||
|
if len(cluster_hosts.keys()) > 1:
|
||||||
|
# Enable haproxy when we have enough peers.
|
||||||
|
log('Ensuring haproxy enabled in /etc/default/haproxy.')
|
||||||
|
with open('/etc/default/haproxy', 'w') as out:
|
||||||
|
out.write('ENABLED=1\n')
|
||||||
|
return ctxt
|
||||||
|
log('HAProxy context is incomplete, this unit has no peers.')
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
class ImageServiceContext(OSContextGenerator):
|
||||||
|
interfaces = ['image-service']
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
'''
|
||||||
|
Obtains the glance API server from the image-service relation. Useful
|
||||||
|
in nova and cinder (currently).
|
||||||
|
'''
|
||||||
|
log('Generating template context for image-service.')
|
||||||
|
rids = relation_ids('image-service')
|
||||||
|
if not rids:
|
||||||
|
return {}
|
||||||
|
for rid in rids:
|
||||||
|
for unit in related_units(rid):
|
||||||
|
api_server = relation_get('glance-api-server',
|
||||||
|
rid=rid, unit=unit)
|
||||||
|
if api_server:
|
||||||
|
return {'glance_api_servers': api_server}
|
||||||
|
log('ImageService context is incomplete. '
|
||||||
|
'Missing required relation data.')
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
class ApacheSSLContext(OSContextGenerator):
|
||||||
|
"""
|
||||||
|
Generates a context for an apache vhost configuration that configures
|
||||||
|
HTTPS reverse proxying for one or many endpoints. Generated context
|
||||||
|
looks something like:
|
||||||
|
{
|
||||||
|
'namespace': 'cinder',
|
||||||
|
'private_address': 'iscsi.mycinderhost.com',
|
||||||
|
'endpoints': [(8776, 8766), (8777, 8767)]
|
||||||
|
}
|
||||||
|
|
||||||
|
The endpoints list consists of a tuples mapping external ports
|
||||||
|
to internal ports.
|
||||||
|
"""
|
||||||
|
interfaces = ['https']
|
||||||
|
|
||||||
|
# charms should inherit this context and set external ports
|
||||||
|
# and service namespace accordingly.
|
||||||
|
external_ports = []
|
||||||
|
service_namespace = None
|
||||||
|
|
||||||
|
def enable_modules(self):
|
||||||
|
cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
def configure_cert(self):
|
||||||
|
if not os.path.isdir('/etc/apache2/ssl'):
|
||||||
|
os.mkdir('/etc/apache2/ssl')
|
||||||
|
ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
|
||||||
|
if not os.path.isdir(ssl_dir):
|
||||||
|
os.mkdir(ssl_dir)
|
||||||
|
cert, key = get_cert()
|
||||||
|
with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
|
||||||
|
cert_out.write(b64decode(cert))
|
||||||
|
with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
|
||||||
|
key_out.write(b64decode(key))
|
||||||
|
ca_cert = get_ca_cert()
|
||||||
|
if ca_cert:
|
||||||
|
with open(CA_CERT_PATH, 'w') as ca_out:
|
||||||
|
ca_out.write(b64decode(ca_cert))
|
||||||
|
check_call(['update-ca-certificates'])
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
if isinstance(self.external_ports, basestring):
|
||||||
|
self.external_ports = [self.external_ports]
|
||||||
|
if (not self.external_ports or not https()):
|
||||||
|
return {}
|
||||||
|
|
||||||
|
self.configure_cert()
|
||||||
|
self.enable_modules()
|
||||||
|
|
||||||
|
ctxt = {
|
||||||
|
'namespace': self.service_namespace,
|
||||||
|
'private_address': unit_get('private-address'),
|
||||||
|
'endpoints': []
|
||||||
|
}
|
||||||
|
for ext_port in self.external_ports:
|
||||||
|
if peer_units() or is_clustered():
|
||||||
|
int_port = determine_haproxy_port(ext_port)
|
||||||
|
else:
|
||||||
|
int_port = determine_api_port(ext_port)
|
||||||
|
portmap = (int(ext_port), int(int_port))
|
||||||
|
ctxt['endpoints'].append(portmap)
|
||||||
|
return ctxt
|
||||||
|
|
||||||
|
|
||||||
|
class NeutronContext(object):
    """Base context for charms that configure quantum/neutron networking.

    Subclasses override the plugin, network_manager and
    neutron_security_groups properties; this base returns None for each.
    """
    interfaces = []

    @property
    def plugin(self):
        return None

    @property
    def network_manager(self):
        return None

    @property
    def packages(self):
        # package groups are defined per-plugin in the neutron helper
        return neutron_plugin_attribute(
            self.plugin, 'packages', self.network_manager)

    @property
    def neutron_security_groups(self):
        return None

    def _ensure_packages(self):
        # self.packages is a list of package lists
        for pkg_group in self.packages:
            ensure_packages(pkg_group)

    def _save_flag_file(self):
        # nova reads this flag file to discover the active plugin
        if self.network_manager == 'quantum':
            flag = '/etc/nova/quantum_plugin.conf'
        else:
            flag = '/etc/nova/neutron_plugin.conf'
        with open(flag, 'wb') as out:
            out.write(self.plugin + '\n')

    def ovs_ctxt(self):
        """Context fragment for the openvswitch plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        return {
            'core_plugin': driver,
            'neutron_plugin': 'ovs',
            'neutron_security_groups': self.neutron_security_groups,
            'local_ip': unit_private_ip(),
        }

    def __call__(self):
        self._ensure_packages()

        if self.network_manager not in ['quantum', 'neutron']:
            return {}
        if not self.plugin:
            return {}

        ctxt = {'network_manager': self.network_manager}
        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())
        self._save_flag_file()
        return ctxt
|
||||||
|
|
||||||
|
|
||||||
|
class OSConfigFlagContext(OSContextGenerator):
    '''
    Responsible for adding user-defined config-flags in charm config to
    a template context.

    config-flags is a comma-delimited list of k=v pairs.  Note that
    values may not themselves contain commas.
    '''
    def __call__(self):
        config_flags = config('config-flags')
        if not config_flags or config_flags in ['None', '']:
            return {}
        flags = {}
        for flag in config_flags.split(','):
            if '=' not in flag:
                log('Improperly formatted config-flag, expected k=v '
                    'got %s' % flag, level=WARNING)
                continue
            # split only on the first '=' so values that contain '='
            # (eg, connection URLs with query strings) survive intact.
            k, v = flag.split('=', 1)
            flags[k.strip()] = v
        return {'user_config_flags': flags}
|
||||||
|
|
||||||
|
|
||||||
|
class SubordinateConfigContext(OSContextGenerator):
    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principle for multiple config
    files and multiple services. Ie, a subordinate that has interfaces
    to both glance and nova may export to following yaml blob as json:

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                    MYSECTION:
                        - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]


    It is then up to the principle charms to subscribe this context to
    the service+config file it is interested in.  Configuration data will
    be available in the template context, in glance's case, as:
        ctxt = {
            ... other context ...
            'subordinate_config': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }

    """
    def __init__(self, service, config_file, interface):
        """
        :param service     : Service name key to query in any subordinate
                             data found
        :param config_file : Service's config file to query sections
        :param interface   : Subordinate interface to inspect
        """
        self.service = service
        self.config_file = config_file
        self.interface = interface

    def __call__(self):
        ctxt = {}
        for rid in relation_ids(self.interface):
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if sub_config and sub_config != '':
                    try:
                        sub_config = json.loads(sub_config)
                    except ValueError:
                        # only catch malformed JSON; anything else here
                        # would be a real bug worth surfacing.
                        log('Could not parse JSON from subordinate_config '
                            'setting from %s' % rid, level=ERROR)
                        continue

                    if self.service not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s service' % (rid, self.service))
                        continue

                    sub_config = sub_config[self.service]
                    if self.config_file not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s' % (rid, self.config_file))
                        continue

                    sub_config = sub_config[self.config_file]
                    for k, v in sub_config.iteritems():
                        ctxt[k] = v

        # templates expect a 'sections' key even when nothing was found
        if not ctxt:
            ctxt['sections'] = {}

        return ctxt
|
||||||
117
hooks/charmhelpers/contrib/openstack/neutron.py
Normal file
117
hooks/charmhelpers/contrib/openstack/neutron.py
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
# Various utilies for dealing with Neutron and the renaming from Quantum.
|
||||||
|
|
||||||
|
from subprocess import check_output
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
config,
|
||||||
|
log,
|
||||||
|
ERROR,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.openstack.utils import os_release
|
||||||
|
|
||||||
|
|
||||||
|
def headers_package():
    """Return the linux-headers package matching the running kernel.

    Required so DKMS modules (eg, openvswitch-datapath-dkms) can be
    built on the unit.
    """
    running_kernel = check_output(['uname', '-r']).strip()
    return 'linux-headers-%s' % running_kernel
|
||||||
|
|
||||||
|
|
||||||
|
# legacy
|
||||||
|
# legacy
def quantum_plugins():
    """Plugin definitions for releases where neutron was named quantum."""
    from charmhelpers.contrib.openstack import context
    ovs = {
        'config': '/etc/quantum/plugins/openvswitch/'
                  'ovs_quantum_plugin.ini',
        'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
                  'OVSQuantumPluginV2',
        'contexts': [
            context.SharedDBContext(user=config('neutron-database-user'),
                                    database=config('neutron-database'),
                                    relation_prefix='neutron')],
        'services': ['quantum-plugin-openvswitch-agent'],
        'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
                     ['quantum-plugin-openvswitch-agent']],
    }
    nvp = {
        'config': '/etc/quantum/plugins/nicira/nvp.ini',
        'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
                  'QuantumPlugin.NvpPluginV2',
        'services': [],
        'packages': [],
    }
    return {'ovs': ovs, 'nvp': nvp}
|
||||||
|
|
||||||
|
|
||||||
|
def neutron_plugins():
    """Plugin definitions for releases using the neutron naming (havana+).

    Maps plugin shortname -> config file, core driver, required template
    contexts, services and install packages for that plugin.
    """
    from charmhelpers.contrib.openstack import context
    return {
        'ovs': {
            'config': '/etc/neutron/plugins/openvswitch/'
                      'ovs_neutron_plugin.ini',
            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
                      'OVSNeutronPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron')],
            'services': ['neutron-plugin-openvswitch-agent'],
            # NOTE: the agent package was renamed along with the project;
            # the quantum- name here was a copy/paste leftover from
            # quantum_plugins() and does not exist for neutron releases.
            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
                         ['neutron-plugin-openvswitch-agent']],
        },
        'nvp': {
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
                      'NeutronPlugin.NvpPluginV2',
            'services': [],
            'packages': [],
        }
    }
|
||||||
|
|
||||||
|
|
||||||
|
def neutron_plugin_attribute(plugin, attr, net_manager=None):
    """Look up an attribute (config, driver, packages, ...) of a plugin.

    :param plugin: plugin shortname, eg 'ovs' or 'nvp'.
    :param attr: attribute key to look up.
    :param net_manager: 'quantum' or 'neutron'; detected via
                        network_manager() when not supplied.
    :returns: the attribute value, or None when the plugin does not
              define that attribute.
    :raises Exception: if the network manager or plugin is unknown.
    """
    manager = net_manager or network_manager()
    if manager == 'quantum':
        plugins = quantum_plugins()
    elif manager == 'neutron':
        plugins = neutron_plugins()
    else:
        log('Error: Network manager does not support plugins.')
        raise Exception('Unsupported network manager: %s' % manager)

    try:
        _plugin = plugins[plugin]
    except KeyError:
        log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
        raise Exception('Unrecognised plugin: %s' % plugin)

    try:
        return _plugin[attr]
    except KeyError:
        # attribute legitimately absent for this plugin
        return None
|
||||||
|
|
||||||
|
|
||||||
|
def network_manager():
    '''
    Deals with the renaming of Quantum to Neutron in H and any situations
    that require compatibility (eg, deploying H with network-manager=quantum,
    upgrading from G).
    '''
    release = os_release('nova-common')
    manager = config('network-manager').lower()

    # anything other than quantum/neutron passes straight through
    if manager not in ['quantum', 'neutron']:
        return manager

    if release in ['essex']:
        # E does not support neutron
        log('Neutron networking not supported in Essex.', level=ERROR)
        raise Exception
    if release in ['folsom', 'grizzly']:
        # neutron is named quantum in F and G
        return 'quantum'
    # ensure accurate naming for all releases post-H
    return 'neutron'
|
||||||
@@ -0,0 +1,2 @@
|
|||||||
|
# dummy __init__.py to fool syncer into thinking this is a syncable python
|
||||||
|
# module
|
||||||
11
hooks/charmhelpers/contrib/openstack/templates/ceph.conf
Normal file
11
hooks/charmhelpers/contrib/openstack/templates/ceph.conf
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
###############################################################################
# [ WARNING ]
# cinder configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
{# Only emit [global] once the ceph relation has provided auth/mon_hosts. #}
{% if auth -%}
[global]
 auth_supported = {{ auth }}
 keyring = /etc/ceph/$cluster.$name.keyring
 mon host = {{ mon_hosts }}
{% endif -%}
|
||||||
37
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
Normal file
37
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
{# haproxy config maintained by Juju; one listen stanza per clustered
   service, balancing across all peer units. #}
global
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    maxconn 20000
    user haproxy
    group haproxy
    spread-checks 0

defaults
    log global
    mode http
    option httplog
    option dontlognull
    retries 3
    timeout queue 1000
    timeout connect 1000
    timeout client 30000
    timeout server 30000

listen stats :8888
    mode http
    stats enable
    stats hide-version
    stats realm Haproxy\ Statistics
    stats uri /
    stats auth admin:password

{% if units -%}
{% for service, ports in service_ports.iteritems() -%}
listen {{ service }} 0.0.0.0:{{ ports[0] }}
    balance roundrobin
    option tcplog
    {% for unit, address in units.iteritems() -%}
    server {{ unit }} {{ address }}:{{ ports[1] }} check
    {% endfor %}
{% endfor -%}
{% endif -%}
|
||||||
@@ -0,0 +1,23 @@
|
|||||||
|
{# Apache SSL frontend maintained by Juju: one vhost per (external,
   internal) endpoint pair, terminating SSL and proxying to localhost. #}
{% if endpoints -%}
{% for ext, int in endpoints -%}
Listen {{ ext }}
NameVirtualHost *:{{ ext }}
<VirtualHost *:{{ ext }}>
    ServerName {{ private_address }}
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
    ProxyPass / http://localhost:{{ int }}/
    ProxyPassReverse / http://localhost:{{ int }}/
    ProxyPreserveHost on
</VirtualHost>
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
{% endfor -%}
{% endif -%}
|
||||||
@@ -0,0 +1,23 @@
|
|||||||
|
{# Apache SSL frontend maintained by Juju: one vhost per (external,
   internal) endpoint pair, terminating SSL and proxying to localhost. #}
{% if endpoints -%}
{% for ext, int in endpoints -%}
Listen {{ ext }}
NameVirtualHost *:{{ ext }}
<VirtualHost *:{{ ext }}>
    ServerName {{ private_address }}
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
    ProxyPass / http://localhost:{{ int }}/
    ProxyPassReverse / http://localhost:{{ int }}/
    ProxyPreserveHost on
</VirtualHost>
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
{% endfor -%}
{% endif -%}
|
||||||
280
hooks/charmhelpers/contrib/openstack/templating.py
Normal file
280
hooks/charmhelpers/contrib/openstack/templating.py
Normal file
@@ -0,0 +1,280 @@
|
|||||||
|
import os
|
||||||
|
|
||||||
|
from charmhelpers.fetch import apt_install
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
log,
|
||||||
|
ERROR,
|
||||||
|
INFO
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
|
||||||
|
|
||||||
|
try:
|
||||||
|
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
||||||
|
except ImportError:
|
||||||
|
# python-jinja2 may not be installed yet, or we're running unittests.
|
||||||
|
FileSystemLoader = ChoiceLoader = Environment = exceptions = None
|
||||||
|
|
||||||
|
|
||||||
|
class OSConfigException(Exception):
    """Raised when config templates cannot be located or rendered."""
|
||||||
|
|
||||||
|
|
||||||
|
def get_loader(templates_dir, os_release):
    """
    Create a jinja2.ChoiceLoader containing template dirs up to
    and including os_release.  Release sub-directories missing from
    templates_dir are simply omitted from the loader; templates_dir
    itself (and any templates dir shipped with this helper module)
    sit at the bottom of the search list as base loading dirs.

    :param templates_dir: str: Base template directory containing release
                               sub-directories.
    :param os_release   : str: OpenStack release codename to construct
                               template loader.
    :returns            : jinja2.ChoiceLoader constructed with a list of
                          jinja2.FilesystemLoaders, ordered in descending
                          order by OpenStack release.
    """
    if not os.path.isdir(templates_dir):
        log('Templates directory not found @ %s.' % templates_dir,
            level=ERROR)
        raise OSConfigException

    # base of the search list: the charm's templates dir, then any
    # templates shipped alongside this helper module.
    loaders = [FileSystemLoader(templates_dir)]
    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
    if os.path.isdir(helper_templates):
        loaders.append(FileSystemLoader(helper_templates))

    # stack release-specific dirs on top, newest release first,
    # stopping once os_release has been considered.
    for rel in OPENSTACK_CODENAMES.itervalues():
        tmpl_dir = os.path.join(templates_dir, rel)
        if os.path.isdir(tmpl_dir):
            loaders.insert(0, FileSystemLoader(tmpl_dir))
        if rel == os_release:
            break

    log('Creating choice loader with dirs: %s' %
        [l.searchpath for l in loaders], level=INFO)
    return ChoiceLoader(loaders)
|
||||||
|
|
||||||
|
|
||||||
|
class OSConfigTemplate(object):
    """
    Associates a config file template with a list of context generators.
    Responsible for constructing a template context based on those
    generators.
    """
    def __init__(self, config_file, contexts):
        self.config_file = config_file

        # allow a single bare callable as well as a list of callables
        if hasattr(contexts, '__call__'):
            self.contexts = [contexts]
        else:
            self.contexts = contexts

        self._complete_contexts = []

    def context(self):
        """Merge all generator output into a single context dict."""
        merged = {}
        for generator in self.contexts:
            data = generator()
            if data:
                merged.update(data)
                # remember which interfaces produced a complete context
                for interface in generator.interfaces:
                    if interface not in self._complete_contexts:
                        self._complete_contexts.append(interface)
        return merged

    def complete_contexts(self):
        """
        Return a list of interfaces that have satisfied contexts.
        """
        if self._complete_contexts:
            return self._complete_contexts
        self.context()
        return self._complete_contexts
|
||||||
|
|
||||||
|
|
||||||
|
class OSConfigRenderer(object):
    """
    Common templating system for OpenStack charms, intended to help
    charms share templates and manage config files across multiple
    OpenStack releases.

    Basic usage:
        configs = OSConfigRenderer(templates_dir='/tmp/templates',
                                   openstack_release='folsom')
        configs.register(config_file='/etc/nova/nova.conf',
                         contexts=[context.SharedDBContext(),
                                   context.AMQPContext()])
        configs.write('/etc/nova/nova.conf')   # single file
        configs.write_all()                    # everything registered

    Template loading: the loader built for the registered release
    searches, in order, the newest release-specific sub-directory up to
    and including that release, then the base templates_dir, then a
    templates dir shipped with this helper module (which carries common
    templates such as haproxy and apache frags).  This lets a charm fall
    back to the most recently updated copy of a template for a release.

    Context generators: each registered config file carries a list of
    generators which are invoked in a chain at render time to build the
    dict handed to the jinja2 template.  See context.py for details.
    """
    def __init__(self, templates_dir, openstack_release):
        if not os.path.isdir(templates_dir):
            log('Could not locate templates dir %s' % templates_dir,
                level=ERROR)
            raise OSConfigException

        self.templates_dir = templates_dir
        self.openstack_release = openstack_release
        self.templates = {}
        self._tmpl_env = None

        if None in [Environment, ChoiceLoader, FileSystemLoader]:
            # jinja2 could not be imported at module load time, so this
            # object was created pre-install hook.  Install it now; the
            # module is re-imported with working jinja2 bits on the next
            # hook execution.
            apt_install('python-jinja2')

    def register(self, config_file, contexts):
        """
        Register a config file with a list of context generators to be
        called during rendering.
        """
        self.templates[config_file] = OSConfigTemplate(
            config_file=config_file, contexts=contexts)
        log('Registered config file: %s' % config_file, level=INFO)

    def _get_tmpl_env(self):
        # lazily construct the jinja2 Environment
        if not self._tmpl_env:
            loader = get_loader(self.templates_dir, self.openstack_release)
            self._tmpl_env = Environment(loader=loader)

    def _get_template(self, template):
        self._get_tmpl_env()
        template = self._tmpl_env.get_template(template)
        log('Loaded template from %s' % template.filename, level=INFO)
        return template

    def render(self, config_file):
        """Render the registered template for config_file and return the
        resulting text; raises if the file is not registered."""
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException
        ctxt = self.templates[config_file].context()

        _tmpl = os.path.basename(config_file)
        try:
            template = self._get_template(_tmpl)
        except exceptions.TemplateNotFound:
            # if no template is found with basename, try looking for it
            # using a munged full path, eg:
            #   /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
            _tmpl = '_'.join(config_file.split('/')[1:])
            try:
                template = self._get_template(_tmpl)
            except exceptions.TemplateNotFound as e:
                log('Could not load template from %s by %s or %s.' %
                    (self.templates_dir, os.path.basename(config_file),
                     _tmpl),
                    level=ERROR)
                raise e

        log('Rendering from template: %s' % _tmpl, level=INFO)
        return template.render(ctxt)

    def write(self, config_file):
        """
        Write a single config file, raises if config file is not
        registered.
        """
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException

        # render fully before opening so a render failure cannot
        # truncate an existing config file.
        rendered = self.render(config_file)
        with open(config_file, 'wb') as out:
            out.write(rendered)

        log('Wrote template %s.' % config_file, level=INFO)

    def write_all(self):
        """
        Write out all registered config files.
        """
        for config_file in self.templates.iterkeys():
            self.write(config_file)

    def set_release(self, openstack_release):
        """
        Resets the template environment and generates a new template
        loader based on the new openstack release.
        """
        self._tmpl_env = None
        self.openstack_release = openstack_release
        self._get_tmpl_env()

    def complete_contexts(self):
        """
        Returns a list of context interfaces that yield a complete
        context.
        """
        interfaces = []
        for tmpl in self.templates.itervalues():
            interfaces.extend(tmpl.complete_contexts())
        return interfaces
|
||||||
365
hooks/charmhelpers/contrib/openstack/utils.py
Normal file
365
hooks/charmhelpers/contrib/openstack/utils.py
Normal file
@@ -0,0 +1,365 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
# Common python helper functions used for OpenStack charms.
|
||||||
|
from collections import OrderedDict
|
||||||
|
|
||||||
|
import apt_pkg as apt
|
||||||
|
import subprocess
|
||||||
|
import os
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
config,
|
||||||
|
log as juju_log,
|
||||||
|
charm_dir,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
lsb_release,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.fetch import (
|
||||||
|
apt_install,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Ubuntu Cloud Archive repository and its signing key.
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'

# Ubuntu series -> the OpenStack release shipped in its main archive.
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
    ('oneiric', 'diablo'),
    ('precise', 'essex'),
    ('quantal', 'folsom'),
    ('raring', 'grizzly'),
    ('saucy', 'havana'),
])


# Upstream version prefix -> OpenStack codename, ordered oldest first
# (get_loader relies on this ordering).
OPENSTACK_CODENAMES = OrderedDict([
    ('2011.2', 'diablo'),
    ('2012.1', 'essex'),
    ('2012.2', 'folsom'),
    ('2013.1', 'grizzly'),
    ('2013.2', 'havana'),
    ('2014.1', 'icehouse'),
])

# The ugly duckling
# swift versions its releases independently of the rest of OpenStack,
# so it needs its own version -> codename map.
SWIFT_CODENAMES = OrderedDict([
    ('1.4.3', 'diablo'),
    ('1.4.8', 'essex'),
    ('1.7.4', 'folsom'),
    ('1.8.0', 'grizzly'),
    ('1.7.7', 'grizzly'),
    ('1.7.6', 'grizzly'),
    ('1.10.0', 'havana'),
    ('1.9.1', 'havana'),
    ('1.9.0', 'havana'),
])
|
||||||
|
|
||||||
|
|
||||||
|
def error_out(msg):
    """Log a fatal error via juju and terminate the hook with status 1."""
    juju_log("FATAL ERROR: %s" % msg, level='ERROR')
    sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_codename_install_source(src):
    '''Derive OpenStack release codename from a given installation source.'''
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']

    if src == 'distro':
        # 'distro' tracks whatever the Ubuntu series ships
        try:
            return UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
        except KeyError:
            error_out('Could not derive openstack release for '
                      'this Ubuntu release: %s' % ubuntu_rel)

    if src.startswith('cloud:'):
        # cloud:<series>-<release>[/pocket] -> <release>
        pocket = src.split(':')[1]
        return pocket.split('%s-' % ubuntu_rel)[1].split('/')[0]

    # Best guess match based on deb string provided
    if src.startswith('deb') or src.startswith('ppa'):
        for codename in OPENSTACK_CODENAMES.itervalues():
            if codename in src:
                return codename
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_version_install_source(src):
    '''Derive OpenStack version number from an installation source.'''
    codename = get_os_codename_install_source(src)
    return get_os_version_codename(codename)
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_codename_version(vers):
    '''Determine OpenStack codename from version number.'''
    if vers not in OPENSTACK_CODENAMES:
        # error_out exits the hook; nothing runs past this branch
        error_out('Could not determine OpenStack codename for '
                  'version %s' % vers)
    return OPENSTACK_CODENAMES[vers]
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_version_codename(codename):
    '''Determine OpenStack version number from codename.'''
    for version, cname in OPENSTACK_CODENAMES.iteritems():
        if cname == codename:
            return version
    # no match: fatal, since callers expect a version string
    error_out('Could not derive OpenStack version for '
              'codename: %s' % codename)
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_codename_package(package, fatal=True):
    '''Derive OpenStack release codename from an installed package.

    :param package: deb package name to inspect.
    :param fatal  : if True, exit the hook on failure; otherwise
                    return None.
    '''
    apt.init()
    cache = apt.Cache()

    try:
        pkg = cache[package]
    except KeyError:
        # narrow the original bare except: a missing package raises
        # KeyError from the cache lookup; anything else is a real bug.
        if not fatal:
            return None
        # the package is unknown to the current apt cache.
        error_out('Could not determine version of package with no '
                  'installation candidate: %s' % package)

    if not pkg.current_ver:
        if not fatal:
            return None
        # package is known, but no version is currently installed.
        e = 'Could not determine version of uninstalled package: %s' % package
        error_out(e)

    vers = apt.upstream_version(pkg.current_ver.ver_str)

    try:
        if 'swift' in pkg.name:
            # swift versions independently of the rest of OpenStack;
            # try an x.y.z prefix first, then x.yy.z.
            swift_vers = vers[:5]
            if swift_vers not in SWIFT_CODENAMES:
                # Deal with 1.10.0 upward
                swift_vers = vers[:6]
            return SWIFT_CODENAMES[swift_vers]
        else:
            vers = vers[:6]
            return OPENSTACK_CODENAMES[vers]
    except KeyError:
        e = 'Could not determine OpenStack codename for version %s' % vers
        error_out(e)
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_version_package(pkg, fatal=True):
    '''Derive OpenStack version number from an installed package.

    :param pkg  : deb package name to inspect.
    :param fatal: passed through to get_os_codename_package.
    :returns    : version string, or None when the codename could not be
                  determined or is not in the version map.
    '''
    codename = get_os_codename_package(pkg, fatal=fatal)

    if not codename:
        return None

    # swift carries its own version numbering
    if 'swift' in pkg:
        vers_map = SWIFT_CODENAMES
    else:
        vers_map = OPENSTACK_CODENAMES

    for version, cname in vers_map.iteritems():
        if cname == codename:
            return version
    # fall through: unknown codename yields None; callers treat that as
    # "version unknown" rather than a fatal error.
|
||||||
|
|
||||||
|
|
||||||
|
os_rel = None
|
||||||
|
|
||||||
|
|
||||||
|
def os_release(package, base='essex'):
    '''
    Return the OpenStack release codename, memoized in the module-level
    `os_rel` global.

    If the codename can be determined from neither an installed package
    nor the configured installation source, fall back to `base` (the
    earliest release the charm supports).
    '''
    global os_rel
    if not os_rel:
        os_rel = (get_os_codename_package(package, fatal=False) or
                  get_os_codename_install_source(config('openstack-origin')) or
                  base)
    return os_rel
|
||||||
|
|
||||||
|
|
||||||
|
def import_key(keyid):
    """Import a GPG key from the Ubuntu keyserver into apt's keyring.

    :param keyid: str: key id to fetch via `apt-key adv --recv-keys`.

    Calls error_out() (which terminates the hook) on failure.
    """
    # Build the argv list directly rather than splitting a string.
    cmd = ['apt-key', 'adv',
           '--keyserver', 'keyserver.ubuntu.com',
           '--recv-keys', keyid]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        error_out("Error importing repo key %s" % keyid)
|
||||||
|
|
||||||
|
|
||||||
|
def configure_installation_source(rel):
    '''Configure apt installation source.

    :param rel: str: one of
        - 'distro': use the stock Ubuntu archive (no-op),
        - 'ppa:...': added via add-apt-repository,
        - 'deb ...' (optionally '|KEYID' suffixed): written to a
          sources.list.d file, importing the key when given,
        - 'cloud:<series>-<release>': an Ubuntu Cloud Archive pocket.
    Anything else is a fatal configuration error.
    '''
    if rel == 'distro':
        return
    elif rel[:4] == "ppa:":
        src = rel
        subprocess.check_call(["add-apt-repository", "-y", src])
    elif rel[:3] == "deb":
        # 'deb ...|KEYID' carries an optional signing key after the '|'.
        # (Renamed the ambiguous local 'l' -> num_fields.)
        num_fields = len(rel.split('|'))
        if num_fields == 2:
            src, key = rel.split('|')
            juju_log("Importing PPA key from keyserver for %s" % src)
            import_key(key)
        elif num_fields == 1:
            src = rel

        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(src)
    elif rel[:6] == 'cloud:':
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        rel = rel.split(':')[1]
        u_rel = rel.split('-')[0]
        ca_rel = rel.split('-')[1]

        if u_rel != ubuntu_rel:
            e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
                'version (%s)' % (ca_rel, ubuntu_rel)
            error_out(e)

        if 'staging' in ca_rel:
            # staging is just a regular PPA.
            # (Renamed local from os_rel -> ca_os_rel: the old name
            # shadowed the module-level os_rel cache global.)
            ca_os_rel = ca_rel.split('/')[0]
            ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % ca_os_rel
            cmd = 'add-apt-repository -y %s' % ppa
            subprocess.check_call(cmd.split(' '))
            return

        # map charm config options to actual archive pockets.
        pockets = {
            'folsom': 'precise-updates/folsom',
            'folsom/updates': 'precise-updates/folsom',
            'folsom/proposed': 'precise-proposed/folsom',
            'grizzly': 'precise-updates/grizzly',
            'grizzly/updates': 'precise-updates/grizzly',
            'grizzly/proposed': 'precise-proposed/grizzly',
            'havana': 'precise-updates/havana',
            'havana/updates': 'precise-updates/havana',
            'havana/proposed': 'precise-proposed/havana',
        }

        try:
            pocket = pockets[ca_rel]
        except KeyError:
            e = 'Invalid Cloud Archive release specified: %s' % rel
            error_out(e)

        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
        # The cloud archive is signed by the key in this package.
        apt_install('ubuntu-cloud-keyring', fatal=True)

        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
            f.write(src)
    else:
        error_out("Invalid openstack-release specified: %s" % rel)
|
||||||
|
|
||||||
|
|
||||||
|
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """
    Write an rc file in the charm-delivered directory containing
    exported environment variables provided by env_vars. Any charm
    scripts run outside the juju hook environment can source this
    scriptrc to obtain updated config information necessary to perform
    health checks or service changes.

    :param script_path: str: path of the rc file, relative to charm_dir().
    :param env_vars: name=value pairs to export; the reserved key
        'script_path' is skipped.
    """
    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
    if not os.path.exists(os.path.dirname(juju_rc_path)):
        os.mkdir(os.path.dirname(juju_rc_path))
    with open(juju_rc_path, 'wb') as rc_script:
        rc_script.write("#!/bin/bash\n")
        # Plain loop instead of a side-effect list comprehension.
        for u, p in env_vars.iteritems():
            if u != "script_path":
                rc_script.write('export %s=%s\n' % (u, p))
|
||||||
|
|
||||||
|
|
||||||
|
def openstack_upgrade_available(package):
    """
    Determine whether the configured installation source offers a newer
    version of `package` than the one currently installed.

    :param package: str: Name of installed package.
    :returns: bool: True if an upgrade is available.
    """
    origin = config('openstack-origin')
    installed = get_os_version_package(package)
    available = get_os_version_install_source(origin)
    apt.init()
    # apt.version_compare returns 1 when its first argument is newer.
    return apt.version_compare(available, installed) == 1
|
||||||
|
|
||||||
|
|
||||||
|
def is_ip(address):
    """
    Report whether `address` parses as an IPv4 address.

    :returns: bool: True for a valid IPv4 address, False otherwise.
    """
    # inet_aton raises socket.error for anything it cannot parse.
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
|
||||||
|
|
||||||
|
|
||||||
|
def ns_query(address):
    """Resolve `address` via DNS.

    :param address: a hostname string (queried as an A record) or a
        dns.name.Name (queried as a PTR record).
    :returns: str: first answer, or None when there is no answer or the
        address type is unsupported.
    """
    try:
        import dns.resolver
    except ImportError:
        apt_install('python-dnspython')
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, basestring):
        rtype = 'A'
    else:
        # BUG FIX: previously fell through with `rtype` unbound, raising
        # NameError for unsupported address types.
        return None

    answers = dns.resolver.query(address, rtype)
    if answers:
        return str(answers[0])
    return None
|
||||||
|
|
||||||
|
|
||||||
|
def get_host_ip(hostname):
    """
    Resolve `hostname` to an IP address; when it is already an IP
    address it is returned unchanged.
    """
    return hostname if is_ip(hostname) else ns_query(hostname)
|
||||||
|
|
||||||
|
|
||||||
|
def get_hostname(address):
    """
    Resolve an IP address to a hostname via reverse DNS; when `address`
    is already a hostname it is returned unchanged.

    :returns: str hostname (without trailing dot), or None on lookup
        failure.
    """
    if not is_ip(address):
        return address

    try:
        import dns.reversename
    except ImportError:
        apt_install('python-dnspython')
        import dns.reversename

    reverse_name = dns.reversename.from_address(address)
    answer = ns_query(reverse_name)
    if not answer:
        return None

    # DNS returns a fully-qualified name ending in '.'; strip it.
    return answer[:-1] if answer.endswith('.') else answer
|
||||||
0
hooks/charmhelpers/contrib/storage/__init__.py
Normal file
0
hooks/charmhelpers/contrib/storage/__init__.py
Normal file
359
hooks/charmhelpers/contrib/storage/linux/ceph.py
Normal file
359
hooks/charmhelpers/contrib/storage/linux/ceph.py
Normal file
@@ -0,0 +1,359 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2012 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# This file is sourced from lp:openstack-charm-helpers
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# James Page <james.page@ubuntu.com>
|
||||||
|
# Adam Gandelman <adamg@ubuntu.com>
|
||||||
|
#
|
||||||
|
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
|
||||||
|
from subprocess import (
|
||||||
|
check_call,
|
||||||
|
check_output,
|
||||||
|
CalledProcessError
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
relation_get,
|
||||||
|
relation_ids,
|
||||||
|
related_units,
|
||||||
|
log,
|
||||||
|
INFO,
|
||||||
|
WARNING,
|
||||||
|
ERROR
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
mount,
|
||||||
|
mounts,
|
||||||
|
service_start,
|
||||||
|
service_stop,
|
||||||
|
service_running,
|
||||||
|
umount,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.fetch import (
|
||||||
|
apt_install,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Per-service Ceph client credential paths; '{}' is the client name.
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
KEYFILE = '/etc/ceph/ceph.client.{}.key'

# Minimal ceph.conf template rendered by configure().
CEPH_CONF = """[global]
 auth supported = {auth}
 keyring = {keyring}
 mon host = {mon_hosts}
"""
|
||||||
|
|
||||||
|
|
||||||
|
def install():
    ''' Basic Ceph client installation '''
    ceph_conf_dir = "/etc/ceph"
    if not os.path.exists(ceph_conf_dir):
        os.mkdir(ceph_conf_dir)
    apt_install('ceph-common', fatal=True)
|
||||||
|
|
||||||
|
|
||||||
|
def rbd_exists(service, pool, rbd_img):
    ''' Check to see if a RADOS block device exists '''
    try:
        listing = check_output(['rbd', 'list', '--id', service,
                                '--pool', pool])
    except CalledProcessError:
        # 'rbd list' failing is treated as "image not present".
        return False
    return rbd_img in listing
|
||||||
|
|
||||||
|
|
||||||
|
def create_rbd_image(service, pool, image, sizemb):
    ''' Create a new RADOS block device of `sizemb` megabytes. '''
    check_call([
        'rbd', 'create', image,
        '--size', str(sizemb),
        '--id', service,
        '--pool', pool,
    ])
|
||||||
|
|
||||||
|
|
||||||
|
def pool_exists(service, name):
    ''' Check to see if a RADOS pool already exists '''
    try:
        pools = check_output(['rados', '--id', service, 'lspools'])
    except CalledProcessError:
        # 'rados lspools' failing is treated as "pool not present".
        return False
    return name in pools
|
||||||
|
|
||||||
|
|
||||||
|
def get_osds(service):
    '''
    Return a list of all Ceph Object Storage Daemons currently in the
    cluster.
    '''
    raw = check_output(['ceph', '--id', service,
                        'osd', 'ls', '--format=json'])
    return json.loads(raw)
|
||||||
|
|
||||||
|
|
||||||
|
def create_pool(service, name, replicas=2):
    ''' Create a new RADOS pool (no-op when it already exists). '''
    if pool_exists(service, name):
        log("Ceph pool {} already exists, skipping creation".format(name),
            level=WARNING)
        return
    # Placement-group count per upstream recommended best practice:
    # 100 PGs per OSD, divided by the replica count.
    pg_count = (len(get_osds(service)) * 100 / replicas)
    check_call(['ceph', '--id', service,
                'osd', 'pool', 'create', name, str(pg_count)])
    check_call(['ceph', '--id', service,
                'osd', 'pool', 'set', name, 'size', str(replicas)])
|
||||||
|
|
||||||
|
|
||||||
|
def delete_pool(service, name):
    ''' Delete a RADOS pool from ceph '''
    check_call(['ceph', '--id', service,
                'osd', 'pool', 'delete', name,
                '--yes-i-really-really-mean-it'])
|
||||||
|
|
||||||
|
|
||||||
|
def _keyfile_path(service):
    # Full path of the flat key file for `service` (see KEYFILE template).
    return KEYFILE.format(service)
|
||||||
|
|
||||||
|
|
||||||
|
def _keyring_path(service):
    # Full path of the keyring for `service` (see KEYRING template).
    return KEYRING.format(service)
|
||||||
|
|
||||||
|
|
||||||
|
def create_keyring(service, key):
    ''' Create a new Ceph keyring containing `key` for client.<service>.

    No-op (with a warning) when the keyring file already exists.
    '''
    keyring = _keyring_path(service)
    if os.path.exists(keyring):
        log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
        return
    check_call(['ceph-authtool', keyring,
                '--create-keyring',
                '--name=client.{}'.format(service),
                '--add-key={}'.format(key)])
    log('ceph: Created new ring at %s.' % keyring, level=INFO)
|
||||||
|
|
||||||
|
|
||||||
|
def create_key_file(service, key):
    ''' Write `key` to the flat key file for `service`.

    No-op (with a warning) when the file already exists.
    '''
    keyfile = _keyfile_path(service)
    if os.path.exists(keyfile):
        log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
        return
    with open(keyfile, 'w') as fd:
        fd.write(key)
    log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
|
||||||
|
|
||||||
|
|
||||||
|
def get_ceph_nodes():
    ''' Query named relation 'ceph' to determine current node addresses '''
    return [relation_get('private-address', unit=unit, rid=r_id)
            for r_id in relation_ids('ceph')
            for unit in related_units(r_id)]
|
||||||
|
|
||||||
|
|
||||||
|
def configure(service, key, auth):
    ''' Perform basic configuration of Ceph.

    Writes the client keyring and flat key file, renders /etc/ceph/ceph.conf
    from the current 'ceph' relation members, and loads the rbd kernel
    module.

    :param service: str: ceph client name.
    :param key: str: ceph client key from the relation.
    :param auth: str: value for the 'auth supported' config option.
    '''
    create_keyring(service, key)
    create_key_file(service, key)
    hosts = get_ceph_nodes()
    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
        ceph_conf.write(CEPH_CONF.format(auth=auth,
                                         keyring=_keyring_path(service),
                                         mon_hosts=",".join(map(str, hosts))))
    # Load rbd now and register it for load on reboot.
    modprobe('rbd')
|
||||||
|
|
||||||
|
|
||||||
|
def image_mapped(name):
    ''' Determine whether a RADOS block device is mapped locally '''
    try:
        mapped = check_output(['rbd', 'showmapped'])
    except CalledProcessError:
        # 'rbd showmapped' failing is treated as "not mapped".
        return False
    return name in mapped
|
||||||
|
|
||||||
|
|
||||||
|
def map_block_storage(service, pool, image):
    ''' Map a RADOS block device for local use '''
    check_call([
        'rbd', 'map',
        '{}/{}'.format(pool, image),
        '--user', service,
        '--secret', _keyfile_path(service),
    ])
|
||||||
|
|
||||||
|
|
||||||
|
def filesystem_mounted(fs):
    ''' Determine whether a filesystem is already mounted at `fs` '''
    for mountpoint, device in mounts():
        if mountpoint == fs:
            return True
    return False
|
||||||
|
|
||||||
|
|
||||||
|
def make_filesystem(blk_device, fstype='ext4', timeout=10):
    ''' Make a new filesystem on the specified block device, waiting up
    to `timeout` seconds for the device node to appear first.

    :param blk_device: str: full path of block device to format.
    :param fstype: str: mkfs filesystem type.
    :param timeout: int: seconds to wait for the device node.
    :raises IOError: ENOENT when the device never appears.
    '''
    import errno  # use stdlib errno, not the undocumented os.errno alias
    count = 0
    while not os.path.exists(blk_device):
        if count >= timeout:
            log('ceph: gave up waiting on block device %s' % blk_device,
                level=ERROR)
            raise IOError(errno.ENOENT, os.strerror(errno.ENOENT),
                          blk_device)
        log('ceph: waiting for block device %s to appear' % blk_device,
            level=INFO)
        count += 1
        time.sleep(1)
    # Device is present: format it. (The original while/else is
    # equivalent to straight-line code here, since the only other loop
    # exit is the raise above.)
    log('ceph: Formatting block device %s as filesystem %s.' %
        (blk_device, fstype), level=INFO)
    check_call(['mkfs', '-t', fstype, blk_device])
|
||||||
|
|
||||||
|
|
||||||
|
def place_data_on_block_device(blk_device, data_src_dst):
    ''' Migrate data in data_src_dst to blk_device and then remount.

    Sequence is order-critical: mount the device at /mnt, copy the data
    in, unmount, then re-mount the device over the original location.

    :param blk_device: str: block device to receive the data.
    :param data_src_dst: str: directory whose contents are migrated and
        which becomes the final mount point.
    '''
    # mount block device into /mnt
    mount(blk_device, '/mnt')
    # copy data to /mnt
    copy_files(data_src_dst, '/mnt')
    # umount block device
    umount('/mnt')
    # Grab user/group ID's from original source
    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid
    # re-mount where the data should originally be
    # TODO: persist is currently a NO-OP in core.host
    mount(blk_device, data_src_dst, persist=True)
    # ensure original ownership of new mount.
    os.chown(data_src_dst, uid, gid)
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: re-use
|
||||||
|
def modprobe(module):
    ''' Load a kernel module and configure for auto-load on reboot.

    :param module: str: name of the kernel module to load.
    '''
    log('ceph: Loading kernel module', level=INFO)
    check_call(['modprobe', module])
    with open('/etc/modules', 'r+') as modules:
        if module not in modules.read():
            # read() leaves the position at EOF, so this appends.
            # BUG FIX: write a trailing newline so the entry does not
            # run into text appended to /etc/modules later.
            modules.write(module + '\n')
|
||||||
|
|
||||||
|
|
||||||
|
def copy_files(src, dst, symlinks=False, ignore=None):
    ''' Recursively copy the contents of directory `src` into `dst`.

    Subdirectories are copied with shutil.copytree (honouring the
    `symlinks` and `ignore` arguments); files with shutil.copy2.
    '''
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if os.path.isdir(src_path):
            shutil.copytree(src_path, dst_path, symlinks, ignore)
        else:
            shutil.copy2(src_path, dst_path)
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=[]):
    """
    NOTE: This function must only be called from a single service unit for
    the same rbd_img otherwise data loss will occur.

    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being re-mounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.

    :param service: str: ceph client name.
    :param pool: str: RADOS pool to create/use.
    :param rbd_img: str: RBD image name within the pool.
    :param sizemb: int: image size in megabytes when creating.
    :param mount_point: str: directory whose data is migrated and mounted.
    :param blk_device: str: local block device the image maps to.
    :param fstype: str: filesystem type for mkfs.
    :param system_services: list: services stopped/started around migration.
        NOTE(review): mutable default argument; safe only because it is
        never mutated here.
    """
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        log('ceph: Creating new pool {}.'.format(pool))
        create_pool(service, pool)

    if not rbd_exists(service, pool, rbd_img):
        log('ceph: Creating RBD image ({}).'.format(rbd_img))
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy!  Need to at least add an fstab entry
    # otherwise this hook will blow away existing data if its executed
    # after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if service_running(svc):
                log('ceph: Stopping services {} prior to migrating data.'
                    .format(svc))
                service_stop(svc)

        place_data_on_block_device(blk_device, mount_point)

        for svc in system_services:
            log('ceph: Starting service {} after migrating data.'
                .format(svc))
            service_start(svc)
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_ceph_keyring(service, user=None, group=None):
    '''
    Ensure a ceph keyring is created for a named service, using a key
    advertised on the 'ceph' relation, and optionally ensure user and
    group ownership.

    :returns: bool: False if no ceph key is available in relation state.
    '''
    key = None
    for rid in relation_ids('ceph'):
        for unit in related_units(rid):
            key = relation_get('key', rid=rid, unit=unit)
            if key:
                break
        if key:
            # BUG FIX: previously only the inner loop broke, so a later
            # relation with no key could clobber an already-found key
            # with None. Break out of the outer loop too.
            break
    if not key:
        return False
    create_keyring(service=service, key=key)
    keyring = _keyring_path(service)
    if user and group:
        check_call(['chown', '%s.%s' % (user, group), keyring])
    return True
|
||||||
62
hooks/charmhelpers/contrib/storage/linux/loopback.py
Normal file
62
hooks/charmhelpers/contrib/storage/linux/loopback.py
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
|
||||||
|
from subprocess import (
|
||||||
|
check_call,
|
||||||
|
check_output,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
##################################################
|
||||||
|
# loopback device helpers.
|
||||||
|
##################################################
|
||||||
|
def loopback_devices():
    '''
    Parse through 'losetup -a' output to determine currently mapped
    loopback devices. Output is expected to look like:

        /dev/loop0: [0807]:961814 (/tmp/my.img)

    :returns: dict: a dict mapping {loopback_dev: backing_file}
    '''
    loopbacks = {}
    cmd = ['losetup', '-a']
    devs = [d.strip().split(' ') for d in
            check_output(cmd).splitlines() if d != '']
    for dev, _, f in devs:
        # Raw string for the regex: '\(' in a plain string is an
        # invalid escape sequence.
        loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
    return loopbacks
|
||||||
|
|
||||||
|
|
||||||
|
def create_loopback(file_path):
    '''
    Create a loopback device for a given backing file.

    :returns: str: Full path to new loopback device (eg, /dev/loop0)
    '''
    backing = os.path.abspath(file_path)
    check_call(['losetup', '--find', backing])
    # losetup --find does not print the device; look it up afterwards.
    for device, mapped_file in loopback_devices().iteritems():
        if mapped_file == backing:
            return device
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_loopback_device(path, size):
    '''
    Ensure a loopback device exists for a given backing file path and
    size. If no loopback device is mapped to the file, the backing file
    is created (via truncate) when missing and a new device is mapped.

    TODO: Confirm size of found loopback device.

    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
    '''
    for device, backing_file in loopback_devices().iteritems():
        if backing_file == path:
            return device

    if not os.path.exists(path):
        check_call(['truncate', '--size', size, path])

    return create_loopback(path)
|
||||||
88
hooks/charmhelpers/contrib/storage/linux/lvm.py
Normal file
88
hooks/charmhelpers/contrib/storage/linux/lvm.py
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
from subprocess import (
|
||||||
|
CalledProcessError,
|
||||||
|
check_call,
|
||||||
|
check_output,
|
||||||
|
Popen,
|
||||||
|
PIPE,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
##################################################
|
||||||
|
# LVM helpers.
|
||||||
|
##################################################
|
||||||
|
def deactivate_lvm_volume_group(block_device):
    '''
    Deactivate any volume group associated with an LVM physical volume.

    :param block_device: str: Full path to LVM physical volume
    '''
    vg = list_lvm_volume_group(block_device)
    if vg:
        cmd = ['vgchange', '-an', vg]
        check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def is_lvm_physical_volume(block_device):
    '''
    Determine whether a block device is initialized as an LVM PV.

    :param block_device: str: Full path of block device to inspect.
    :returns: boolean: True if block device is a PV, False if not.
    '''
    # pvdisplay exits non-zero for devices that are not PVs.
    try:
        check_output(['pvdisplay', block_device])
    except CalledProcessError:
        return False
    return True
|
||||||
|
|
||||||
|
|
||||||
|
def remove_lvm_physical_volume(block_device):
    '''
    Remove LVM PV signatures from a given block device.

    :param block_device: str: Full path of block device to scrub.
    '''
    # pvremove prompts for confirmation even with -ff; answer 'y' on stdin.
    proc = Popen(['pvremove', '-ff', block_device], stdin=PIPE)
    proc.communicate(input='y\n')
|
||||||
|
|
||||||
|
|
||||||
|
def list_lvm_volume_group(block_device):
    '''
    List LVM volume group associated with a given block device.

    Assumes block device is a valid LVM PV.

    :param block_device: str: Full path of block device to inspect.
    :returns: str: Name of volume group associated with block device
        or None
    '''
    vg = None
    pvd = check_output(['pvdisplay', block_device]).splitlines()
    for line in pvd:
        if line.strip().startswith('VG Name'):
            # The VG name is the last whitespace-separated token;
            # replaces the convoluted ' '.join(l.split()).split(' ').pop().
            vg = line.split().pop()
    return vg
|
||||||
|
|
||||||
|
|
||||||
|
def create_lvm_physical_volume(block_device):
    '''
    Initialize a block device as an LVM physical volume.

    :param block_device: str: Full path of block device to initialize.
    '''
    cmd = ['pvcreate', block_device]
    check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def create_lvm_volume_group(volume_group, block_device):
    '''
    Create an LVM volume group backed by a given block device.

    Assumes block device has already been initialized as an LVM PV.

    :param volume_group: str: Name of volume group to create.
    :param block_device: str: Full path of PV-initialized block device.
    '''
    cmd = ['vgcreate', volume_group, block_device]
    check_call(cmd)
|
||||||
25
hooks/charmhelpers/contrib/storage/linux/utils.py
Normal file
25
hooks/charmhelpers/contrib/storage/linux/utils.py
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
from os import stat
|
||||||
|
from stat import S_ISBLK
|
||||||
|
|
||||||
|
from subprocess import (
|
||||||
|
check_call
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def is_block_device(path):
    '''
    Confirm device at path is a valid block device node.

    :returns: boolean: True if path is a block device, False if not.
    '''
    mode = stat(path).st_mode
    return S_ISBLK(mode)
|
||||||
|
|
||||||
|
|
||||||
|
def zap_disk(block_device):
    '''
    Clear a block device of partition table. Relies on sgdisk, which is
    installed as part of the 'gdisk' package in Ubuntu.

    :param block_device: str: Full path of block device to clean.
    '''
    check_call(['sgdisk', '--zap-all', block_device])
|
||||||
0
hooks/charmhelpers/core/__init__.py
Normal file
0
hooks/charmhelpers/core/__init__.py
Normal file
340
hooks/charmhelpers/core/hookenv.py
Normal file
340
hooks/charmhelpers/core/hookenv.py
Normal file
@@ -0,0 +1,340 @@
|
|||||||
|
"Interactions with the Juju environment"
|
||||||
|
# Copyright 2013 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||||
|
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
import yaml
|
||||||
|
import subprocess
|
||||||
|
import UserDict
|
||||||
|
|
||||||
|
# Log level names understood by juju-log (see log()).
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
INFO = "INFO"
DEBUG = "DEBUG"
# Unique sentinel object for "attribute not found" checks (Serializable).
MARKER = object()

# Module-level memoization store shared by @cached and flush().
cache = {}
|
||||||
|
|
||||||
|
|
||||||
|
def cached(func):
    ''' Cache return values for multiple executions of func + args

    For example:

        @cached
        def unit_get(attribute):
            pass

        unit_get('test')

    will cache the result of unit_get + 'test' for future calls.
    Entries live in the module-level `cache` dict and can be removed
    with flush().
    '''
    def wrapper(*args, **kwargs):
        global cache
        # Key on the string form of (func, args, kwargs).
        key = str((func, args, kwargs))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
def flush(key):
    ''' Flushes any entries from function cache where the
    key is found in the function+args '''
    # Collect first, then delete, to avoid mutating while iterating.
    stale = [entry for entry in cache if key in entry]
    for entry in stale:
        del cache[entry]
|
||||||
|
|
||||||
|
|
||||||
|
def log(message, level=None):
    "Write a message to the juju log via the juju-log tool"
    command = ['juju-log']
    if level:
        command.extend(['-l', level])
    command.append(message)
    subprocess.call(command)
|
||||||
|
|
||||||
|
|
||||||
|
class Serializable(UserDict.IterableUserDict):
    "Wrapper, an object that can be serialized to yaml or json"

    def __init__(self, obj):
        # wrap the object
        UserDict.IterableUserDict.__init__(self)
        self.data = obj

    def __getattr__(self, attr):
        # NOTE: __getattr__ is only invoked for attributes not found by
        # normal lookup, so this proxies misses to the wrapped object.
        # See if this object has attribute.
        if attr in ("json", "yaml", "data"):
            return self.__dict__[attr]
        # Check for attribute in wrapped object (MARKER distinguishes a
        # genuine miss from a stored None).
        got = getattr(self.data, attr, MARKER)
        if got is not MARKER:
            return got
        # Proxy to the wrapped object via dict interface.
        try:
            return self.data[attr]
        except KeyError:
            raise AttributeError(attr)

    def __getstate__(self):
        # Pickle as a standard dictionary.
        return self.data

    def __setstate__(self, state):
        # Unpickle into our wrapper.
        self.data = state

    def json(self):
        "Serialize the object to json"
        return json.dumps(self.data)

    def yaml(self):
        "Serialize the object to yaml"
        return yaml.dump(self.data)
|
||||||
|
|
||||||
|
|
||||||
|
def execution_environment():
    """A convenient bundling of the current execution context"""
    context = {'conf': config()}
    # Relation details are only available inside relation hooks.
    if relation_id():
        context['reltype'] = relation_type()
        context['relid'] = relation_id()
        context['rel'] = relation_get()
    context['unit'] = local_unit()
    context['rels'] = relations()
    context['env'] = os.environ
    return context
|
||||||
|
|
||||||
|
|
||||||
|
def in_relation_hook():
    "Determine whether we're running in a relation hook"
    # juju sets JUJU_RELATION only for relation hooks.
    return 'JUJU_RELATION' in os.environ
|
||||||
|
|
||||||
|
|
||||||
|
def relation_type():
    "The scope for the current relation hook"
    # None when not running inside a relation hook.
    return os.environ.get('JUJU_RELATION')
|
||||||
|
|
||||||
|
|
||||||
|
def relation_id():
    "The relation ID for the current relation hook"
    # None when not running inside a relation hook.
    return os.environ.get('JUJU_RELATION_ID')
|
||||||
|
|
||||||
|
|
||||||
|
def local_unit():
    "Local unit ID"
    # juju always sets JUJU_UNIT_NAME for hook execution; a KeyError
    # here means we are not running under juju at all.
    unit_name = os.environ['JUJU_UNIT_NAME']
    return unit_name
|
||||||
|
|
||||||
|
|
||||||
|
def remote_unit():
    "The remote unit for the current relation hook"
    # Only set while running a relation hook; KeyError otherwise.
    unit_name = os.environ['JUJU_REMOTE_UNIT']
    return unit_name
|
||||||
|
|
||||||
|
|
||||||
|
def service_name():
    "The name service group this unit belongs to"
    # Unit names look like '<service>/<number>'.
    return local_unit().partition('/')[0]
|
||||||
|
|
||||||
|
|
||||||
|
@cached
def config(scope=None):
    "Juju charm configuration, fetched via config-get as JSON"
    cmd = ['config-get']
    if scope is not None:
        cmd.append(scope)
    cmd.append('--format=json')
    try:
        raw = subprocess.check_output(cmd)
        return json.loads(raw)
    except ValueError:
        # config-get produced no parseable JSON.
        return None
|
||||||
|
|
||||||
|
|
||||||
|
@cached
def relation_get(attribute=None, unit=None, rid=None):
    """Fetch relation data via relation-get.

    :param attribute: specific key to fetch, or None for all ('-').
    :param unit: remote unit to query, when given.
    :param rid: relation id to query, when given.
    :returns: parsed JSON value, or None on unparseable output.
    """
    cmd = ['relation-get', '--format=json']
    if rid:
        cmd.append('-r')
        cmd.append(rid)
    cmd.append(attribute if attribute else '-')
    if unit:
        cmd.append(unit)
    try:
        return json.loads(subprocess.check_output(cmd))
    except ValueError:
        return None
|
||||||
|
|
||||||
|
|
||||||
|
def relation_set(relation_id=None, relation_settings={}, **kwargs):
|
||||||
|
relation_cmd_line = ['relation-set']
|
||||||
|
if relation_id is not None:
|
||||||
|
relation_cmd_line.extend(('-r', relation_id))
|
||||||
|
for k, v in (relation_settings.items() + kwargs.items()):
|
||||||
|
if v is None:
|
||||||
|
relation_cmd_line.append('{}='.format(k))
|
||||||
|
else:
|
||||||
|
relation_cmd_line.append('{}={}'.format(k, v))
|
||||||
|
subprocess.check_call(relation_cmd_line)
|
||||||
|
# Flush cache of any relation-gets for local unit
|
||||||
|
flush(local_unit())
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relation_ids(reltype=None):
|
||||||
|
"A list of relation_ids"
|
||||||
|
reltype = reltype or relation_type()
|
||||||
|
relid_cmd_line = ['relation-ids', '--format=json']
|
||||||
|
if reltype is not None:
|
||||||
|
relid_cmd_line.append(reltype)
|
||||||
|
return json.loads(subprocess.check_output(relid_cmd_line)) or []
|
||||||
|
return []
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def related_units(relid=None):
|
||||||
|
"A list of related units"
|
||||||
|
relid = relid or relation_id()
|
||||||
|
units_cmd_line = ['relation-list', '--format=json']
|
||||||
|
if relid is not None:
|
||||||
|
units_cmd_line.extend(('-r', relid))
|
||||||
|
return json.loads(subprocess.check_output(units_cmd_line)) or []
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relation_for_unit(unit=None, rid=None):
|
||||||
|
"Get the json represenation of a unit's relation"
|
||||||
|
unit = unit or remote_unit()
|
||||||
|
relation = relation_get(unit=unit, rid=rid)
|
||||||
|
for key in relation:
|
||||||
|
if key.endswith('-list'):
|
||||||
|
relation[key] = relation[key].split()
|
||||||
|
relation['__unit__'] = unit
|
||||||
|
return relation
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relations_for_id(relid=None):
|
||||||
|
"Get relations of a specific relation ID"
|
||||||
|
relation_data = []
|
||||||
|
relid = relid or relation_ids()
|
||||||
|
for unit in related_units(relid):
|
||||||
|
unit_data = relation_for_unit(unit, relid)
|
||||||
|
unit_data['__relid__'] = relid
|
||||||
|
relation_data.append(unit_data)
|
||||||
|
return relation_data
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relations_of_type(reltype=None):
|
||||||
|
"Get relations of a specific type"
|
||||||
|
relation_data = []
|
||||||
|
reltype = reltype or relation_type()
|
||||||
|
for relid in relation_ids(reltype):
|
||||||
|
for relation in relations_for_id(relid):
|
||||||
|
relation['__relid__'] = relid
|
||||||
|
relation_data.append(relation)
|
||||||
|
return relation_data
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relation_types():
|
||||||
|
"Get a list of relation types supported by this charm"
|
||||||
|
charmdir = os.environ.get('CHARM_DIR', '')
|
||||||
|
mdf = open(os.path.join(charmdir, 'metadata.yaml'))
|
||||||
|
md = yaml.safe_load(mdf)
|
||||||
|
rel_types = []
|
||||||
|
for key in ('provides', 'requires', 'peers'):
|
||||||
|
section = md.get(key)
|
||||||
|
if section:
|
||||||
|
rel_types.extend(section.keys())
|
||||||
|
mdf.close()
|
||||||
|
return rel_types
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relations():
|
||||||
|
rels = {}
|
||||||
|
for reltype in relation_types():
|
||||||
|
relids = {}
|
||||||
|
for relid in relation_ids(reltype):
|
||||||
|
units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
|
||||||
|
for unit in related_units(relid):
|
||||||
|
reldata = relation_get(unit=unit, rid=relid)
|
||||||
|
units[unit] = reldata
|
||||||
|
relids[relid] = units
|
||||||
|
rels[reltype] = relids
|
||||||
|
return rels
|
||||||
|
|
||||||
|
|
||||||
|
def open_port(port, protocol="TCP"):
|
||||||
|
"Open a service network port"
|
||||||
|
_args = ['open-port']
|
||||||
|
_args.append('{}/{}'.format(port, protocol))
|
||||||
|
subprocess.check_call(_args)
|
||||||
|
|
||||||
|
|
||||||
|
def close_port(port, protocol="TCP"):
|
||||||
|
"Close a service network port"
|
||||||
|
_args = ['close-port']
|
||||||
|
_args.append('{}/{}'.format(port, protocol))
|
||||||
|
subprocess.check_call(_args)
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def unit_get(attribute):
|
||||||
|
_args = ['unit-get', '--format=json', attribute]
|
||||||
|
try:
|
||||||
|
return json.loads(subprocess.check_output(_args))
|
||||||
|
except ValueError:
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def unit_private_ip():
|
||||||
|
return unit_get('private-address')
|
||||||
|
|
||||||
|
|
||||||
|
class UnregisteredHookError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class Hooks(object):
|
||||||
|
def __init__(self):
|
||||||
|
super(Hooks, self).__init__()
|
||||||
|
self._hooks = {}
|
||||||
|
|
||||||
|
def register(self, name, function):
|
||||||
|
self._hooks[name] = function
|
||||||
|
|
||||||
|
def execute(self, args):
|
||||||
|
hook_name = os.path.basename(args[0])
|
||||||
|
if hook_name in self._hooks:
|
||||||
|
self._hooks[hook_name]()
|
||||||
|
else:
|
||||||
|
raise UnregisteredHookError(hook_name)
|
||||||
|
|
||||||
|
def hook(self, *hook_names):
|
||||||
|
def wrapper(decorated):
|
||||||
|
for hook_name in hook_names:
|
||||||
|
self.register(hook_name, decorated)
|
||||||
|
else:
|
||||||
|
self.register(decorated.__name__, decorated)
|
||||||
|
if '_' in decorated.__name__:
|
||||||
|
self.register(
|
||||||
|
decorated.__name__.replace('_', '-'), decorated)
|
||||||
|
return decorated
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
def charm_dir():
|
||||||
|
return os.environ.get('CHARM_DIR')
|
||||||
241
hooks/charmhelpers/core/host.py
Normal file
241
hooks/charmhelpers/core/host.py
Normal file
@@ -0,0 +1,241 @@
|
|||||||
|
"""Tools for working with the host system"""
|
||||||
|
# Copyright 2012 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Nick Moffitt <nick.moffitt@canonical.com>
|
||||||
|
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
||||||
|
|
||||||
|
import os
|
||||||
|
import pwd
|
||||||
|
import grp
|
||||||
|
import random
|
||||||
|
import string
|
||||||
|
import subprocess
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
from collections import OrderedDict
|
||||||
|
|
||||||
|
from hookenv import log
|
||||||
|
|
||||||
|
|
||||||
|
def service_start(service_name):
|
||||||
|
return service('start', service_name)
|
||||||
|
|
||||||
|
|
||||||
|
def service_stop(service_name):
|
||||||
|
return service('stop', service_name)
|
||||||
|
|
||||||
|
|
||||||
|
def service_restart(service_name):
|
||||||
|
return service('restart', service_name)
|
||||||
|
|
||||||
|
|
||||||
|
def service_reload(service_name, restart_on_failure=False):
|
||||||
|
service_result = service('reload', service_name)
|
||||||
|
if not service_result and restart_on_failure:
|
||||||
|
service_result = service('restart', service_name)
|
||||||
|
return service_result
|
||||||
|
|
||||||
|
|
||||||
|
def service(action, service_name):
|
||||||
|
cmd = ['service', service_name, action]
|
||||||
|
return subprocess.call(cmd) == 0
|
||||||
|
|
||||||
|
|
||||||
|
def service_running(service):
|
||||||
|
try:
|
||||||
|
output = subprocess.check_output(['service', service, 'status'])
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
if ("start/running" in output or "is running" in output):
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def adduser(username, password=None, shell='/bin/bash', system_user=False):
|
||||||
|
"""Add a user"""
|
||||||
|
try:
|
||||||
|
user_info = pwd.getpwnam(username)
|
||||||
|
log('user {0} already exists!'.format(username))
|
||||||
|
except KeyError:
|
||||||
|
log('creating user {0}'.format(username))
|
||||||
|
cmd = ['useradd']
|
||||||
|
if system_user or password is None:
|
||||||
|
cmd.append('--system')
|
||||||
|
else:
|
||||||
|
cmd.extend([
|
||||||
|
'--create-home',
|
||||||
|
'--shell', shell,
|
||||||
|
'--password', password,
|
||||||
|
])
|
||||||
|
cmd.append(username)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
user_info = pwd.getpwnam(username)
|
||||||
|
return user_info
|
||||||
|
|
||||||
|
|
||||||
|
def add_user_to_group(username, group):
|
||||||
|
"""Add a user to a group"""
|
||||||
|
cmd = [
|
||||||
|
'gpasswd', '-a',
|
||||||
|
username,
|
||||||
|
group
|
||||||
|
]
|
||||||
|
log("Adding user {} to group {}".format(username, group))
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def rsync(from_path, to_path, flags='-r', options=None):
|
||||||
|
"""Replicate the contents of a path"""
|
||||||
|
options = options or ['--delete', '--executability']
|
||||||
|
cmd = ['/usr/bin/rsync', flags]
|
||||||
|
cmd.extend(options)
|
||||||
|
cmd.append(from_path)
|
||||||
|
cmd.append(to_path)
|
||||||
|
log(" ".join(cmd))
|
||||||
|
return subprocess.check_output(cmd).strip()
|
||||||
|
|
||||||
|
|
||||||
|
def symlink(source, destination):
|
||||||
|
"""Create a symbolic link"""
|
||||||
|
log("Symlinking {} as {}".format(source, destination))
|
||||||
|
cmd = [
|
||||||
|
'ln',
|
||||||
|
'-sf',
|
||||||
|
source,
|
||||||
|
destination,
|
||||||
|
]
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def mkdir(path, owner='root', group='root', perms=0555, force=False):
|
||||||
|
"""Create a directory"""
|
||||||
|
log("Making dir {} {}:{} {:o}".format(path, owner, group,
|
||||||
|
perms))
|
||||||
|
uid = pwd.getpwnam(owner).pw_uid
|
||||||
|
gid = grp.getgrnam(group).gr_gid
|
||||||
|
realpath = os.path.abspath(path)
|
||||||
|
if os.path.exists(realpath):
|
||||||
|
if force and not os.path.isdir(realpath):
|
||||||
|
log("Removing non-directory file {} prior to mkdir()".format(path))
|
||||||
|
os.unlink(realpath)
|
||||||
|
else:
|
||||||
|
os.makedirs(realpath, perms)
|
||||||
|
os.chown(realpath, uid, gid)
|
||||||
|
|
||||||
|
|
||||||
|
def write_file(path, content, owner='root', group='root', perms=0444):
|
||||||
|
"""Create or overwrite a file with the contents of a string"""
|
||||||
|
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
|
||||||
|
uid = pwd.getpwnam(owner).pw_uid
|
||||||
|
gid = grp.getgrnam(group).gr_gid
|
||||||
|
with open(path, 'w') as target:
|
||||||
|
os.fchown(target.fileno(), uid, gid)
|
||||||
|
os.fchmod(target.fileno(), perms)
|
||||||
|
target.write(content)
|
||||||
|
|
||||||
|
|
||||||
|
def mount(device, mountpoint, options=None, persist=False):
|
||||||
|
'''Mount a filesystem'''
|
||||||
|
cmd_args = ['mount']
|
||||||
|
if options is not None:
|
||||||
|
cmd_args.extend(['-o', options])
|
||||||
|
cmd_args.extend([device, mountpoint])
|
||||||
|
try:
|
||||||
|
subprocess.check_output(cmd_args)
|
||||||
|
except subprocess.CalledProcessError, e:
|
||||||
|
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
|
||||||
|
return False
|
||||||
|
if persist:
|
||||||
|
# TODO: update fstab
|
||||||
|
pass
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def umount(mountpoint, persist=False):
|
||||||
|
'''Unmount a filesystem'''
|
||||||
|
cmd_args = ['umount', mountpoint]
|
||||||
|
try:
|
||||||
|
subprocess.check_output(cmd_args)
|
||||||
|
except subprocess.CalledProcessError, e:
|
||||||
|
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
|
||||||
|
return False
|
||||||
|
if persist:
|
||||||
|
# TODO: update fstab
|
||||||
|
pass
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def mounts():
|
||||||
|
'''List of all mounted volumes as [[mountpoint,device],[...]]'''
|
||||||
|
with open('/proc/mounts') as f:
|
||||||
|
# [['/mount/point','/dev/path'],[...]]
|
||||||
|
system_mounts = [m[1::-1] for m in [l.strip().split()
|
||||||
|
for l in f.readlines()]]
|
||||||
|
return system_mounts
|
||||||
|
|
||||||
|
|
||||||
|
def file_hash(path):
|
||||||
|
''' Generate a md5 hash of the contents of 'path' or None if not found '''
|
||||||
|
if os.path.exists(path):
|
||||||
|
h = hashlib.md5()
|
||||||
|
with open(path, 'r') as source:
|
||||||
|
h.update(source.read()) # IGNORE:E1101 - it does have update
|
||||||
|
return h.hexdigest()
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def restart_on_change(restart_map):
|
||||||
|
''' Restart services based on configuration files changing
|
||||||
|
|
||||||
|
This function is used a decorator, for example
|
||||||
|
|
||||||
|
@restart_on_change({
|
||||||
|
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
|
||||||
|
})
|
||||||
|
def ceph_client_changed():
|
||||||
|
...
|
||||||
|
|
||||||
|
In this example, the cinder-api and cinder-volume services
|
||||||
|
would be restarted if /etc/ceph/ceph.conf is changed by the
|
||||||
|
ceph_client_changed function.
|
||||||
|
'''
|
||||||
|
def wrap(f):
|
||||||
|
def wrapped_f(*args):
|
||||||
|
checksums = {}
|
||||||
|
for path in restart_map:
|
||||||
|
checksums[path] = file_hash(path)
|
||||||
|
f(*args)
|
||||||
|
restarts = []
|
||||||
|
for path in restart_map:
|
||||||
|
if checksums[path] != file_hash(path):
|
||||||
|
restarts += restart_map[path]
|
||||||
|
for service_name in list(OrderedDict.fromkeys(restarts)):
|
||||||
|
service('restart', service_name)
|
||||||
|
return wrapped_f
|
||||||
|
return wrap
|
||||||
|
|
||||||
|
|
||||||
|
def lsb_release():
|
||||||
|
'''Return /etc/lsb-release in a dict'''
|
||||||
|
d = {}
|
||||||
|
with open('/etc/lsb-release', 'r') as lsb:
|
||||||
|
for l in lsb:
|
||||||
|
k, v = l.split('=')
|
||||||
|
d[k.strip()] = v.strip()
|
||||||
|
return d
|
||||||
|
|
||||||
|
|
||||||
|
def pwgen(length=None):
|
||||||
|
'''Generate a random pasword.'''
|
||||||
|
if length is None:
|
||||||
|
length = random.choice(range(35, 45))
|
||||||
|
alphanumeric_chars = [
|
||||||
|
l for l in (string.letters + string.digits)
|
||||||
|
if l not in 'l0QD1vAEIOUaeiou']
|
||||||
|
random_chars = [
|
||||||
|
random.choice(alphanumeric_chars) for _ in range(length)]
|
||||||
|
return(''.join(random_chars))
|
||||||
209
hooks/charmhelpers/fetch/__init__.py
Normal file
209
hooks/charmhelpers/fetch/__init__.py
Normal file
@@ -0,0 +1,209 @@
|
|||||||
|
import importlib
|
||||||
|
from yaml import safe_load
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
lsb_release
|
||||||
|
)
|
||||||
|
from urlparse import (
|
||||||
|
urlparse,
|
||||||
|
urlunparse,
|
||||||
|
)
|
||||||
|
import subprocess
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
config,
|
||||||
|
log,
|
||||||
|
)
|
||||||
|
import apt_pkg
|
||||||
|
|
||||||
|
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
|
||||||
|
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
|
||||||
|
"""
|
||||||
|
PROPOSED_POCKET = """# Proposed
|
||||||
|
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def filter_installed_packages(packages):
|
||||||
|
"""Returns a list of packages that require installation"""
|
||||||
|
apt_pkg.init()
|
||||||
|
cache = apt_pkg.Cache()
|
||||||
|
_pkgs = []
|
||||||
|
for package in packages:
|
||||||
|
try:
|
||||||
|
p = cache[package]
|
||||||
|
p.current_ver or _pkgs.append(package)
|
||||||
|
except KeyError:
|
||||||
|
log('Package {} has no installation candidate.'.format(package),
|
||||||
|
level='WARNING')
|
||||||
|
_pkgs.append(package)
|
||||||
|
return _pkgs
|
||||||
|
|
||||||
|
|
||||||
|
def apt_install(packages, options=None, fatal=False):
|
||||||
|
"""Install one or more packages"""
|
||||||
|
options = options or []
|
||||||
|
cmd = ['apt-get', '-y']
|
||||||
|
cmd.extend(options)
|
||||||
|
cmd.append('install')
|
||||||
|
if isinstance(packages, basestring):
|
||||||
|
cmd.append(packages)
|
||||||
|
else:
|
||||||
|
cmd.extend(packages)
|
||||||
|
log("Installing {} with options: {}".format(packages,
|
||||||
|
options))
|
||||||
|
if fatal:
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
else:
|
||||||
|
subprocess.call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def apt_update(fatal=False):
|
||||||
|
"""Update local apt cache"""
|
||||||
|
cmd = ['apt-get', 'update']
|
||||||
|
if fatal:
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
else:
|
||||||
|
subprocess.call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def apt_purge(packages, fatal=False):
|
||||||
|
"""Purge one or more packages"""
|
||||||
|
cmd = ['apt-get', '-y', 'purge']
|
||||||
|
if isinstance(packages, basestring):
|
||||||
|
cmd.append(packages)
|
||||||
|
else:
|
||||||
|
cmd.extend(packages)
|
||||||
|
log("Purging {}".format(packages))
|
||||||
|
if fatal:
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
else:
|
||||||
|
subprocess.call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def add_source(source, key=None):
|
||||||
|
if ((source.startswith('ppa:') or
|
||||||
|
source.startswith('http:'))):
|
||||||
|
subprocess.check_call(['add-apt-repository', '--yes', source])
|
||||||
|
elif source.startswith('cloud:'):
|
||||||
|
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
|
||||||
|
fatal=True)
|
||||||
|
pocket = source.split(':')[-1]
|
||||||
|
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
|
||||||
|
apt.write(CLOUD_ARCHIVE.format(pocket))
|
||||||
|
elif source == 'proposed':
|
||||||
|
release = lsb_release()['DISTRIB_CODENAME']
|
||||||
|
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
|
||||||
|
apt.write(PROPOSED_POCKET.format(release))
|
||||||
|
if key:
|
||||||
|
subprocess.check_call(['apt-key', 'import', key])
|
||||||
|
|
||||||
|
|
||||||
|
class SourceConfigError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def configure_sources(update=False,
|
||||||
|
sources_var='install_sources',
|
||||||
|
keys_var='install_keys'):
|
||||||
|
"""
|
||||||
|
Configure multiple sources from charm configuration
|
||||||
|
|
||||||
|
Example config:
|
||||||
|
install_sources:
|
||||||
|
- "ppa:foo"
|
||||||
|
- "http://example.com/repo precise main"
|
||||||
|
install_keys:
|
||||||
|
- null
|
||||||
|
- "a1b2c3d4"
|
||||||
|
|
||||||
|
Note that 'null' (a.k.a. None) should not be quoted.
|
||||||
|
"""
|
||||||
|
sources = safe_load(config(sources_var))
|
||||||
|
keys = safe_load(config(keys_var))
|
||||||
|
if isinstance(sources, basestring) and isinstance(keys, basestring):
|
||||||
|
add_source(sources, keys)
|
||||||
|
else:
|
||||||
|
if not len(sources) == len(keys):
|
||||||
|
msg = 'Install sources and keys lists are different lengths'
|
||||||
|
raise SourceConfigError(msg)
|
||||||
|
for src_num in range(len(sources)):
|
||||||
|
add_source(sources[src_num], keys[src_num])
|
||||||
|
if update:
|
||||||
|
apt_update(fatal=True)
|
||||||
|
|
||||||
|
# The order of this list is very important. Handlers should be listed in from
|
||||||
|
# least- to most-specific URL matching.
|
||||||
|
FETCH_HANDLERS = (
|
||||||
|
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
|
||||||
|
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class UnhandledSource(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def install_remote(source):
|
||||||
|
"""
|
||||||
|
Install a file tree from a remote source
|
||||||
|
|
||||||
|
The specified source should be a url of the form:
|
||||||
|
scheme://[host]/path[#[option=value][&...]]
|
||||||
|
|
||||||
|
Schemes supported are based on this modules submodules
|
||||||
|
Options supported are submodule-specific"""
|
||||||
|
# We ONLY check for True here because can_handle may return a string
|
||||||
|
# explaining why it can't handle a given source.
|
||||||
|
handlers = [h for h in plugins() if h.can_handle(source) is True]
|
||||||
|
installed_to = None
|
||||||
|
for handler in handlers:
|
||||||
|
try:
|
||||||
|
installed_to = handler.install(source)
|
||||||
|
except UnhandledSource:
|
||||||
|
pass
|
||||||
|
if not installed_to:
|
||||||
|
raise UnhandledSource("No handler found for source {}".format(source))
|
||||||
|
return installed_to
|
||||||
|
|
||||||
|
|
||||||
|
def install_from_config(config_var_name):
|
||||||
|
charm_config = config()
|
||||||
|
source = charm_config[config_var_name]
|
||||||
|
return install_remote(source)
|
||||||
|
|
||||||
|
|
||||||
|
class BaseFetchHandler(object):
|
||||||
|
"""Base class for FetchHandler implementations in fetch plugins"""
|
||||||
|
def can_handle(self, source):
|
||||||
|
"""Returns True if the source can be handled. Otherwise returns
|
||||||
|
a string explaining why it cannot"""
|
||||||
|
return "Wrong source type"
|
||||||
|
|
||||||
|
def install(self, source):
|
||||||
|
"""Try to download and unpack the source. Return the path to the
|
||||||
|
unpacked files or raise UnhandledSource."""
|
||||||
|
raise UnhandledSource("Wrong source type {}".format(source))
|
||||||
|
|
||||||
|
def parse_url(self, url):
|
||||||
|
return urlparse(url)
|
||||||
|
|
||||||
|
def base_url(self, url):
|
||||||
|
"""Return url without querystring or fragment"""
|
||||||
|
parts = list(self.parse_url(url))
|
||||||
|
parts[4:] = ['' for i in parts[4:]]
|
||||||
|
return urlunparse(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def plugins(fetch_handlers=None):
|
||||||
|
if not fetch_handlers:
|
||||||
|
fetch_handlers = FETCH_HANDLERS
|
||||||
|
plugin_list = []
|
||||||
|
for handler_name in fetch_handlers:
|
||||||
|
package, classname = handler_name.rsplit('.', 1)
|
||||||
|
try:
|
||||||
|
handler_class = getattr(importlib.import_module(package), classname)
|
||||||
|
plugin_list.append(handler_class())
|
||||||
|
except (ImportError, AttributeError):
|
||||||
|
# Skip missing plugins so that they can be ommitted from
|
||||||
|
# installation if desired
|
||||||
|
log("FetchHandler {} not found, skipping plugin".format(handler_name))
|
||||||
|
return plugin_list
|
||||||
48
hooks/charmhelpers/fetch/archiveurl.py
Normal file
48
hooks/charmhelpers/fetch/archiveurl.py
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
import os
|
||||||
|
import urllib2
|
||||||
|
from charmhelpers.fetch import (
|
||||||
|
BaseFetchHandler,
|
||||||
|
UnhandledSource
|
||||||
|
)
|
||||||
|
from charmhelpers.payload.archive import (
|
||||||
|
get_archive_handler,
|
||||||
|
extract,
|
||||||
|
)
|
||||||
|
from charmhelpers.core.host import mkdir
|
||||||
|
|
||||||
|
|
||||||
|
class ArchiveUrlFetchHandler(BaseFetchHandler):
|
||||||
|
"""Handler for archives via generic URLs"""
|
||||||
|
def can_handle(self, source):
|
||||||
|
url_parts = self.parse_url(source)
|
||||||
|
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
|
||||||
|
return "Wrong source type"
|
||||||
|
if get_archive_handler(self.base_url(source)):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def download(self, source, dest):
|
||||||
|
# propogate all exceptions
|
||||||
|
# URLError, OSError, etc
|
||||||
|
response = urllib2.urlopen(source)
|
||||||
|
try:
|
||||||
|
with open(dest, 'w') as dest_file:
|
||||||
|
dest_file.write(response.read())
|
||||||
|
except Exception as e:
|
||||||
|
if os.path.isfile(dest):
|
||||||
|
os.unlink(dest)
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def install(self, source):
|
||||||
|
url_parts = self.parse_url(source)
|
||||||
|
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
|
||||||
|
if not os.path.exists(dest_dir):
|
||||||
|
mkdir(dest_dir, perms=0755)
|
||||||
|
dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
|
||||||
|
try:
|
||||||
|
self.download(source, dld_file)
|
||||||
|
except urllib2.URLError as e:
|
||||||
|
raise UnhandledSource(e.reason)
|
||||||
|
except OSError as e:
|
||||||
|
raise UnhandledSource(e.strerror)
|
||||||
|
return extract(dld_file)
|
||||||
49
hooks/charmhelpers/fetch/bzrurl.py
Normal file
49
hooks/charmhelpers/fetch/bzrurl.py
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
import os
|
||||||
|
from charmhelpers.fetch import (
|
||||||
|
BaseFetchHandler,
|
||||||
|
UnhandledSource
|
||||||
|
)
|
||||||
|
from charmhelpers.core.host import mkdir
|
||||||
|
|
||||||
|
try:
|
||||||
|
from bzrlib.branch import Branch
|
||||||
|
except ImportError:
|
||||||
|
from charmhelpers.fetch import apt_install
|
||||||
|
apt_install("python-bzrlib")
|
||||||
|
from bzrlib.branch import Branch
|
||||||
|
|
||||||
|
class BzrUrlFetchHandler(BaseFetchHandler):
|
||||||
|
"""Handler for bazaar branches via generic and lp URLs"""
|
||||||
|
def can_handle(self, source):
|
||||||
|
url_parts = self.parse_url(source)
|
||||||
|
if url_parts.scheme not in ('bzr+ssh', 'lp'):
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
return True
|
||||||
|
|
||||||
|
def branch(self, source, dest):
|
||||||
|
url_parts = self.parse_url(source)
|
||||||
|
# If we use lp:branchname scheme we need to load plugins
|
||||||
|
if not self.can_handle(source):
|
||||||
|
raise UnhandledSource("Cannot handle {}".format(source))
|
||||||
|
if url_parts.scheme == "lp":
|
||||||
|
from bzrlib.plugin import load_plugins
|
||||||
|
load_plugins()
|
||||||
|
try:
|
||||||
|
remote_branch = Branch.open(source)
|
||||||
|
remote_branch.bzrdir.sprout(dest).open_branch()
|
||||||
|
except Exception as e:
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def install(self, source):
|
||||||
|
url_parts = self.parse_url(source)
|
||||||
|
branch_name = url_parts.path.strip("/").split("/")[-1]
|
||||||
|
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
|
||||||
|
if not os.path.exists(dest_dir):
|
||||||
|
mkdir(dest_dir, perms=0755)
|
||||||
|
try:
|
||||||
|
self.branch(source, dest_dir)
|
||||||
|
except OSError as e:
|
||||||
|
raise UnhandledSource(e.strerror)
|
||||||
|
return dest_dir
|
||||||
|
|
||||||
1
hooks/charmhelpers/payload/__init__.py
Normal file
1
hooks/charmhelpers/payload/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
"Tools for working with files injected into a charm just before deployment."
|
||||||
50
hooks/charmhelpers/payload/execd.py
Normal file
50
hooks/charmhelpers/payload/execd.py
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import subprocess
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
|
||||||
|
|
||||||
|
def default_execd_dir():
|
||||||
|
return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
|
||||||
|
|
||||||
|
|
||||||
|
def execd_module_paths(execd_dir=None):
|
||||||
|
"""Generate a list of full paths to modules within execd_dir."""
|
||||||
|
if not execd_dir:
|
||||||
|
execd_dir = default_execd_dir()
|
||||||
|
|
||||||
|
if not os.path.exists(execd_dir):
|
||||||
|
return
|
||||||
|
|
||||||
|
for subpath in os.listdir(execd_dir):
|
||||||
|
module = os.path.join(execd_dir, subpath)
|
||||||
|
if os.path.isdir(module):
|
||||||
|
yield module
|
||||||
|
|
||||||
|
|
||||||
|
def execd_submodule_paths(command, execd_dir=None):
|
||||||
|
"""Generate a list of full paths to the specified command within exec_dir.
|
||||||
|
"""
|
||||||
|
for module_path in execd_module_paths(execd_dir):
|
||||||
|
path = os.path.join(module_path, command)
|
||||||
|
if os.access(path, os.X_OK) and os.path.isfile(path):
|
||||||
|
yield path
|
||||||
|
|
||||||
|
|
||||||
|
def execd_run(command, execd_dir=None, die_on_error=False, stderr=None):
|
||||||
|
"""Run command for each module within execd_dir which defines it."""
|
||||||
|
for submodule_path in execd_submodule_paths(command, execd_dir):
|
||||||
|
try:
|
||||||
|
subprocess.check_call(submodule_path, shell=True, stderr=stderr)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
hookenv.log("Error ({}) running {}. Output: {}".format(
|
||||||
|
e.returncode, e.cmd, e.output))
|
||||||
|
if die_on_error:
|
||||||
|
sys.exit(e.returncode)
|
||||||
|
|
||||||
|
|
||||||
|
def execd_preinstall(execd_dir=None):
|
||||||
|
"""Run charm-pre-install for each module within execd_dir."""
|
||||||
|
execd_run('charm-pre-install', execd_dir=execd_dir)
|
||||||
@@ -1,304 +0,0 @@
|
|||||||
#!/bin/bash -e
|
|
||||||
|
|
||||||
CHARM="cinder"
|
|
||||||
|
|
||||||
COMMON_PACKAGES="cinder-common python-mysqldb gdisk haproxy"
|
|
||||||
API_PACKAGES="cinder-api"
|
|
||||||
VOL_PACKAGES="cinder-volume"
|
|
||||||
SCHED_PACKAGES="cinder-scheduler"
|
|
||||||
|
|
||||||
CINDER_CONF="/etc/cinder/cinder.conf"
|
|
||||||
API_CONF="/etc/cinder/api-paste.ini"
|
|
||||||
|
|
||||||
CONFIG_CHANGED="False"
|
|
||||||
|
|
||||||
HOOKS_DIR="$CHARM_DIR/hooks"
|
|
||||||
|
|
||||||
if [[ -e $HOOKS_DIR/lib/openstack-common ]] ; then
|
|
||||||
. $HOOKS_DIR/lib/openstack-common
|
|
||||||
else
|
|
||||||
juju-log "Couldn't load $HOOKS_DIR/openstack-common" && exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
service_enabled() {
|
|
||||||
# return 0 if a specific cinder service is enabled in config
|
|
||||||
# "enabled-services"
|
|
||||||
local enabled_services=$(config-get enabled-services)
|
|
||||||
[[ "$enabled_services" == "all" ]] && return 0
|
|
||||||
local svc="$1"
|
|
||||||
local enabled=$(echo $enabled_services | sed -e 's/,/ /g')
|
|
||||||
for s in $enabled ; do
|
|
||||||
[[ "$svc" == "$s" ]] && return 0
|
|
||||||
done
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
determine_packages() {
|
|
||||||
# determine packages to be installed based on what services are enabled.
|
|
||||||
local pkgs="$COMMON_PACKAGES"
|
|
||||||
local enabled_services=$(config-get enabled-services)
|
|
||||||
|
|
||||||
if [[ "$enabled_services" == "all" ]] ; then
|
|
||||||
pkgs="$pkgs $API_PACKAGES $VOL_PACKAGES $SCHED_PACKAGES"
|
|
||||||
else
|
|
||||||
service_enabled "api" && pkgs="$pkgs $API_PACKAGES"
|
|
||||||
service_enabled "scheduler" && pkgs="$pkgs $SCHED_PACKAGES"
|
|
||||||
service_enabled "volume" && pkgs="$pkgs $VOL_PACKAGES"
|
|
||||||
fi
|
|
||||||
echo "$pkgs"
|
|
||||||
}
|
|
||||||
|
|
||||||
function set_or_update {
|
|
||||||
# Set a config option in nova.conf or api-paste.ini, depending
|
|
||||||
# Defaults to updating nova.conf
|
|
||||||
local KEY="$1"
|
|
||||||
local VALUE="$2"
|
|
||||||
local CONF_FILE="$3"
|
|
||||||
local pattern=""
|
|
||||||
[[ -z $KEY ]] && error_out "set_or_update: value $VALUE missing KEY"
|
|
||||||
[[ -z $VALUE ]] && error_out "set_or_update: key $KEY missing VALUE"
|
|
||||||
[[ -z "$CONF_FILE" ]] && CONF_FILE=$CINDER_CONF
|
|
||||||
|
|
||||||
case "$CONF_FILE" in
|
|
||||||
"$CINDER_CONF") match="^$KEY = "
|
|
||||||
pattern="$match"
|
|
||||||
out="$KEY = "
|
|
||||||
;;
|
|
||||||
"$API_CONF") match="^$KEY = "
|
|
||||||
pattern="$match"
|
|
||||||
out="$KEY = "
|
|
||||||
;;
|
|
||||||
*) juju-log "ERROR: set_or_update: Invalid CONF_FILE ($CONF_FILE)"
|
|
||||||
esac
|
|
||||||
|
|
||||||
grep -q "$match$VALUE" $CONF_FILE &&
|
|
||||||
juju-log "cinder: $KEY=$VALUE already in set in $CONF_FILE" && return 0
|
|
||||||
|
|
||||||
if grep -q "$match" $CONF_FILE ; then
|
|
||||||
juju-log "cinder: Updating $CONF_FILE, $KEY=$VALUE"
|
|
||||||
sed -i "s|\($pattern\).*|\1$VALUE|" $CONF_FILE
|
|
||||||
else
|
|
||||||
juju-log "cinder: Setting new option $KEY=$VALUE in $CONF_FILE"
|
|
||||||
echo "$out$VALUE" >>$CONF_FILE
|
|
||||||
fi
|
|
||||||
CONFIG_CHANGED="True"
|
|
||||||
}
|
|
||||||
|
|
||||||
cinder_ctl() {
|
|
||||||
local svc="$1"
|
|
||||||
local action="$2"
|
|
||||||
local svcs=""
|
|
||||||
if [[ "$svc" == "all" ]] ; then
|
|
||||||
service_enabled "api" && svcs="$svcs cinder-api"
|
|
||||||
service_enabled "scheduler" && svcs="$svcs cinder-scheduler"
|
|
||||||
service_enabled "volume" && svcs="$svcs cinder-volume"
|
|
||||||
else
|
|
||||||
svcs=$svc
|
|
||||||
fi
|
|
||||||
SERVICES=$svcs
|
|
||||||
service_ctl all $action
|
|
||||||
}
|
|
||||||
|
|
||||||
clean_storage() {
|
|
||||||
# if configured to overwrite existing storage, we unmount the block-dev
|
|
||||||
# if mounted and clear any previous pv signatures
|
|
||||||
local block_dev="$1"
|
|
||||||
juju-log "Preparing storage '$block_dev'"
|
|
||||||
if grep -q "^$block_dev" /proc/mounts ; then
|
|
||||||
mp=$(grep "^$block_dev" /proc/mounts | awk '{ print $2 }')
|
|
||||||
juju-log "Unmounting $block_dev from $mp"
|
|
||||||
umount "$mp" || error_out "ERROR: Could not unmount storage from $mp"
|
|
||||||
fi
|
|
||||||
if pvdisplay "$block_dev" >/dev/null 2>&1 ; then
|
|
||||||
juju-log "Removing existing LVM PV signatures from $block_dev"
|
|
||||||
|
|
||||||
# deactivate any volgroups that may be built on this dev
|
|
||||||
vg=$(pvdisplay $block_dev | grep "VG Name" | awk '{ print $3 }')
|
|
||||||
if [[ -n "$vg" ]] ; then
|
|
||||||
juju-log "Deactivating existing volume group: $vg"
|
|
||||||
vgchange -an "$vg" ||
|
|
||||||
error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?"
|
|
||||||
fi
|
|
||||||
echo "yes" | pvremove -ff "$block_dev" ||
|
|
||||||
error_out "Could not pvremove $block_dev"
|
|
||||||
else
|
|
||||||
juju-log "Zapping disk of all GPT and MBR structures"
|
|
||||||
sgdisk --zap-all $block_dev ||
|
|
||||||
error_out "Unable to zap $block_dev"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
function get_block_device() {
    # Resolve a storage spec to a block device path, creating a loopback
    # device backed by a sparse file when given a plain file path.
    # Accepted forms:
    #   sdb            -> /dev/sdb
    #   /dev/sdb       -> /dev/sdb
    #   /path/file     -> loop device backed by /path/file (default size 5G)
    #   /path/file|2G  -> loop device backed by a 2G /path/file
    # Echoes the resolved device path; returns 0 on success, non-zero on error.
    local input="$1"

    case "$input" in
        /dev/*) echo "$input"; return 0;;
        /*) :;;
        *) echo "/dev/$input"; return 0;;
    esac

    # this represents a file
    # support "/path/to/file|5G"
    local fpath size
    if [ "${input#*|}" != "${input}" ]; then
        size=${input##*|}
        fpath=${input%|*}
    else
        fpath=${input}
        size=5G
    fi

    ## loop devices are not namespaced. This is bad for containers.
    ## it means that the output of 'losetup' may have the given $fpath
    ## in it, but that may not represent this containers $fpath, but
    ## another containers. To address that, we really need to
    ## allow some uniq container-id to be expanded within path.
    ## TODO: find a unique container-id that will be consistent for
    ##       this container throughout its lifetime and expand it
    ##       in the fpath.
    # fpath=${fpath//%{id}/$THAT_ID}

    local found=""
    # parse through 'losetup -a' output, looking for this file
    # output is expected to look like:
    #   /dev/loop0: [0807]:961814 (/tmp/my.img)
    found=$(losetup -a |
        awk 'BEGIN { found=0; }
             $3 == f { sub(/:$/,"",$1); print $1; found=found+1; }
             END { if( found == 0 || found == 1 ) { exit(0); }; exit(1); }' \
        f="($fpath)")

    if [ $? -ne 0 ]; then
        echo "multiple devices found for $fpath: $found" 1>&2
        return 1;
    fi

    # BUG FIX: finding an existing, valid loop device for this file is the
    # success case, but the original returned 1 here, making callers treat
    # an already-provisioned backing file as a failure.
    [ -n "$found" -a -b "$found" ] && { echo "$found"; return 0; }

    if [ -n "$found" ]; then
        echo "confused, $found is not a block device for $fpath";
        return 1;
    fi

    # no existing device was found, create one
    mkdir -p "${fpath%/*}"
    truncate --size "$size" "$fpath" ||
        { echo "failed to create $fpath of size $size"; return 1; }

    found=$(losetup --find --show "$fpath") ||
        { echo "failed to setup loop device for $fpath" 1>&2; return 1; }

    echo "$found"
    return 0
}
|
|
||||||
|
|
||||||
prepare_storage() {
    # prepare a local block device for use by cinder. this involves potentially
    # cleaning/unmounting existing storage, init'ing the device as a LVM PV,
    # and creating a VG.
    #   $1 - block device or file spec (resolved via get_block_device)
    #   $2 - LVM volume group name to create
    #   $3 - "True"/"true" to wipe any pre-existing data first
    local block_dev="$1"
    local vol_group="$2"
    local overwrite="$3"

    # ||/&& chain left-to-right: error_out fires if either argument is empty.
    [[ -z "$block_dev" ]] || [[ -z "$vol_group" ]] &&
        error_out "cinder: prepare_storage() missing input: block_dev|vol_group"

    local device=""
    device=$(get_block_device "$block_dev") ||
        error_out "failed to get device for $block_dev"

    juju-log "using $device for block-device input of $block_dev"

    [ -b "$device" ] ||
        error_out "$device is not a valid block device";

    # Only wipe the device when the operator explicitly opted in.
    [ "$overwrite" != "True" -a "$overwrite" != "true" ] ||
        clean_storage "$device"

    juju-log "Initializing $device as a PV..."
    pvcreate "$device" || error_out "Could not initialize PV: $device"

    juju-log "Creating volume group $vol_group on $device"
    vgcreate "$vol_group" "$device" ||
        error_out "Could not create volume group: $vol_group"
    return 0
}
|
|
||||||
|
|
||||||
configure_https() {
    # Configure SSL termination in front of the cinder API, re-pointing
    # haproxy/the API listener as needed, then refresh keystone endpoints.
    # request openstack-common setup reverse proxy mapping for API and registry
    # servers
    service_enabled "api" || return 0
    local cfg_api_port=$(config-get api-listening-port)
    service_ctl cinder-api stop
    if [[ -n "$(peer_units)" ]] || is_clustered ; then
        # haproxy may already be configured. need to push it back in the request
        # pipeline in preparation for a change from:
        #   from: haproxy (8776) -> cinder-api (8766)
        #   to:   ssl (8776) -> haproxy (8766) -> cinder-api (8756)
        local next_server=$(determine_haproxy_port $cfg_api_port)
        local api_port=$(determine_api_port $cfg_api_port)
        configure_haproxy "cinder_api:$next_server:$api_port"
    else
        # if not clustered, the cinder-api is next in the pipeline.
        local api_port=$(determine_api_port $cfg_api_port)
        local next_server=$api_port
    fi

    # setup https to point to either haproxy or directly to api server, depending.
    setup_https $cfg_api_port:$next_server

    # configure servers to listen on new ports accordingly.
    set_or_update osapi_volume_listen_port "$api_port"
    service_ctl cinder-api start

    local r_id=""
    # (re)configure ks endpoint accordingly in ks and nova.
    for r_id in $(relation-ids identity-service) ; do
        keystone_joined "$r_id"
    done
}
|
|
||||||
|
|
||||||
do_openstack_upgrade() {
    # Upgrade cinder to a new OpenStack release, then replay relation hooks so
    # config files are regenerated for the new version.
    #   $1  - target install source (passed to configure_install_source)
    #   $2+ - packages to install/upgrade
    local rel="$1"
    shift
    local packages=$@
    configure_install_source "$rel"
    apt-get update
    # force-confnew: take the package's new config files; our settings are
    # re-applied below by replaying each relation's changed hook.
    DEBIAN_FRONTEND=noninteractive apt-get \
        --option Dpkg::Options::=--force-confnew -y \
        install --no-install-recommends $packages
    # update new configs for all possible relations
    # mysql
    for r_id in $(relation-ids shared-db); do
        for unit in $(relation-list -r $r_id) ; do
            juju-log "$CHARM: Configuring database after upgrade."
            db_changed $r_id $unit
        done
    done
    # rabbitmq-server
    for r_id in $(relation-ids amqp); do
        for unit in $(relation-list -r $r_id) ; do
            juju-log "$CHARM: Configuring amqp after upgrade."
            amqp_changed $r_id $unit
        done
    done
    # keystone
    for r_id in $(relation-ids identity-service); do
        for unit in $(relation-list -r $r_id) ; do
            juju-log "$CHARM: Configuring identity service after upgrade."
            keystone_changed $r_id $unit
        done
    done
    # ceph
    local ceph_ids="$(relation-ids ceph)"
    [[ -n "$ceph_ids" ]] && apt-get -y install ceph-common python-ceph
    for r_id in $ceph_ids ; do
        # ensure librbd gets updated with openstack
        for unit in $(relation-list -r $r_id) ; do
            # Typo fix: message previously read "after upgarde".
            juju-log "$CHARM: Configuring ceph client after upgrade."
            ceph_changed $r_id $unit
        done
    done
}
|
|
||||||
@@ -1,354 +0,0 @@
|
|||||||
#!/bin/bash -e
|
|
||||||
|
|
||||||
# Source shared helper functions; nothing below works without them.
HOOKS_DIR="$CHARM_DIR/hooks"
if [[ -e $HOOKS_DIR/cinder-common ]] ; then
    . $HOOKS_DIR/cinder-common
else
    juju-log "ERROR: Could not source cinder-common from $HOOKS_DIR."
    exit 1
fi
|
|
||||||
|
|
||||||
install_hook() {
    # Install-time setup: choose an install source, install packages and
    # optionally prepare local LVM storage for the volume service.
    install_source="$(config-get openstack-origin)"

    # Check if we are deploying to Precise from distro.
    # If so, we need to use the Cloud Archive instead of the
    # Ubuntu Archive since Cinder does not exist there (for precise).
    . /etc/lsb-release
    [[ "$DISTRIB_CODENAME" == "precise" && "$install_source" == "distro" ]] &&
        install_source="cloud:precise-folsom"

    configure_install_source "$install_source"
    apt-get update || true # ignore transient archive errors
    pkgs=$(determine_packages)
    juju-log "cinder: Installing following packages: $pkgs"
    DEBIAN_FRONTEND=noninteractive apt-get -y install $pkgs

    if service_enabled "volume" ; then
        # prepare local storage if volume service is being installed.
        block_dev=$(config-get block-device)
        if [[ "$block_dev" != "None" && "$block_dev" != "none" ]] ; then
            vol_group=$(config-get volume-group)
            overwrite=$(config-get overwrite)
            prepare_storage "$block_dev" "$vol_group" "$overwrite"
            set_or_update "volume_group" "$vol_group"
            cinder_ctl cinder-volume restart
        fi
    fi
    configure_https
}
|
|
||||||
|
|
||||||
db_joined() {
    # Request a database and user from mysql, advertising our own address.
    local db_name db_acct
    db_name=$(config-get cinder-db)
    db_acct=$(config-get db-user)
    juju-log "cinder: Requesting database access to cinder database."
    relation-set database=$db_name username=$db_acct
    relation-set hostname=$(unit-get private-address)
}
|
|
||||||
|
|
||||||
db_changed() {
    # Configure cinder's SQL connection once mysql publishes credentials.
    #   $1 - optional relation id (set when replayed from an upgrade)
    #   $2 - optional remote unit id
    local r_id="$1"
    local unit_id="$2"
    local r_arg=""
    [[ -n "$r_id" ]] && r_arg="-r $r_id"
    db_host=$(relation-get $r_arg db_host $unit_id)
    db_password=$(relation-get $r_arg password $unit_id)

    # exit 0 (not an error) until the remote side has published everything;
    # the hook will fire again once it does.
    [[ -z "$db_host" ]] || [[ -z "$db_password" ]] &&
        juju-log "Missing DB_HOST|DB_PASSWORD, peer not ready? Will retry." &&
        exit 0

    db_user=$(config-get db-user)
    cinder_db=$(config-get cinder-db)
    juju-log "cinder: Configuring cinder for database access to $cinder_db@$db_host"
    set_or_update sql_connection "mysql://$db_user:$db_password@$db_host/$cinder_db"
    # Only the cluster leader runs migrations to avoid concurrent schema sync.
    if eligible_leader 'res_cinder_vip'; then
        /usr/bin/cinder-manage db sync
    fi
    cinder_ctl all restart
}
|
|
||||||
|
|
||||||
amqp_joined() {
    # Request rabbitmq access for our configured user and vhost.
    # Fetch config up front: the original interpolated $rabbit_vhost into the
    # log message before the variable was ever assigned, logging it empty.
    local rabbit_user=$(config-get rabbit-user)
    local rabbit_vhost=$(config-get rabbit-vhost)
    juju-log "cinder: Requesting amqp access to vhost $rabbit_vhost."
    relation-set username=$rabbit_user
    relation-set vhost=$rabbit_vhost
}
|
|
||||||
|
|
||||||
amqp_changed() {
    # Configure cinder's rabbitmq access once the broker publishes credentials.
    #   $1 - optional relation id, $2 - optional remote unit id
    local r_id="$1"
    local unit_id="$2"
    local r_arg=""
    [[ -n "$r_id" ]] && r_arg="-r $r_id"
    rabbit_host=$(relation-get $r_arg private-address $unit_id)
    rabbit_password=$(relation-get $r_arg password $unit_id)
    # Retry later (exit 0) until the remote side has published everything.
    [[ -z "$rabbit_host" ]] || [[ -z "$rabbit_password" ]] &&
        juju-log "Missing rabbit_host||rabbit_passwd, peer not ready? Will retry." && exit 0
    local clustered=$(relation-get $r_arg clustered $unit_id)
    if [[ -n "$clustered" ]] ; then
        # Talk to the cluster VIP instead of an individual broker unit.
        juju-log "$CHARM - amqp_changed: Configuring for access to haclustered "\
            "rabbitmq service."
        local vip=$(relation-get $r_arg vip $unit_id)
        # Typo fix: message previously read "Clustered bu no vip."
        [[ -z "$vip" ]] && juju-log "$CHARM - amqp_changed: Clustered but no vip."\
            && exit 0
        rabbit_host="$vip"
    fi
    # Fetch config before logging: the original interpolated $rabbit_vhost
    # into the log message before the variable was assigned.
    rabbit_user=$(config-get rabbit-user)
    rabbit_vhost=$(config-get rabbit-vhost)
    juju-log "cinder: Configuring cinder for amqp access to $rabbit_host:$rabbit_vhost"
    set_or_update rabbit_host $rabbit_host
    set_or_update rabbit_userid $rabbit_user
    set_or_update rabbit_password $rabbit_password
    set_or_update rabbit_virtual_host $rabbit_vhost
    cinder_ctl all restart
}
|
|
||||||
|
|
||||||
keystone_joined() {
    # Advertise cinder's endpoint URLs to keystone.
    #   $1 - optional relation id
    # Exit hook execution if unit is not leader of cluster/service
    eligible_leader 'res_cinder_vip' || return 0

    # determine correct endpoint URL
    https && scheme="https" || scheme="http"
    # Clustered units publish the VIP; standalone units their own address.
    is_clustered && local host=$(config-get vip) ||
        local host=$(unit-get private-address)

    # $(tenant_id)s is a keystone endpoint template token, not shell expansion.
    local url="$scheme://$host:$(config-get api-listening-port)/v1/\$(tenant_id)s"
    r_id=""
    if [[ -n "$1" ]] ; then
        r_id="-r $1"
    fi
    relation-set $r_id service="cinder" \
        region="$(config-get region)" public_url="$url" admin_url="$url" internal_url="$url"
}
|
|
||||||
|
|
||||||
keystone_changed() {
    # Write keystone authtoken settings into the API config once the identity
    # service publishes its details, then restart and refresh HTTPS.
    #   $1 - optional relation id, $2 - optional remote unit id
    local r_id="$1"
    local unit_id="$2"
    local r_arg=""
    [[ -n "$r_id" ]] && r_arg="-r $r_id"
    service_port=$(relation-get $r_arg service_port $unit_id)
    auth_port=$(relation-get $r_arg auth_port $unit_id)
    service_username=$(relation-get $r_arg service_username $unit_id)
    service_password=$(relation-get $r_arg service_password $unit_id)
    service_tenant=$(relation-get $r_arg service_tenant $unit_id)
    service_host=$(relation-get $r_arg service_host $unit_id)
    auth_host=$(relation-get $r_arg auth_host $unit_id)

    # Robustness fix: also require service_host and auth_host -- both are
    # written to the config below but were not checked by the original guard.
    [[ -z "$service_port" ]] || [[ -z "$auth_port" ]] ||
        [[ -z "$service_username" ]] || [[ -z "$service_password" ]] ||
        [[ -z "$service_tenant" ]] || [[ -z "$service_host" ]] ||
        [[ -z "$auth_host" ]] && juju-log "keystone_changed: Peer not ready" &&
        exit 0
    # update keystone authtoken settings accordingly
    set_or_update "service_host" "$service_host" "$API_CONF"
    set_or_update "service_port" "$service_port" "$API_CONF"
    set_or_update "auth_host" "$auth_host" "$API_CONF"
    set_or_update "auth_port" "$auth_port" "$API_CONF"
    set_or_update "admin_tenant_name" "$service_tenant" "$API_CONF"
    set_or_update "admin_user" "$service_username" "$API_CONF"
    set_or_update "admin_password" "$service_password" "$API_CONF"
    set_or_update "auth_protocol" "http" "$API_CONF"
    set_or_update "auth_strategy" "keystone" "$CINDER_CONF"

    cinder_ctl all restart
    configure_https
}
|
|
||||||
|
|
||||||
function ceph_joined {
    # Ensure the ceph config directory exists and the client tools are present.
    mkdir -p /etc/ceph
    apt-get -y install ceph-common || exit 1
}
|
|
||||||
|
|
||||||
function ceph_changed {
    # Configure cinder-volume for ceph RBD once the mon cluster shares a key:
    # write the keyring + ceph.conf, export CEPH_ARGS, create the pool
    # (leader only) and point cinder at the RBD driver.
    #   $1 - optional relation id, $2 - optional remote unit id
    local r_id="$1"
    local unit_id="$2"
    local r_arg=""
    [[ -n "$r_id" ]] && r_arg="-r $r_id"
    SERVICE_NAME=`echo $JUJU_UNIT_NAME | cut -d / -f 1`
    KEYRING=/etc/ceph/ceph.client.$SERVICE_NAME.keyring
    KEY=`relation-get $r_arg key $unit_id`
    if [ -n "$KEY" ]; then
        # But only once
        if [ ! -f $KEYRING ]; then
            ceph-authtool $KEYRING \
                --create-keyring --name=client.$SERVICE_NAME \
                --add-key="$KEY"
            chmod +r $KEYRING
        fi
    else
        # No key - bail for the time being
        exit 0
    fi

    MONS=`relation-list $r_arg`
    mon_hosts=""
    for mon in $MONS; do
        mon_hosts="$mon_hosts $(get_ip $(relation-get $r_arg private-address $mon)):6789"
    done
    # BUG FIX: 'auth supported' previously used "$r_id" (a bare relation id)
    # instead of "$r_arg" ("-r <id>"), producing a malformed relation-get call
    # whenever a relation id was passed (e.g. replayed from an upgrade).
    cat > /etc/ceph/ceph.conf << EOF
[global]
 auth supported = $(relation-get $r_arg auth $unit_id)
 keyring = /etc/ceph/\$cluster.\$name.keyring
 mon host = $mon_hosts
EOF

    # XXX: Horrid kludge to make cinder-volume use
    # a different ceph username than admin
    if [ -z "`grep CEPH_ARGS /etc/environment`" ]; then
        # Only insert environment var if we don't already have it
        echo "CEPH_ARGS=\"--id $SERVICE_NAME\"" >> /etc/environment
    fi
    # Also add it to the overrides for cinder volume
    # in preparation for move to start-stop-daemon.
    echo "env CEPH_ARGS=\"--id $SERVICE_NAME\"" > /etc/init/cinder-volume.override

    # Only the leader should try to create pools
    if eligible_leader 'res_cinder_vip'; then
        # Create the cinder pool if it does not already exist
        if ! rados --id $SERVICE_NAME lspools | grep -q cinder; then
            # Size the pool's placement-group count from the OSD count.
            local num_osds=$(ceph --id $SERVICE_NAME osd ls| egrep "[^\s]"| wc -l)
            local cfg_key='ceph-osd-replication-count'
            local rep_count="$(config-get $cfg_key)"
            if [ -z "$rep_count" ]
            then
                rep_count=2
                juju-log "config returned empty string for $cfg_key - using value of 2"
            fi
            local num_pgs=$(((num_osds*100)/rep_count))
            ceph --id $SERVICE_NAME osd pool create cinder $num_pgs $num_pgs
            ceph --id $SERVICE_NAME osd pool set cinder size $rep_count
            # TODO: set appropriate crush ruleset
        fi
    fi

    # Reconfigure cinder-volume
    set_or_update volume_driver cinder.volume.driver.RBDDriver
    set_or_update rbd_pool cinder
    # Set host to service name to ensure that requests get
    # distributed across all cinder servers in a cluster
    # as they can all service ceph requests.
    set_or_update host "$SERVICE_NAME"
    cinder_ctl "cinder-volume" restart
}
|
|
||||||
|
|
||||||
function cluster_changed() {
    # Rebalance haproxy across peer units and move the API to its backend port.
    service_enabled "api" || return 0
    if [[ -z "$(peer_units)" ]] ; then
        juju-log "cluster_changed() with no peers."
        exit 0
    fi
    local listen_port="$(config-get api-listening-port)"
    local frontend_port="$(determine_haproxy_port $listen_port)"
    local backend_port="$(determine_api_port $listen_port)"
    # stop may fail if the service is not yet running; that's fine.
    service cinder-api stop || :
    configure_haproxy "cinder_api:$frontend_port:$backend_port"
    set_or_update osapi_volume_listen_port "$backend_port"
    service cinder-api start
}
|
|
||||||
|
|
||||||
function upgrade_charm() {
    # Re-render haproxy/listen configuration after a charm upgrade;
    # cluster_changed already performs exactly the refresh needed.
    cluster_changed
}
|
|
||||||
|
|
||||||
function ha_relation_joined() {
    # Hand the hacluster subordinate the resource definitions it needs to
    # manage the cinder VIP and a cloned haproxy resource.
    local corosync_bindiface=`config-get ha-bindiface`
    local corosync_mcastport=`config-get ha-mcastport`
    local vip=`config-get vip`
    local vip_iface=`config-get vip_iface`
    local vip_cidr=`config-get vip_cidr`
    if [ -n "$vip" ] && [ -n "$vip_iface" ] && \
       [ -n "$vip_cidr" ] && [ -n "$corosync_bindiface" ] && \
       [ -n "$corosync_mcastport" ]; then
        # TODO: This feels horrible but the data required by the hacluster
        # charm is quite complex and is python ast parsed.
        resources="{
            'res_cinder_vip':'ocf:heartbeat:IPaddr2',
            'res_cinder_haproxy':'lsb:haproxy'
        }"
        resource_params="{
            'res_cinder_vip': 'params ip=\"$vip\" cidr_netmask=\"$vip_cidr\" nic=\"$vip_iface\"',
            'res_cinder_haproxy': 'op monitor interval=\"5s\"'
        }"
        init_services="{
            'res_cinder_haproxy':'haproxy'
        }"
        clones="{
            'cl_cinder_haproxy': 'res_cinder_haproxy'
        }"
        relation-set corosync_bindiface=$corosync_bindiface \
            corosync_mcastport=$corosync_mcastport \
            resources="$resources" resource_params="$resource_params" \
            init_services="$init_services" clones="$clones"
    else
        juju-log "Insufficient configuration data to configure hacluster"
        exit 1
    fi
}
|
|
||||||
|
|
||||||
function ha_relation_changed() {
    # Once the cluster has formed, the leader re-registers keystone endpoints
    # using the VIP instead of the unit's own address.
    local clustered=`relation-get clustered`
    if [ -n "$clustered" ] && is_leader 'res_cinder_vip'; then
        juju-log "Cluster leader, reconfiguring keystone endpoint"
        https && local scheme="https" || local scheme="http"
        # $(tenant_id)s is a keystone endpoint template token, hence escaped.
        local url="$scheme://$(config-get vip):$(config-get api-listening-port)/v1/\$(tenant_id)s"
        local r_id=""
        for r_id in `relation-ids identity-service`; do
            relation-set -r $r_id service="cinder" \
                region="$(config-get region)" \
                public_url="$url" admin_url="$url" internal_url="$url"
        done
    fi
}
|
|
||||||
|
|
||||||
function config_changed() {
    # possibly upgrade if openstack-origin has been bumped
    local install_src=$(config-get openstack-origin)
    local cur=$(get_os_codename_package "cinder-common")
    local available=$(get_os_codename_install_source "$install_src")
    # Only upgrade when the requested release is strictly newer than installed.
    if dpkg --compare-versions $(get_os_version_codename "$cur") lt \
        $(get_os_version_codename "$available") ; then
        juju-log "$CHARM: Upgrading OpenStack release: $cur -> $available."
        # need to explicitly upgrade ksc b/c (LP: 1182689)
        do_openstack_upgrade "$install_src" $(determine_packages) python-keystoneclient
    fi

    configure_https
    # Save our scriptrc env variables for health checks
    declare -a env_vars=(
        "OPENSTACK_PORT_MCASTPORT=$(config-get ha-mcastport)"
        'OPENSTACK_SERVICE_API=cinder-api'
        'OPENSTACK_SERVICE_SCHEDULER=cinder-scheduler'
        'OPENSTACK_SERVICE_VOLUME=cinder-volume')
    save_script_rc ${env_vars[@]}
}
|
|
||||||
|
|
||||||
function image-service_changed {
    # Point cinder at the glance API server advertised over the relation.
    GLANCE_API_SERVER=$(relation-get glance-api-server)
    if [[ -z $GLANCE_API_SERVER ]] ; then
        echo "image-service_changed: GLANCE_API_SERVER not yet set. Exit 0 and retry"
        exit 0
    fi
    set_or_update glance_api_servers $GLANCE_API_SERVER
    # qemu-utils provides qemu-img, used to convert glance images to volumes.
    apt-get -y install qemu-utils
    cinder_ctl all restart
}
|
|
||||||
|
|
||||||
# Dispatch: juju invokes this script through a symlink named after the hook.
arg0=$(basename $0)
juju-log "cinder: Attempting to fire hook for: $arg0"
case $arg0 in
    "install") install_hook ;;
    "start") cinder_ctl all start;;
    "stop") cinder_ctl all stop;;
    "shared-db-relation-joined") db_joined ;;
    "shared-db-relation-changed") db_changed ;;
    "amqp-relation-joined") amqp_joined ;;
    "amqp-relation-changed") amqp_changed ;;
    "identity-service-relation-joined") keystone_joined ;;
    "identity-service-relation-changed") keystone_changed ;;
    "ceph-relation-joined") ceph_joined;;
    "ceph-relation-changed") ceph_changed;;
    # the cinder-volume-service relation carries no data from this side.
    "cinder-volume-service-relation-joined") exit 0 ;;
    "cinder-volume-service-relation-changed") exit 0 ;;
    "cluster-relation-changed") cluster_changed ;;
    "cluster-relation-departed") cluster_changed ;;
    "image-service-relation-changed") image-service_changed ;;
    "ha-relation-joined") ha_relation_joined ;;
    "ha-relation-changed") ha_relation_changed ;;
    "upgrade-charm") upgrade_charm ;;
    "config-changed") config_changed ;;
    # unknown hooks are a no-op rather than an error.
    *) exit 0
esac
|
|
||||||
1
hooks/cinder-volume-service
Symbolic link
1
hooks/cinder-volume-service
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
cinder_hooks.py
|
||||||
@@ -1 +0,0 @@
|
|||||||
cinder-hooks
|
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
76
hooks/cinder_contexts.py
Normal file
76
hooks/cinder_contexts.py
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
config,
|
||||||
|
relation_ids,
|
||||||
|
service_name,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.openstack.context import (
|
||||||
|
OSContextGenerator,
|
||||||
|
ApacheSSLContext as SSLContext,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.hahelpers.cluster import (
|
||||||
|
determine_api_port,
|
||||||
|
determine_haproxy_port,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class ImageServiceContext(OSContextGenerator):
    """Provides the configured glance API version to templates once an
    image-service relation exists."""
    interfaces = ['image-service']

    def __call__(self):
        if not relation_ids('image-service'):
            return {}
        return {'glance_api_version': config('glance-api-version')}
|
||||||
|
|
||||||
|
|
||||||
|
class CephContext(OSContextGenerator):
    """Generates cinder.conf settings for RBD-backed volumes."""
    interfaces = ['ceph']

    def __call__(self):
        """Template context added to cinder.conf in the presence of a
        ceph relation; empty otherwise."""
        if not relation_ids('ceph'):
            return {}
        svc = service_name()
        ctxt = {
            'volume_driver': 'cinder.volume.driver.RBDDriver',
            # ensure_ceph_pool() creates pool based on service name.
            'rbd_pool': svc,
            'rbd_user': svc,
            # host == service name spreads ceph requests across all units.
            'host': svc,
        }
        return ctxt
|
||||||
|
|
||||||
|
|
||||||
|
class HAProxyContext(OSContextGenerator):
    # NOTE(review): 'ceph' looks like a copy/paste slip for an haproxy
    # context (would normally be the cluster interface) -- confirm before
    # relying on this value.
    interfaces = ['ceph']

    def __call__(self):
        '''
        Extends the main charmhelpers HAProxyContext with a port mapping
        specific to this charm.
        Also used to extend cinder.conf context with correct api_listening_port
        '''
        # haproxy fronts the public port; the API itself is moved to a
        # backend port so haproxy/apache can sit in front of it.
        haproxy_port = determine_haproxy_port(config('api-listening-port'))
        api_port = determine_api_port(config('api-listening-port'))

        ctxt = {
            'service_ports': {'cinder_api': [haproxy_port, api_port]},
            'osapi_volume_listen_port': api_port,
        }
        return ctxt
|
||||||
|
|
||||||
|
|
||||||
|
class ApacheSSLContext(SSLContext):
    interfaces = ['https']
    # public port apache terminates SSL on for the cinder API
    external_ports = [8776]
    service_namespace = 'cinder'

    def __call__(self):
        # late import to work around circular dependency
        from cinder_utils import service_enabled
        # Only generate SSL config on units actually running the API service.
        if not service_enabled('cinder-api'):
            return {}
        return super(ApacheSSLContext, self).__call__()
|
||||||
276
hooks/cinder_hooks.py
Executable file
276
hooks/cinder_hooks.py
Executable file
@@ -0,0 +1,276 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from subprocess import check_call
|
||||||
|
|
||||||
|
from cinder_utils import (
|
||||||
|
clean_storage,
|
||||||
|
determine_packages,
|
||||||
|
do_openstack_upgrade,
|
||||||
|
ensure_block_device,
|
||||||
|
ensure_ceph_pool,
|
||||||
|
juju_log,
|
||||||
|
migrate_database,
|
||||||
|
prepare_lvm_storage,
|
||||||
|
register_configs,
|
||||||
|
restart_map,
|
||||||
|
service_enabled,
|
||||||
|
set_ceph_env_variables,
|
||||||
|
CLUSTER_RES,
|
||||||
|
CINDER_CONF,
|
||||||
|
CINDER_API_CONF,
|
||||||
|
CEPH_CONF,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
Hooks,
|
||||||
|
UnregisteredHookError,
|
||||||
|
config,
|
||||||
|
relation_get,
|
||||||
|
relation_ids,
|
||||||
|
relation_set,
|
||||||
|
service_name,
|
||||||
|
unit_get,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.fetch import apt_install, apt_update
|
||||||
|
from charmhelpers.core.host import lsb_release, restart_on_change
|
||||||
|
|
||||||
|
from charmhelpers.contrib.openstack.utils import (
|
||||||
|
configure_installation_source, openstack_upgrade_available)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring
|
||||||
|
|
||||||
|
from charmhelpers.contrib.hahelpers.cluster import (
|
||||||
|
canonical_url,
|
||||||
|
eligible_leader,
|
||||||
|
is_leader,
|
||||||
|
get_hacluster_config,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.payload.execd import execd_preinstall
|
||||||
|
|
||||||
|
hooks = Hooks()
|
||||||
|
|
||||||
|
CONFIGS = register_configs()
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('install')
def install():
    """Install cinder packages and, when enabled, prepare LVM block storage."""
    execd_preinstall()
    conf = config()
    src = conf['openstack-origin']
    # Cinder is not in the Precise distro archive; use the cloud archive.
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and
            src == 'distro'):
        src = 'cloud:precise-folsom'
    configure_installation_source(src)
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if (service_enabled('volume') and
            conf['block-device'] not in [None, 'None', 'none']):
        # Resolve the configured spec to a real device (may create a loopback).
        bdev = ensure_block_device(conf['block-device'])
        juju_log('Located valid block device: %s' % bdev)
        # 'overwrite' may arrive as a bool or a string depending on config.
        if conf['overwrite'] in ['true', 'True', True]:
            juju_log('Ensuring block device is clean: %s' % bdev)
            clean_storage(bdev)
        prepare_lvm_storage(bdev, conf['volume-group'])
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('config-changed')
@restart_on_change(restart_map())
def config_changed():
    """Upgrade OpenStack when the origin config moved forward, then re-render
    all configs and refresh the HTTPS frontend."""
    if openstack_upgrade_available('cinder-common'):
        do_openstack_upgrade(configs=CONFIGS)
    CONFIGS.write_all()
    configure_https()
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('shared-db-relation-joined')
def db_joined():
    """Request a database and user from mysql, advertising our address."""
    conf = config()
    relation_set(database=conf['database'], username=conf['database-user'],
                 hostname=unit_get('private-address'))
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('shared-db-relation-changed')
@restart_on_change(restart_map())
def db_changed():
    """Render cinder.conf once DB credentials arrive; leader runs db sync."""
    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return
    CONFIGS.write(CINDER_CONF)
    if not eligible_leader(CLUSTER_RES):
        return
    juju_log('Cluster leader, performing db sync')
    migrate_database()
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('amqp-relation-joined')
def amqp_joined():
    """Request a rabbitmq user and vhost per charm config."""
    conf = config()
    relation_set(username=conf['rabbit-user'], vhost=conf['rabbit-vhost'])
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('amqp-relation-changed')
@restart_on_change(restart_map())
def amqp_changed():
    """Render cinder.conf once rabbitmq credentials are available."""
    if 'amqp' in CONFIGS.complete_contexts():
        CONFIGS.write(CINDER_CONF)
    else:
        juju_log('amqp relation incomplete. Peer not ready?')
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('identity-service-relation-joined')
def identity_joined(rid=None):
    """Register cinder's endpoint URLs with keystone (leader only)."""
    if not eligible_leader(CLUSTER_RES):
        return

    conf = config()

    api_port = conf['api-listening-port']
    # $(tenant_id)s is a keystone endpoint template token, not interpolation.
    endpoint = canonical_url(CONFIGS) + ':%s/v1/$(tenant_id)s' % api_port

    relation_set(relation_id=rid,
                 region=conf['region'],
                 service='cinder',
                 public_url=endpoint,
                 internal_url=endpoint,
                 admin_url=endpoint)
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('identity-service-relation-changed')
@restart_on_change(restart_map())
def identity_changed():
    """Write keystone authtoken settings and refresh the SSL frontend."""
    if 'identity-service' not in CONFIGS.complete_contexts():
        juju_log('identity-service relation incomplete. Peer not ready?')
        return
    CONFIGS.write(CINDER_API_CONF)
    configure_https()
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('ceph-relation-joined')
def ceph_joined():
    """Ensure /etc/ceph exists and the ceph client tools are installed."""
    if not os.path.isdir('/etc/ceph'):
        os.mkdir('/etc/ceph')
    apt_install('ceph-common', fatal=True)
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('ceph-relation-changed')
@restart_on_change(restart_map())
def ceph_changed():
    """Configure cinder for RBD volumes once ceph credentials are complete."""
    if 'ceph' not in CONFIGS.complete_contexts():
        juju_log('ceph relation incomplete. Peer not ready?')
        return
    svc = service_name()
    # Write the ceph keyring readable by the cinder daemons.
    if not ensure_ceph_keyring(service=svc,
                               user='cinder', group='cinder'):
        juju_log('Could not create ceph keyring: peer not ready?')
        return
    CONFIGS.write(CEPH_CONF)
    CONFIGS.write(CINDER_CONF)
    # Export CEPH_ARGS so cinder-volume authenticates as this service's user.
    set_ceph_env_variables(service=svc)

    # Only one unit should create the pool.
    if eligible_leader(CLUSTER_RES):
        _config = config()
        ensure_ceph_pool(service=svc,
                         replicas=_config['ceph-osd-replication-count'])
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('cluster-relation-changed',
            'cluster-relation-departed')
@restart_on_change(restart_map())
def cluster_changed():
    # Peer membership affects haproxy backends; re-render everything.
    CONFIGS.write_all()
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('ha-relation-joined')
def ha_joined():
    """Hand the hacluster subordinate the resources it should manage.

    Publishes the VIP and haproxy init resources (plus a clone so haproxy
    runs on every unit) over the ha relation for pacemaker configuration.
    """
    # NOTE: renamed from 'config', which shadowed the imported config()
    # helper within this function.
    cluster_config = get_hacluster_config()
    resources = {
        'res_cinder_vip': 'ocf:heartbeat:IPaddr2',
        'res_cinder_haproxy': 'lsb:haproxy'
    }

    vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
        (cluster_config['vip'], cluster_config['vip_cidr'],
         cluster_config['vip_iface'])
    resource_params = {
        'res_cinder_vip': vip_params,
        'res_cinder_haproxy': 'op monitor interval="5s"'
    }
    init_services = {
        'res_cinder_haproxy': 'haproxy'
    }
    # Clone the haproxy resource across all cluster nodes.
    clones = {
        'cl_cinder_haproxy': 'res_cinder_haproxy'
    }
    relation_set(init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('ha-relation-changed')
def ha_changed():
    """React to hacluster reporting a completed cluster.

    Once clustered, the leader re-advertises keystone endpoints so they
    point at the VIP instead of unit addresses.
    """
    clustered = relation_get('clustered')
    # relation_get() yields a string or None; the original
    # 'not clustered or clustered in [None, "None", ""]' collapses to a
    # single membership test over the values that mean "not clustered".
    if clustered in (None, '', 'None'):
        juju_log('ha_changed: hacluster subordinate not fully clustered.')
        return
    if not is_leader(CLUSTER_RES):
        juju_log('ha_changed: hacluster complete but we are not leader.')
        return
    juju_log('Cluster configured, notifying other services and updating '
             'keystone endpoint configuration')
    for rid in relation_ids('identity-service'):
        identity_joined(rid=rid)
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('image-service-relation-changed')
@restart_on_change(restart_map())
def image_service_changed():
    # Glance endpoint data is consumed via cinder.conf; re-render it.
    CONFIGS.write(CINDER_CONF)
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.hook('amqp-relation-broken',
            'ceph-relation-broken',
            'identity-service-relation-broken',
            'image-service-relation-broken',
            'shared-db-relation-broken')
@restart_on_change(restart_map())
def relation_broken():
    # A departed relation invalidates rendered settings; rewrite everything.
    CONFIGS.write_all()
|
||||||
|
|
||||||
|
|
||||||
|
def configure_https():
    '''
    Enables SSL API Apache config if appropriate and kicks identity-service
    with any required api updates.
    '''
    # need to write all to ensure changes to the entire request pipeline
    # propagate (c-api, haproxy, apache)
    CONFIGS.write_all()
    # Pick the Apache action based on whether the https contexts are ready.
    if 'https' in CONFIGS.complete_contexts():
        action = 'a2ensite'
    else:
        action = 'a2dissite'
    check_call([action, 'openstack_https_frontend'])

    # Re-publish endpoints so keystone sees http/https URL changes.
    for rid in relation_ids('identity-service'):
        identity_joined(rid=rid)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    # Dispatch to the handler registered for the invoked hook name.
    try:
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        # Not every relation needs a handler; log and exit cleanly so
        # juju does not mark the hook as failed.
        juju_log('Unknown hook {} - skipping.'.format(e))


if __name__ == '__main__':
    main()
|
||||||
361
hooks/cinder_utils.py
Normal file
361
hooks/cinder_utils.py
Normal file
@@ -0,0 +1,361 @@
|
|||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
from collections import OrderedDict
|
||||||
|
from copy import copy
|
||||||
|
|
||||||
|
import cinder_contexts
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
config,
|
||||||
|
relation_ids,
|
||||||
|
log,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.fetch import (
|
||||||
|
apt_install,
|
||||||
|
apt_update,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
mounts,
|
||||||
|
umount,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.storage.linux.ceph import (
|
||||||
|
create_pool as ceph_create_pool,
|
||||||
|
pool_exists as ceph_pool_exists,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.hahelpers.cluster import (
|
||||||
|
eligible_leader,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.storage.linux.utils import (
|
||||||
|
is_block_device,
|
||||||
|
zap_disk,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.storage.linux.lvm import (
|
||||||
|
create_lvm_physical_volume,
|
||||||
|
create_lvm_volume_group,
|
||||||
|
deactivate_lvm_volume_group,
|
||||||
|
is_lvm_physical_volume,
|
||||||
|
remove_lvm_physical_volume,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.storage.linux.loopback import (
|
||||||
|
ensure_loopback_device,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.openstack import (
|
||||||
|
templating,
|
||||||
|
context,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.openstack.utils import (
|
||||||
|
configure_installation_source,
|
||||||
|
get_os_codename_package,
|
||||||
|
get_os_codename_install_source,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# Packages required regardless of which cinder services are enabled.
COMMON_PACKAGES = [
    'apache2',
    'cinder-common',
    'gdisk',
    'haproxy',
    'python-jinja2',
    'python-keystoneclient',
    'python-mysqldb',
    'qemu-utils',
]

# Per-service package sets, selected via the 'enabled-services' config.
API_PACKAGES = ['cinder-api']
VOLUME_PACKAGES = ['cinder-volume']
SCHEDULER_PACKAGES = ['cinder-scheduler']

# Size used when backing storage with a loopback file and none is given.
DEFAULT_LOOPBACK_SIZE = '5G'

# Cluster resource used to determine leadership when hacluster'd
CLUSTER_RES = 'res_cinder_vip'


class CinderCharmError(Exception):
    """Raised for unrecoverable charm-level failures (bad storage, etc.)."""
    pass

# Paths of the configuration files this charm renders/manages.
CINDER_CONF = '/etc/cinder/cinder.conf'
CINDER_API_CONF = '/etc/cinder/api-paste.ini'
CEPH_CONF = '/etc/ceph/ceph.conf'
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
APACHE_SITE_CONF = '/etc/apache2/sites-available/openstack_https_frontend'
# Apache >= 2.4 requires site files to carry a .conf suffix.
APACHE_SITE_24_CONF = '/etc/apache2/sites-available/' \
    'openstack_https_frontend.conf'

TEMPLATES = 'templates/'
# Map config files to hook contexts and services that will be associated
# with file in restart_on_changes()'s service map.
CONFIG_FILES = OrderedDict([
    (CINDER_CONF, {
        'hook_contexts': [context.SharedDBContext(),
                          context.AMQPContext(),
                          context.ImageServiceContext(),
                          cinder_contexts.CephContext(),
                          cinder_contexts.HAProxyContext(),
                          cinder_contexts.ImageServiceContext()],
        'services': ['cinder-api', 'cinder-volume',
                     'cinder-scheduler', 'haproxy']
    }),
    (CINDER_API_CONF, {
        'hook_contexts': [context.IdentityServiceContext()],
        'services': ['cinder-api'],
    }),
    (CEPH_CONF, {
        'hook_contexts': [context.CephContext()],
        'services': ['cinder-volume']
    }),
    (HAPROXY_CONF, {
        'hook_contexts': [context.HAProxyContext(),
                          cinder_contexts.HAProxyContext()],
        'services': ['haproxy'],
    }),
    (APACHE_SITE_CONF, {
        'hook_contexts': [cinder_contexts.ApacheSSLContext()],
        'services': ['apache2'],
    }),
    (APACHE_SITE_24_CONF, {
        'hook_contexts': [cinder_contexts.ApacheSSLContext()],
        'services': ['apache2'],
    }),
])
|
||||||
|
|
||||||
|
|
||||||
|
def register_configs():
    """
    Register config files with their respective contexts.
    Registration of some configs may not be required depending on
    existence of certain relations.
    """
    # if called without anything installed (eg during install hook)
    # just default to earliest supported release. configs dont get touched
    # till post-install, anyway.
    release = (get_os_codename_package('cinder-common', fatal=False)
               or 'folsom')
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    confs = [CINDER_API_CONF, CINDER_CONF, HAPROXY_CONF]

    if relation_ids('ceph'):
        # need to create this early, new peers will have a relation during
        # registration before they've run the ceph hooks to create the
        # directory.
        ceph_dir = os.path.dirname(CEPH_CONF)
        if not os.path.isdir(ceph_dir):
            os.mkdir(ceph_dir)
        confs.append(CEPH_CONF)

    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

    # Apache >= 2.4 uses conf-available and the .conf site suffix.
    if os.path.exists('/etc/apache2/conf-available'):
        site_conf = APACHE_SITE_24_CONF
    else:
        site_conf = APACHE_SITE_CONF
    configs.register(site_conf, CONFIG_FILES[site_conf]['hook_contexts'])
    return configs
|
||||||
|
|
||||||
|
|
||||||
|
def juju_log(msg):
    """Log msg to the juju log, prefixed with the charm name."""
    log('[cinder] %s' % msg)
|
||||||
|
|
||||||
|
|
||||||
|
def determine_packages():
    '''
    Determine list of packages required for the currently enabled services.

    :returns: list of package names
    '''
    pkgs = copy(COMMON_PACKAGES)
    per_service = (('api', API_PACKAGES),
                   ('volume', VOLUME_PACKAGES),
                   ('scheduler', SCHEDULER_PACKAGES))
    for svc, svc_pkgs in per_service:
        if service_enabled(svc):
            pkgs.extend(svc_pkgs)
    return pkgs
|
||||||
|
|
||||||
|
|
||||||
|
def service_enabled(service):
    '''
    Determine if a specific cinder service is enabled in charm configuration.

    :param service: str: cinder service name to query (volume, scheduler,
                         api, all)

    :returns: boolean: True if service is enabled in config, False if not.
    '''
    enabled = config()['enabled-services']
    # 'all' enables everything; otherwise look for the name in the list.
    return enabled == 'all' or service in enabled
|
||||||
|
|
||||||
|
|
||||||
|
def restart_map():
    '''
    Determine the correct resource map to be passed to
    charmhelpers.core.restart_on_change() based on the services configured.

    :returns: dict: A dictionary mapping config file to lists of services
                    that should be restarted when file changes.
    '''
    _map = []
    # NOTE: iteritems() is Python 2 only; this charm targets py2.7.
    for f, ctxt in CONFIG_FILES.iteritems():
        svcs = []
        for svc in ctxt['services']:
            if svc.startswith('cinder-'):
                # cinder daemons only restart when their service is enabled.
                if service_enabled(svc.split('-')[1]):
                    svcs.append(svc)
            else:
                # Non-cinder services (haproxy, apache2) always apply.
                svcs.append(svc)
        if svcs:
            _map.append((f, svcs))
    return OrderedDict(_map)
|
||||||
|
|
||||||
|
|
||||||
|
def prepare_lvm_storage(block_device, volume_group):
    '''
    Ensures block_device is initialized as a LVM PV and creates volume_group.
    Assumes block device is clean and will raise if storage is already
    initialized as a PV.

    :param block_device: str: Full path to block device to be prepared.
    :param volume_group: str: Name of volume group to be created with
                              block_device as backing PV.

    :returns: None or raises CinderCharmError if storage is unclean.
    '''
    # (dead 'e = None' pre-assignment removed; the except binds e itself)
    if is_lvm_physical_volume(block_device):
        juju_log('ERROR: Could not prepare LVM storage: %s is already '
                 'initialized as LVM physical device.' % block_device)
        raise CinderCharmError

    try:
        create_lvm_physical_volume(block_device)
        create_lvm_volume_group(volume_group, block_device)
    except Exception as e:
        # Log the underlying failure before raising the charm error.
        juju_log('Could not prepare LVM storage on %s.' % block_device)
        juju_log(e)
        raise CinderCharmError
|
||||||
|
|
||||||
|
|
||||||
|
def clean_storage(block_device):
    '''
    Ensures a block device is clean. That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    # Unmount anywhere the device is currently mounted.
    for mount_point, device in mounts():
        if device != block_device:
            continue
        juju_log('clean_storage(): Found %s mounted @ %s, unmounting.' %
                 (device, mount_point))
        umount(mount_point, persist=True)

    # Tear down LVM state if present, otherwise wipe partition tables.
    if is_lvm_physical_volume(block_device):
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
    else:
        zap_disk(block_device)
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_block_device(block_device):
    '''
    Confirm block_device, create as loopback if necessary.

    :param block_device: str: Full path of block device to ensure.

    :returns: str: Full path of ensured block device.
    '''
    if block_device in ('None', 'none', None):
        juju_log('prepare_storage(): Missing required input: '
                 'block_device=%s.' % block_device)
        raise CinderCharmError

    if block_device.startswith('/dev/'):
        # Already a device node path.
        bdev = block_device
    elif block_device.startswith('/'):
        # A file path, optionally "path|size"; back it with a loopback dev.
        pieces = block_device.split('|')
        if len(pieces) == 2:
            fpath, size = pieces
        else:
            fpath, size = block_device, DEFAULT_LOOPBACK_SIZE
        bdev = ensure_loopback_device(fpath, size)
    else:
        # Bare device name, e.g. 'sdb'.
        bdev = '/dev/%s' % block_device

    if not is_block_device(bdev):
        juju_log('Failed to locate valid block device at %s' % bdev)
        raise CinderCharmError

    return bdev
|
||||||
|
|
||||||
|
|
||||||
|
def migrate_database():
    '''Runs cinder-manage to initialize a new database or migrate existing'''
    # cinder-manage handles both fresh init and schema upgrades.
    cmd = ['cinder-manage', 'db', 'sync']
    subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_ceph_pool(service, replicas):
    '''Creates a ceph pool for service if one does not exist'''
    # TODO: Ditto about moving somewhere sharable.
    # Pool is named after the service; the existence check keeps this
    # idempotent across repeated hook runs.
    if not ceph_pool_exists(service=service, name=service):
        ceph_create_pool(service=service, name=service, replicas=replicas)
|
||||||
|
|
||||||
|
|
||||||
|
def set_ceph_env_variables(service):
    """Point cinder-volume at a non-admin ceph user via CEPH_ARGS.

    XXX: Horrid kludge to make cinder-volume use a different ceph
    username than admin.
    """
    # Read via a context manager: the original open(...).read() leaked
    # the file descriptor until GC.
    with open('/etc/environment', 'r') as env_file:
        env = env_file.read()
    if 'CEPH_ARGS' not in env:
        with open('/etc/environment', 'a') as out:
            out.write('CEPH_ARGS="--id %s"\n' % service)
    # Upstart override ensures the daemon itself sees the variable.
    with open('/etc/init/cinder-volume.override', 'w') as out:
        out.write('env CEPH_ARGS="--id %s"\n' % service)
|
||||||
|
|
||||||
|
|
||||||
|
def do_openstack_upgrade(configs):
    """
    Perform an upgrade of cinder. Takes care of upgrading packages, rewriting
    configs + database migration and potentially any other post-upgrade
    actions.

    :param configs: The charms main OSConfigRenderer object.

    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)

    juju_log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    # Keep packaged conffile handling non-interactive during the upgrade.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update()
    apt_install(packages=determine_packages(), options=dpkg_opts, fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()

    # Only the leader runs the cluster-wide database migration.
    if eligible_leader(CLUSTER_RES):
        migrate_database()
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
1
hooks/identity-service-relation-broken
Symbolic link
1
hooks/identity-service-relation-broken
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
1
hooks/image-service-relation-broken
Symbolic link
1
hooks/image-service-relation-broken
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1,781 +0,0 @@
|
|||||||
#!/bin/bash -e
|
|
||||||
|
|
||||||
# Common utility functions used across all OpenStack charms.
|
|
||||||
|
|
||||||
# Log an error to the juju log and abort the hook with a failure status.
error_out() {
  juju-log "$CHARM ERROR: $@"
  exit 1
}
|
|
||||||
|
|
||||||
function service_ctl_status {
|
|
||||||
# Return 0 if a service is running, 1 otherwise.
|
|
||||||
local svc="$1"
|
|
||||||
local status=$(service $svc status | cut -d/ -f1 | awk '{ print $2 }')
|
|
||||||
case $status in
|
|
||||||
"start") return 0 ;;
|
|
||||||
"stop") return 1 ;;
|
|
||||||
*) error_out "Unexpected status of service $svc: $status" ;;
|
|
||||||
esac
|
|
||||||
}
|
|
||||||
|
|
||||||
function service_ctl {
|
|
||||||
# control a specific service, or all (as defined by $SERVICES)
|
|
||||||
# service restarts will only occur depending on global $CONFIG_CHANGED,
|
|
||||||
# which should be updated in charm's set_or_update().
|
|
||||||
local config_changed=${CONFIG_CHANGED:-True}
|
|
||||||
if [[ $1 == "all" ]] ; then
|
|
||||||
ctl="$SERVICES"
|
|
||||||
else
|
|
||||||
ctl="$1"
|
|
||||||
fi
|
|
||||||
action="$2"
|
|
||||||
if [[ -z "$ctl" ]] || [[ -z "$action" ]] ; then
|
|
||||||
error_out "ERROR service_ctl: Not enough arguments"
|
|
||||||
fi
|
|
||||||
|
|
||||||
for i in $ctl ; do
|
|
||||||
case $action in
|
|
||||||
"start")
|
|
||||||
service_ctl_status $i || service $i start ;;
|
|
||||||
"stop")
|
|
||||||
service_ctl_status $i && service $i stop || return 0 ;;
|
|
||||||
"restart")
|
|
||||||
if [[ "$config_changed" == "True" ]] ; then
|
|
||||||
service_ctl_status $i && service $i restart || service $i start
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
if [[ $? != 0 ]] ; then
|
|
||||||
juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
# all configs should have been reloaded on restart of all services, reset
|
|
||||||
# flag if its being used.
|
|
||||||
if [[ "$action" == "restart" ]] && [[ -n "$CONFIG_CHANGED" ]] &&
|
|
||||||
[[ "$ctl" == "all" ]]; then
|
|
||||||
CONFIG_CHANGED="False"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
function configure_install_source {
|
|
||||||
# Setup and configure installation source based on a config flag.
|
|
||||||
local src="$1"
|
|
||||||
|
|
||||||
# Default to installing from the main Ubuntu archive.
|
|
||||||
[[ $src == "distro" ]] || [[ -z "$src" ]] && return 0
|
|
||||||
|
|
||||||
. /etc/lsb-release
|
|
||||||
|
|
||||||
# standard 'ppa:someppa/name' format.
|
|
||||||
if [[ "${src:0:4}" == "ppa:" ]] ; then
|
|
||||||
juju-log "$CHARM: Configuring installation from custom src ($src)"
|
|
||||||
add-apt-repository -y "$src" || error_out "Could not configure PPA access."
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# standard 'deb http://url/ubuntu main' entries. gpg key ids must
|
|
||||||
# be appended to the end of url after a |, ie:
|
|
||||||
# 'deb http://url/ubuntu main|$GPGKEYID'
|
|
||||||
if [[ "${src:0:3}" == "deb" ]] ; then
|
|
||||||
juju-log "$CHARM: Configuring installation from custom src URL ($src)"
|
|
||||||
if echo "$src" | grep -q "|" ; then
|
|
||||||
# gpg key id tagged to end of url folloed by a |
|
|
||||||
url=$(echo $src | cut -d'|' -f1)
|
|
||||||
key=$(echo $src | cut -d'|' -f2)
|
|
||||||
juju-log "$CHARM: Importing repository key: $key"
|
|
||||||
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "$key" || \
|
|
||||||
juju-log "$CHARM WARN: Could not import key from keyserver: $key"
|
|
||||||
else
|
|
||||||
juju-log "$CHARM No repository key specified."
|
|
||||||
url="$src"
|
|
||||||
fi
|
|
||||||
echo "$url" > /etc/apt/sources.list.d/juju_deb.list
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Cloud Archive
|
|
||||||
if [[ "${src:0:6}" == "cloud:" ]] ; then
|
|
||||||
|
|
||||||
# current os releases supported by the UCA.
|
|
||||||
local cloud_archive_versions="folsom grizzly"
|
|
||||||
|
|
||||||
local ca_rel=$(echo $src | cut -d: -f2)
|
|
||||||
local u_rel=$(echo $ca_rel | cut -d- -f1)
|
|
||||||
local os_rel=$(echo $ca_rel | cut -d- -f2 | cut -d/ -f1)
|
|
||||||
|
|
||||||
[[ "$u_rel" != "$DISTRIB_CODENAME" ]] &&
|
|
||||||
error_out "Cannot install from Cloud Archive pocket $src " \
|
|
||||||
"on this Ubuntu version ($DISTRIB_CODENAME)!"
|
|
||||||
|
|
||||||
valid_release=""
|
|
||||||
for rel in $cloud_archive_versions ; do
|
|
||||||
if [[ "$os_rel" == "$rel" ]] ; then
|
|
||||||
valid_release=1
|
|
||||||
juju-log "Installing OpenStack ($os_rel) from the Ubuntu Cloud Archive."
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
if [[ -z "$valid_release" ]] ; then
|
|
||||||
error_out "OpenStack release ($os_rel) not supported by "\
|
|
||||||
"the Ubuntu Cloud Archive."
|
|
||||||
fi
|
|
||||||
|
|
||||||
# CA staging repos are standard PPAs.
|
|
||||||
if echo $ca_rel | grep -q "staging" ; then
|
|
||||||
add-apt-repository -y ppa:ubuntu-cloud-archive/${os_rel}-staging
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# the others are LP-external deb repos.
|
|
||||||
case "$ca_rel" in
|
|
||||||
"$u_rel-$os_rel"|"$u_rel-$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;;
|
|
||||||
"$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;;
|
|
||||||
"$u_rel-$os_rel"|"$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;;
|
|
||||||
"$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;;
|
|
||||||
*) error_out "Invalid Cloud Archive repo specified: $src"
|
|
||||||
esac
|
|
||||||
|
|
||||||
apt-get -y install ubuntu-cloud-keyring
|
|
||||||
entry="deb http://ubuntu-cloud.archive.canonical.com/ubuntu $pocket main"
|
|
||||||
echo "$entry" \
|
|
||||||
>/etc/apt/sources.list.d/ubuntu-cloud-archive-$DISTRIB_CODENAME.list
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
error_out "Invalid installation source specified in config: $src"
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
get_os_codename_install_source() {
|
|
||||||
# derive the openstack release provided by a supported installation source.
|
|
||||||
local rel="$1"
|
|
||||||
local codename="unknown"
|
|
||||||
. /etc/lsb-release
|
|
||||||
|
|
||||||
# map ubuntu releases to the openstack version shipped with it.
|
|
||||||
if [[ "$rel" == "distro" ]] ; then
|
|
||||||
case "$DISTRIB_CODENAME" in
|
|
||||||
"oneiric") codename="diablo" ;;
|
|
||||||
"precise") codename="essex" ;;
|
|
||||||
"quantal") codename="folsom" ;;
|
|
||||||
"raring") codename="grizzly" ;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
|
|
||||||
# derive version from cloud archive strings.
|
|
||||||
if [[ "${rel:0:6}" == "cloud:" ]] ; then
|
|
||||||
rel=$(echo $rel | cut -d: -f2)
|
|
||||||
local u_rel=$(echo $rel | cut -d- -f1)
|
|
||||||
local ca_rel=$(echo $rel | cut -d- -f2)
|
|
||||||
if [[ "$u_rel" == "$DISTRIB_CODENAME" ]] ; then
|
|
||||||
case "$ca_rel" in
|
|
||||||
"folsom"|"folsom/updates"|"folsom/proposed"|"folsom/staging")
|
|
||||||
codename="folsom" ;;
|
|
||||||
"grizzly"|"grizzly/updates"|"grizzly/proposed"|"grizzly/staging")
|
|
||||||
codename="grizzly" ;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# have a guess based on the deb string provided
|
|
||||||
if [[ "${rel:0:3}" == "deb" ]] || \
|
|
||||||
[[ "${rel:0:3}" == "ppa" ]] ; then
|
|
||||||
CODENAMES="diablo essex folsom grizzly havana"
|
|
||||||
for cname in $CODENAMES; do
|
|
||||||
if echo $rel | grep -q $cname; then
|
|
||||||
codename=$cname
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
echo $codename
|
|
||||||
}
|
|
||||||
|
|
||||||
# Map an installed package's version string to its OpenStack release
# codename (e.g. 2013.1 -> grizzly). Prints nothing for unknown versions.
get_os_codename_package() {
  local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none"
  pkg_vers=$(echo $pkg_vers | cut -d: -f2) # epochs
  case "${pkg_vers:0:6}" in
    "2011.2") echo "diablo" ;;
    "2012.1") echo "essex" ;;
    "2012.2") echo "folsom" ;;
    "2013.1") echo "grizzly" ;;
    "2013.2") echo "havana" ;;
  esac
}
|
|
||||||
|
|
||||||
# Inverse of get_os_codename_package: codename -> release version string.
get_os_version_codename() {
  case "$1" in
    "diablo") echo "2011.2" ;;
    "essex") echo "2012.1" ;;
    "folsom") echo "2012.2" ;;
    "grizzly") echo "2013.1" ;;
    "havana") echo "2013.2" ;;
  esac
}
|
|
||||||
|
|
||||||
get_ip() {
|
|
||||||
dpkg -l | grep -q python-dnspython || {
|
|
||||||
apt-get -y install python-dnspython 2>&1 > /dev/null
|
|
||||||
}
|
|
||||||
hostname=$1
|
|
||||||
python -c "
|
|
||||||
import dns.resolver
|
|
||||||
import socket
|
|
||||||
try:
|
|
||||||
# Test to see if already an IPv4 address
|
|
||||||
socket.inet_aton('$hostname')
|
|
||||||
print '$hostname'
|
|
||||||
except socket.error:
|
|
||||||
try:
|
|
||||||
answers = dns.resolver.query('$hostname', 'A')
|
|
||||||
if answers:
|
|
||||||
print answers[0].address
|
|
||||||
except dns.resolver.NXDOMAIN:
|
|
||||||
pass
|
|
||||||
"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Common storage routines used by cinder, nova-volume and swift-storage.
|
|
||||||
clean_storage() {
|
|
||||||
# if configured to overwrite existing storage, we unmount the block-dev
|
|
||||||
# if mounted and clear any previous pv signatures
|
|
||||||
local block_dev="$1"
|
|
||||||
juju-log "Cleaining storage '$block_dev'"
|
|
||||||
if grep -q "^$block_dev" /proc/mounts ; then
|
|
||||||
mp=$(grep "^$block_dev" /proc/mounts | awk '{ print $2 }')
|
|
||||||
juju-log "Unmounting $block_dev from $mp"
|
|
||||||
umount "$mp" || error_out "ERROR: Could not unmount storage from $mp"
|
|
||||||
fi
|
|
||||||
if pvdisplay "$block_dev" >/dev/null 2>&1 ; then
|
|
||||||
juju-log "Removing existing LVM PV signatures from $block_dev"
|
|
||||||
|
|
||||||
# deactivate any volgroups that may be built on this dev
|
|
||||||
vg=$(pvdisplay $block_dev | grep "VG Name" | awk '{ print $3 }')
|
|
||||||
if [[ -n "$vg" ]] ; then
|
|
||||||
juju-log "Deactivating existing volume group: $vg"
|
|
||||||
vgchange -an "$vg" ||
|
|
||||||
error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?"
|
|
||||||
fi
|
|
||||||
echo "yes" | pvremove -ff "$block_dev" ||
|
|
||||||
error_out "Could not pvremove $block_dev"
|
|
||||||
else
|
|
||||||
juju-log "Zapping disk of all GPT and MBR structures"
|
|
||||||
sgdisk --zap-all $block_dev ||
|
|
||||||
error_out "Unable to zap $block_dev"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
function get_block_device() {
  # Resolve a storage spec to a full block-device path, printed on stdout.
  # $1: either a block device name ("/dev/vdb" or "vdb"), or a file path,
  #     optionally with a size suffix: "/path/to/file|5G" (default 5G).
  #     File-backed storage is created if needed and attached to a loop
  #     device.
  # Returns 0 on success, 1 on error.
  local input="$1"

  case "$input" in
    /dev/*) [[ ! -b "$input" ]] && error_out "$input does not exist."
            echo "$input"; return 0;;
    /*) :;;   # an absolute non-/dev path: fall through to file handling
    *) [[ ! -b "/dev/$input" ]] && error_out "/dev/$input does not exist."
       echo "/dev/$input"; return 0;;
  esac

  # this represents a file
  # support "/path/to/file|5G"
  local fpath size
  if [ "${input#*|}" != "${input}" ]; then
    size=${input##*|}
    fpath=${input%|*}
  else
    fpath=${input}
    size=5G
  fi

  ## loop devices are not namespaced. This is bad for containers.
  ## it means that the output of 'losetup' may have the given $fpath
  ## in it, but that may not represent this containers $fpath, but
  ## another containers.  To address that, we really need to
  ## allow some uniq container-id to be expanded within path.
  ## TODO: find a unique container-id that will be consistent for
  ##       this container throughout its lifetime and expand it
  ##       in the fpath.
  # fpath=${fpath//%{id}/$THAT_ID}

  local found=""
  # parse through 'losetup -a' output, looking for this file
  # output is expected to look like:
  #   /dev/loop0: [0807]:961814 (/tmp/my.img)
  # The awk program fails (exit 1) if more than one match is found.
  found=$(losetup -a |
    awk 'BEGIN { found=0; }
         $3 == f { sub(/:$/,"",$1); print $1; found=found+1; }
         END { if( found == 0 || found == 1 ) { exit(0); }; exit(1); }' \
         f="($fpath)")

  if [ $? -ne 0 ]; then
    echo "multiple devices found for $fpath: $found" 1>&2
    return 1;
  fi

  # An existing loop device already backs this file: reuse it.
  # BUGFIX: this previously returned 1 despite having successfully
  # resolved the device, signalling failure to callers on the reuse path.
  [ -n "$found" -a -b "$found" ] && { echo "$found"; return 0; }

  if [ -n "$found" ]; then
    echo "confused, $found is not a block device for $fpath";
    return 1;
  fi

  # no existing device was found, create one
  mkdir -p "${fpath%/*}"
  truncate --size "$size" "$fpath" ||
    { echo "failed to create $fpath of size $size"; return 1; }

  found=$(losetup --find --show "$fpath") ||
    { echo "failed to setup loop device for $fpath" 1>&2; return 1; }

  echo "$found"
  return 0
}
|
|
||||||
|
|
||||||
HAPROXY_CFG=/etc/haproxy/haproxy.cfg
HAPROXY_DEFAULT=/etc/default/haproxy
##########################################################################
# Description: Configures HAProxy services for Openstack API's
# Parameters:
#   Space delimited list of colon-delimited entries of the form
#   service_name:haproxy_listen_port:api_listen_port[:mode] for which
#   haproxy service configuration should be generated.  The function
#   assumes the name of the peer relation is 'cluster' and that every
#   service unit in the peer relation is running the same services.
#
#   Entries that do not specify :mode default to http.
#
# Example
#   configure_haproxy cinder_api:8776:8756:tcp nova_api:8774:8764:http
##########################################################################
configure_haproxy() {
  local address=$(unit-get private-address)
  local name=${JUJU_UNIT_NAME////-}
  # Rewrite the global/defaults/stats stanzas from scratch on every call.
  cat > $HAPROXY_CFG << EOF
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 20000
user haproxy
group haproxy
spread-checks 0

defaults
log global
mode http
option httplog
option dontlognull
retries 3
timeout queue 1000
timeout connect 1000
timeout client 30000
timeout server 30000

listen stats :8888
mode http
stats enable
stats hide-version
stats realm Haproxy\ Statistics
stats uri /
stats auth admin:password

EOF
  for entry in $@; do
    # Split service_name:haproxy_port:api_port[:mode].
    local service_name=$(echo $entry | cut -d : -f 1)
    local haproxy_listen_port=$(echo $entry | cut -d : -f 2)
    local api_listen_port=$(echo $entry | cut -d : -f 3)
    local mode=$(echo $entry | cut -d : -f 4)
    [[ -z "$mode" ]] && mode="http"
    juju-log "Adding haproxy configuration entry for $entry "\
             "($haproxy_listen_port -> $api_listen_port)"
    # Frontend stanza plus the local unit's own backend server line.
    cat >> $HAPROXY_CFG << EOF
listen $service_name 0.0.0.0:$haproxy_listen_port
balance roundrobin
mode $mode
option ${mode}log
server $name $address:$api_listen_port check
EOF
    # Append a backend server line for every peer in the cluster relation.
    local r_id=""
    local unit=""
    for r_id in $(relation-ids cluster); do
      for unit in $(relation-list -r $r_id); do
        local unit_name=${unit////-}
        local unit_address=$(relation-get -r $r_id private-address $unit)
        [ -n "$unit_address" ] &&
          echo "  server $unit_name $unit_address:$api_listen_port check" \
            >> $HAPROXY_CFG
      done
    done
  done
  # Debian/Ubuntu's init script refuses to start haproxy unless ENABLED=1.
  echo "ENABLED=1" > $HAPROXY_DEFAULT
  service haproxy restart
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: Query HA interface to determine if cluster is configured
# Returns: 0 if configured, 1 if not configured
##########################################################################
is_clustered() {
  # A unit counts as clustered when any unit on an 'ha' relation has set
  # the 'clustered' key to a non-empty value.
  local r_id=""
  local unit=""
  for r_id in $(relation-ids ha); do
    [ -z "$r_id" ] && continue
    for unit in $(relation-list -r $r_id); do
      clustered=$(relation-get -r $r_id clustered $unit)
      [ -z "$clustered" ] && continue
      juju-log "Unit is haclustered"
      return 0
    done
  done
  juju-log "Unit is not haclustered"
  return 1
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: Return a list of all peers in cluster relations
##########################################################################
peer_units() {
  # Accumulate unit names from every 'cluster' relation and print them
  # space-separated on stdout.
  local rid=""
  local found=""
  for rid in $(relation-ids cluster); do
    found="$found $(relation-list -r $rid)"
  done
  echo $found
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: Determines whether the current unit is the oldest of all
#              its peers - supports partial leader election
# Returns: 0 if oldest, 1 if not
##########################################################################
oldest_peer() {
  # "Oldest" means the lowest unit number (the N in service/N).
  peers=$1
  local my_seq=$(echo $JUJU_UNIT_NAME | cut -d / -f 2)
  local peer_seq=""
  for peer in $peers; do
    echo "Comparing $JUJU_UNIT_NAME with peers: $peers"
    peer_seq=$(echo $peer | cut -d / -f 2)
    if (( peer_seq < my_seq )); then
      juju-log "Not oldest peer; deferring"
      return 1
    fi
  done
  juju-log "Oldest peer; might take charge?"
  return 0
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: Determines whether the current service units is the
#              leader within a) a cluster of its peers or b) across a
#              set of unclustered peers.
# Parameters: CRM resource to check ownership of if clustered
# Returns: 0 if leader, 1 if not
##########################################################################
eligible_leader() {
  # Clustered case: defer to the CRM resource owner.
  if is_clustered; then
    is_leader $1 && return 0
    juju-log 'Deferring action to CRM leader'
    return 1
  fi
  # Unclustered case: the oldest peer (lowest unit number) leads.
  peers=$(peer_units)
  if [ -n "$peers" ] && ! oldest_peer "$peers"; then
    juju-log 'Deferring action to oldest service unit.'
    return 1
  fi
  return 0
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: Query Cluster peer interface to see if peered
# Returns: 0 if peered, 1 if not peered
##########################################################################
is_peered() {
  # Peered when a 'cluster' relation exists and lists at least one unit.
  local r_id=$(relation-ids cluster)
  if [ -n "$r_id" ] && [ -n "$(relation-list -r $r_id)" ]; then
    juju-log "Unit peered"
    return 0
  fi
  juju-log "Unit not peered"
  return 1
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: Determines whether host is owner of clustered services
# Parameters: Name of CRM resource to check ownership of
# Returns: 0 if leader, 1 if not leader
##########################################################################
is_leader() {
  # Leader when pacemaker's crm tool reports this hostname as the owner
  # of the given resource.  Not leader when crm is not even installed.
  hostname=$(hostname)
  if [ -x /usr/sbin/crm ] && crm resource show $1 | grep -q $hostname; then
    juju-log "$hostname is cluster leader."
    return 0
  fi
  juju-log "$hostname is not cluster leader."
  return 1
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: Determines whether enough data has been provided in
# configuration or relation data to configure HTTPS.
# Parameters: None
# Returns: 0 if HTTPS can be configured, 1 if not.
##########################################################################
https() {
  # Charm config takes precedence: both ssl_cert and ssl_key must be set.
  local r_id=""
  [[ -n "$(config-get ssl_cert)" ]] &&
    [[ -n "$(config-get ssl_key)" ]] && return 0
  # Otherwise, any identity-service unit advertising https_keystone plus
  # a full cert/key/CA set is sufficient.
  for r_id in $(relation-ids identity-service) ; do
    for unit in $(relation-list -r $r_id) ; do
      [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" ]] &&
        [[ -n "$(relation-get -r $r_id ssl_cert $unit)" ]] &&
        [[ -n "$(relation-get -r $r_id ssl_key $unit)" ]] &&
        [[ -n "$(relation-get -r $r_id ca_cert $unit)" ]] &&
        return 0
    done
  done
  return 1
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: For a given number of port mappings, configures apache2
# HTTPs local reverse proxying using certficates and keys provided in
# either configuration data (preferred) or relation data.  Assumes ports
# are not in use (calling charm should ensure that).
# Parameters: Variable number of proxy port mappings as
#             $external:$internal (external apache listen port first --
#             this matches how the loop below splits each mapping).
# Returns: 0 if reverse proxy(s) have been configured, 1 if not.
##########################################################################
enable_https() {
  local port_maps="$@"
  local http_restart=""
  juju-log "Enabling HTTPS for port mappings: $port_maps."

  # allow overriding of keystone provided certs with those set manually
  # in config.
  local cert=$(config-get ssl_cert)
  local key=$(config-get ssl_key)
  local ca_cert=""
  if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then
    juju-log "Inspecting identity-service relations for SSL certificate."
    local r_id=""
    cert=""
    key=""
    ca_cert=""
    # Take the first non-empty value of each key across all related units.
    for r_id in $(relation-ids identity-service) ; do
      for unit in $(relation-list -r $r_id) ; do
        [[ -z "$cert" ]] && cert="$(relation-get -r $r_id ssl_cert $unit)"
        [[ -z "$key" ]] && key="$(relation-get -r $r_id ssl_key $unit)"
        [[ -z "$ca_cert" ]] && ca_cert="$(relation-get -r $r_id ca_cert $unit)"
      done
    done
    # Relation data is base64 encoded; decode whatever was found.
    [[ -n "$cert" ]] && cert=$(echo $cert | base64 -di)
    [[ -n "$key" ]] && key=$(echo $key | base64 -di)
    [[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di)
  else
    juju-log "Using SSL certificate provided in service config."
  fi

  [[ -z "$cert" ]] || [[ -z "$key" ]] &&
    juju-log "Expected but could not find SSL certificate data, not "\
             "configuring HTTPS!" && return 1

  apt-get -y install apache2
  # a2enmod prints a hint line when modules were newly enabled; use that
  # to decide whether apache needs a restart.
  a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" &&
    http_restart=1

  mkdir -p /etc/apache2/ssl/$CHARM
  echo "$cert" >/etc/apache2/ssl/$CHARM/cert
  echo "$key" >/etc/apache2/ssl/$CHARM/key
  if [[ -n "$ca_cert" ]] ; then
    juju-log "Installing Keystone supplied CA cert."
    echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
    update-ca-certificates --fresh

    # XXX TODO: Find a better way of exporting this?
    if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
      [[ -e /var/www/keystone_juju_ca_cert.crt ]] &&
        rm -rf /var/www/keystone_juju_ca_cert.crt
      ln -s /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt \
            /var/www/keystone_juju_ca_cert.crt
    fi

  fi
  for pmap in $port_maps ; do
    local ext_port=$(echo $pmap | cut -d: -f1)
    local int_port=$(echo $pmap | cut -d: -f2)
    juju-log "Creating apache2 reverse proxy vhost for $pmap."
    cat >/etc/apache2/sites-available/${CHARM}_${ext_port} <<END
Listen $ext_port
NameVirtualHost *:$ext_port
<VirtualHost *:$ext_port>
ServerName $(unit-get private-address)
SSLEngine on
SSLCertificateFile /etc/apache2/ssl/$CHARM/cert
SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key
ProxyPass / http://localhost:$int_port/
ProxyPassReverse / http://localhost:$int_port/
ProxyPreserveHost on
</VirtualHost>
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
<Location />
Order allow,deny
Allow from all
</Location>
END
    a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
      http_restart=1
  done
  if [[ -n "$http_restart" ]] ; then
    service apache2 restart
  fi
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: Ensure HTTPS reverse proxying is disabled for given port
#              mappings.
# Parameters: Variable number of proxy port mappings as
#             $external:$internal (external apache listen port first,
#             matching how the loop below parses them).
# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error.
##########################################################################
disable_https() {
  local port_maps="$@"
  local http_restart=""
  juju-log "Ensuring HTTPS disabled for $port_maps."
  # Nothing to do if apache was never configured for this charm.
  ( [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ) && return 0
  for pmap in $port_maps ; do
    local ext_port=$(echo $pmap | cut -d: -f1)
    local int_port=$(echo $pmap | cut -d: -f2)
    if [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] ; then
      juju-log "Disabling HTTPS reverse proxy for $CHARM $pmap."
      # a2dissite prints a hint line when a site was actually disabled;
      # use that to decide whether apache needs a restart.
      a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
        http_restart=1
    fi
  done
  if [[ -n "$http_restart" ]] ; then
    service apache2 restart
  fi
}
|
|
||||||
|
|
||||||
|
|
||||||
##########################################################################
# Description: Ensures HTTPS is either enabled or disabled for given port
#              mapping.
# Parameters: Variable number of proxy port mappings as
#             $external:$internal.
# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not.
##########################################################################
setup_https() {
  # configure https via apache reverse proxying either
  # using certs provided by config or keystone.
  [[ -z "$CHARM" ]] &&
    error_out "setup_https(): CHARM not set."
  # Quote "$@" so the port-map arguments are forwarded verbatim instead
  # of being re-split by the shell (previously passed as bare $@).
  if ! https ; then
    disable_https "$@"
  else
    enable_https "$@"
  fi
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: Determine correct API server listening port based on
#              existence of HTTPS reverse proxy and/or haproxy.
# Parameters: The standard public port for given service.
# Returns: The correct listening port for API service.
##########################################################################
determine_api_port() {
  local public_port="$1"
  local i=0
  # Each fronting layer (haproxy when clustered/peered, apache when HTTPS
  # is configured) pushes the backend listener down by 10 so the layers
  # can chain on a single host.
  # NOTE: deprecated $[...] arithmetic replaced with POSIX $((...)).
  ( [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ) && i=$((i + 1))
  https >/dev/null 2>&1 && i=$((i + 1))
  echo $((public_port - i * 10))
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: Determine correct proxy listening port based on public IP +
#              existence of HTTPS reverse proxy.
# Parameters: The standard public port for given service.
# Returns: The correct listening port for haproxy service public address.
##########################################################################
determine_haproxy_port() {
  local public_port="$1"
  local i=0
  # When apache fronts haproxy for HTTPS, haproxy listens 10 below the
  # public port.
  # NOTE: deprecated $[...] arithmetic replaced with POSIX $((...)).
  https >/dev/null 2>&1 && i=$((i + 1))
  echo $((public_port - i * 10))
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: Print the value for a given config option in an OpenStack
# .ini style configuration file.
# Parameters: File path, option to retrieve, optional
#             section name (default=DEFAULT)
# Returns: Prints value if set, prints nothing otherwise.
##########################################################################
local_config_get() {
  # return config values set in openstack .ini config files.
  # default placeholders starting (eg, %AUTH_HOST%) treated as
  # unset values.
  local file="$1"
  local option="$2"
  local section="$3"
  [[ -z "$section" ]] && section="DEFAULT"
  # Inline python2 snippet (print statement / ConfigParser module name).
  # Bare 'except:' narrowed to 'except Exception:' so SystemExit and
  # KeyboardInterrupt are no longer swallowed; missing file/section/option
  # still prints nothing and exits 0.
  python -c "
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('$file')
try:
    value = config.get('$section', '$option')
except Exception:
    print ''
    exit(0)
if value.startswith('%'): exit(0)
print value
"
}
|
|
||||||
|
|
||||||
##########################################################################
# Description: Creates an rc file exporting environment variables to a
# script_path local to the charm's installed directory.
# Any charm scripts run outside the juju hook environment can source this
# scriptrc to obtain updated config information necessary to perform health
# checks or service changes
#
# Parameters:
#   An array of '=' delimited ENV_VAR:value combinations to export.
#   If optional script_path key is not provided in the array, script_path
#   defaults to scripts/scriptrc
##########################################################################
function save_script_rc {
  if [ -z "$JUJU_UNIT_NAME" ]; then
    echo "Error: Missing JUJU_UNIT_NAME environment variable"
    exit 1
  fi
  # our default unit_path
  unit_path="$CHARM_DIR/scripts/scriptrc"
  echo $unit_path
  # Build in /tmp then mv into place so readers never see a partial file.
  tmp_rc="/tmp/${JUJU_UNIT_NAME/\//-}rc"

  echo "#!/bin/bash" > $tmp_rc
  for env_var in "${@}"
  do
    # BUGFIX: previously written as `if \`echo ... | grep -q ...\``, which
    # only worked via bash's empty-command/last-substitution quirk; use
    # the pipeline's exit status directly.
    if echo $env_var | grep -q script_path; then
      # well then we need to reset the new unit-local script path
      unit_path="$CHARM_DIR/${env_var/script_path=/}"
    else
      echo "export $env_var" >> $tmp_rc
    fi
  done
  chmod 755 $tmp_rc
  mv $tmp_rc $unit_path
}
|
|
||||||
1
hooks/shared-db-relation-broken
Symbolic link
1
hooks/shared-db-relation-broken
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1 +1 @@
|
|||||||
cinder-hooks
|
cinder_hooks.py
|
||||||
@@ -1 +0,0 @@
|
|||||||
cinder-hooks
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
cinder-hooks
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
cinder-hooks
|
|
||||||
@@ -3,6 +3,8 @@ summary: Cinder OpenStack storage service
|
|||||||
maintainer: Adam Gandelman <adamg@canonical.com>
|
maintainer: Adam Gandelman <adamg@canonical.com>
|
||||||
description: |
|
description: |
|
||||||
Cinder is a storage service for the Openstack project
|
Cinder is a storage service for the Openstack project
|
||||||
|
categories:
|
||||||
|
- miscellaneous
|
||||||
provides:
|
provides:
|
||||||
cinder-volume-service:
|
cinder-volume-service:
|
||||||
interface: cinder
|
interface: cinder
|
||||||
|
|||||||
@@ -1,13 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
service corosync start || /bin/true
|
|
||||||
sleep 2
|
|
||||||
while ! service pacemaker start; do
|
|
||||||
echo "Attempting to start pacemaker"
|
|
||||||
sleep 1;
|
|
||||||
done;
|
|
||||||
crm node online
|
|
||||||
sleep 2
|
|
||||||
while crm status | egrep -q 'Stopped$'; do
|
|
||||||
echo "Waiting for nodes to come online"
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
crm node standby
|
|
||||||
service pacemaker stop
|
|
||||||
service corosync stop
|
|
||||||
6
setup.cfg
Normal file
6
setup.cfg
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
[nosetests]
|
||||||
|
verbosity=2
|
||||||
|
with-coverage=1
|
||||||
|
cover-erase=1
|
||||||
|
cover-package=hooks
|
||||||
|
|
||||||
42
templates/cinder.conf
Normal file
42
templates/cinder.conf
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
###############################################################################
|
||||||
|
# [ WARNING ]
|
||||||
|
# cinder configuration file maintained by Juju
|
||||||
|
# local changes may be overwritten.
|
||||||
|
###############################################################################
|
||||||
|
[DEFAULT]
|
||||||
|
rootwrap_config = /etc/cinder/rootwrap.conf
|
||||||
|
api_paste_confg = /etc/cinder/api-paste.ini
|
||||||
|
iscsi_helper = tgtadm
|
||||||
|
volume_name_template = volume-%s
|
||||||
|
volume_group = cinder-volumes
|
||||||
|
verbose = True
|
||||||
|
auth_strategy = keystone
|
||||||
|
state_path = /var/lib/cinder
|
||||||
|
lock_path = /var/lock/cinder
|
||||||
|
volumes_dir = /var/lib/cinder/volumes
|
||||||
|
{% if database_host -%}
|
||||||
|
sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if rabbitmq_host -%}
|
||||||
|
rabbit_host = {{ rabbitmq_host }}
|
||||||
|
rabbit_userid = {{ rabbitmq_user }}
|
||||||
|
rabbit_password = {{ rabbitmq_password }}
|
||||||
|
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if volume_driver -%}
|
||||||
|
volume_driver = {{ volume_driver }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if rbd_pool -%}
|
||||||
|
rbd_pool = {{ rbd_pool }}
|
||||||
|
host = {{ host }}
|
||||||
|
rbd_user = {{ rbd_user }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if osapi_volume_listen_port -%}
|
||||||
|
osapi_volume_listen_port = {{ osapi_volume_listen_port }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if glance_api_servers -%}
|
||||||
|
glance_api_servers = {{ glance_api_servers }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if glance_api_version -%}
|
||||||
|
glance_api_version = {{ glance_api_version }}
|
||||||
|
{% endif -%}
|
||||||
60
templates/folsom/api-paste.ini
Normal file
60
templates/folsom/api-paste.ini
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
# folsom
|
||||||
|
###############################################################################
|
||||||
|
# [ WARNING ]
|
||||||
|
# cinder configuration file maintained by Juju
|
||||||
|
# local changes may be overwritten.
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
#############
|
||||||
|
# Openstack #
|
||||||
|
#############
|
||||||
|
|
||||||
|
[composite:osapi_volume]
|
||||||
|
use = call:cinder.api.openstack.urlmap:urlmap_factory
|
||||||
|
/: osvolumeversions
|
||||||
|
/v1: openstack_volume_api_v1
|
||||||
|
|
||||||
|
[composite:openstack_volume_api_v1]
|
||||||
|
use = call:cinder.api.auth:pipeline_factory
|
||||||
|
noauth = faultwrap sizelimit noauth osapi_volume_app_v1
|
||||||
|
keystone = faultwrap sizelimit authtoken keystonecontext osapi_volume_app_v1
|
||||||
|
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_volume_app_v1
|
||||||
|
|
||||||
|
[filter:faultwrap]
|
||||||
|
paste.filter_factory = cinder.api.openstack:FaultWrapper.factory
|
||||||
|
|
||||||
|
[filter:noauth]
|
||||||
|
paste.filter_factory = cinder.api.openstack.auth:NoAuthMiddleware.factory
|
||||||
|
|
||||||
|
[filter:sizelimit]
|
||||||
|
paste.filter_factory = cinder.api.sizelimit:RequestBodySizeLimiter.factory
|
||||||
|
|
||||||
|
[app:osapi_volume_app_v1]
|
||||||
|
paste.app_factory = cinder.api.openstack.volume:APIRouter.factory
|
||||||
|
|
||||||
|
[pipeline:osvolumeversions]
|
||||||
|
pipeline = faultwrap osvolumeversionapp
|
||||||
|
|
||||||
|
[app:osvolumeversionapp]
|
||||||
|
paste.app_factory = cinder.api.openstack.volume.versions:Versions.factory
|
||||||
|
|
||||||
|
##########
|
||||||
|
# Shared #
|
||||||
|
##########
|
||||||
|
|
||||||
|
[filter:keystonecontext]
|
||||||
|
paste.filter_factory = cinder.api.auth:CinderKeystoneContext.factory
|
||||||
|
|
||||||
|
[filter:authtoken]
|
||||||
|
paste.filter_factory = keystone.middleware.auth_token:filter_factory
|
||||||
|
{% if service_host -%}
|
||||||
|
service_protocol = {{ service_protocol }}
|
||||||
|
service_host = {{ service_host }}
|
||||||
|
service_port = {{ service_port }}
|
||||||
|
auth_host = {{ auth_host }}
|
||||||
|
auth_port = {{ auth_port }}
|
||||||
|
auth_protocol = {{ auth_protocol }}
|
||||||
|
admin_tenant_name = {{ admin_tenant_name }}
|
||||||
|
admin_user = {{ admin_user }}
|
||||||
|
admin_password = {{ admin_password }}
|
||||||
|
{% endif -%}
|
||||||
71
templates/grizzly/api-paste.ini
Normal file
71
templates/grizzly/api-paste.ini
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# grizzly
|
||||||
|
###############################################################################
|
||||||
|
# [ WARNING ]
|
||||||
|
# cinder configuration file maintained by Juju
|
||||||
|
# local changes may be overwritten.
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
#############
|
||||||
|
# OpenStack #
|
||||||
|
#############
|
||||||
|
|
||||||
|
[composite:osapi_volume]
|
||||||
|
use = call:cinder.api:root_app_factory
|
||||||
|
/: apiversions
|
||||||
|
/v1: openstack_volume_api_v1
|
||||||
|
/v2: openstack_volume_api_v2
|
||||||
|
|
||||||
|
[composite:openstack_volume_api_v1]
|
||||||
|
use = call:cinder.api.middleware.auth:pipeline_factory
|
||||||
|
noauth = faultwrap sizelimit noauth apiv1
|
||||||
|
keystone = faultwrap sizelimit authtoken keystonecontext apiv1
|
||||||
|
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1
|
||||||
|
|
||||||
|
[composite:openstack_volume_api_v2]
|
||||||
|
use = call:cinder.api.middleware.auth:pipeline_factory
|
||||||
|
noauth = faultwrap sizelimit noauth apiv2
|
||||||
|
keystone = faultwrap sizelimit authtoken keystonecontext apiv2
|
||||||
|
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv2
|
||||||
|
|
||||||
|
[filter:faultwrap]
|
||||||
|
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
|
||||||
|
|
||||||
|
[filter:noauth]
|
||||||
|
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
|
||||||
|
|
||||||
|
[filter:sizelimit]
|
||||||
|
paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
|
||||||
|
|
||||||
|
[app:apiv1]
|
||||||
|
paste.app_factory = cinder.api.v1.router:APIRouter.factory
|
||||||
|
|
||||||
|
[app:apiv2]
|
||||||
|
paste.app_factory = cinder.api.v2.router:APIRouter.factory
|
||||||
|
|
||||||
|
[pipeline:apiversions]
|
||||||
|
pipeline = faultwrap osvolumeversionapp
|
||||||
|
|
||||||
|
[app:osvolumeversionapp]
|
||||||
|
paste.app_factory = cinder.api.versions:Versions.factory
|
||||||
|
|
||||||
|
##########
|
||||||
|
# Shared #
|
||||||
|
##########
|
||||||
|
|
||||||
|
[filter:keystonecontext]
|
||||||
|
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
|
||||||
|
|
||||||
|
[filter:authtoken]
|
||||||
|
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
||||||
|
{% if service_host -%}
|
||||||
|
service_protocol = {{ service_protocol }}
|
||||||
|
service_host = {{ service_host }}
|
||||||
|
service_port = {{ service_port }}
|
||||||
|
auth_host = {{ auth_host }}
|
||||||
|
auth_port = {{ auth_port }}
|
||||||
|
auth_protocol = {{ auth_protocol }}
|
||||||
|
admin_tenant_name = {{ admin_tenant_name }}
|
||||||
|
admin_user = {{ admin_user }}
|
||||||
|
admin_password = {{ admin_password }}
|
||||||
|
signing_dir = /var/lib/cinder
|
||||||
|
{% endif -%}
|
||||||
71
templates/havana/api-paste.ini
Normal file
71
templates/havana/api-paste.ini
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# havana
|
||||||
|
###############################################################################
|
||||||
|
# [ WARNING ]
|
||||||
|
# cinder configuration file maintained by Juju
|
||||||
|
# local changes may be overwritten.
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
#############
|
||||||
|
# OpenStack #
|
||||||
|
#############
|
||||||
|
|
||||||
|
[composite:osapi_volume]
|
||||||
|
use = call:cinder.api:root_app_factory
|
||||||
|
/: apiversions
|
||||||
|
/v1: openstack_volume_api_v1
|
||||||
|
/v2: openstack_volume_api_v2
|
||||||
|
|
||||||
|
[composite:openstack_volume_api_v1]
|
||||||
|
use = call:cinder.api.middleware.auth:pipeline_factory
|
||||||
|
noauth = faultwrap sizelimit noauth apiv1
|
||||||
|
keystone = faultwrap sizelimit authtoken keystonecontext apiv1
|
||||||
|
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1
|
||||||
|
|
||||||
|
[composite:openstack_volume_api_v2]
|
||||||
|
use = call:cinder.api.middleware.auth:pipeline_factory
|
||||||
|
noauth = faultwrap sizelimit noauth apiv2
|
||||||
|
keystone = faultwrap sizelimit authtoken keystonecontext apiv2
|
||||||
|
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv2
|
||||||
|
|
||||||
|
[filter:faultwrap]
|
||||||
|
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
|
||||||
|
|
||||||
|
[filter:noauth]
|
||||||
|
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
|
||||||
|
|
||||||
|
[filter:sizelimit]
|
||||||
|
paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
|
||||||
|
|
||||||
|
[app:apiv1]
|
||||||
|
paste.app_factory = cinder.api.v1.router:APIRouter.factory
|
||||||
|
|
||||||
|
[app:apiv2]
|
||||||
|
paste.app_factory = cinder.api.v2.router:APIRouter.factory
|
||||||
|
|
||||||
|
[pipeline:apiversions]
|
||||||
|
pipeline = faultwrap osvolumeversionapp
|
||||||
|
|
||||||
|
[app:osvolumeversionapp]
|
||||||
|
paste.app_factory = cinder.api.versions:Versions.factory
|
||||||
|
|
||||||
|
##########
|
||||||
|
# Shared #
|
||||||
|
##########
|
||||||
|
|
||||||
|
[filter:keystonecontext]
|
||||||
|
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
|
||||||
|
|
||||||
|
[filter:authtoken]
|
||||||
|
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
||||||
|
{% if service_host -%}
|
||||||
|
auth_host = {{ auth_host }}
|
||||||
|
auth_port = {{ auth_port }}
|
||||||
|
auth_protocol = {{ auth_protocol }}
|
||||||
|
admin_tenant_name = {{ admin_tenant_name }}
|
||||||
|
admin_user = {{ admin_user }}
|
||||||
|
admin_password = {{ admin_password }}
|
||||||
|
# signing_dir is configurable, but the default behavior of the authtoken
|
||||||
|
# middleware should be sufficient. It will create a temporary directory
|
||||||
|
# in the home directory for the user the cinder process is running as.
|
||||||
|
#signing_dir = /var/lib/cinder/keystone-signing
|
||||||
|
{% endif -%}
|
||||||
2
unit_tests/__init__.py
Normal file
2
unit_tests/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
import sys
|
||||||
|
sys.path.append('hooks')
|
||||||
286
unit_tests/test_cinder_hooks.py
Normal file
286
unit_tests/test_cinder_hooks.py
Normal file
@@ -0,0 +1,286 @@
|
|||||||
|
|
||||||
|
from mock import MagicMock, patch, call
|
||||||
|
|
||||||
|
|
||||||
|
import cinder_utils as utils
|
||||||
|
|
||||||
|
from test_utils import (
|
||||||
|
CharmTestCase,
|
||||||
|
RESTART_MAP,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Need to do some early patching to get the module loaded.
|
||||||
|
_restart_map = utils.restart_map
|
||||||
|
_register_configs = utils.register_configs
|
||||||
|
|
||||||
|
utils.restart_map = MagicMock()
|
||||||
|
utils.restart_map.return_value = RESTART_MAP
|
||||||
|
utils.register_configs = MagicMock()
|
||||||
|
|
||||||
|
import cinder_hooks as hooks
|
||||||
|
|
||||||
|
# Unpatch it now that its loaded.
|
||||||
|
utils.restart_map = _restart_map
|
||||||
|
utils.register_configs = _register_configs
|
||||||
|
|
||||||
|
TO_PATCH = [
|
||||||
|
'check_call',
|
||||||
|
# cinder_utils
|
||||||
|
'clean_storage',
|
||||||
|
'determine_packages',
|
||||||
|
'do_openstack_upgrade',
|
||||||
|
'ensure_block_device',
|
||||||
|
'ensure_ceph_keyring',
|
||||||
|
'ensure_ceph_pool',
|
||||||
|
'juju_log',
|
||||||
|
'lsb_release',
|
||||||
|
'migrate_database',
|
||||||
|
'prepare_lvm_storage',
|
||||||
|
'register_configs',
|
||||||
|
'restart_map',
|
||||||
|
'service_enabled',
|
||||||
|
'set_ceph_env_variables',
|
||||||
|
'CONFIGS',
|
||||||
|
'CLUSTER_RES',
|
||||||
|
# charmhelpers.core.hookenv
|
||||||
|
'config',
|
||||||
|
'relation_get',
|
||||||
|
'relation_ids',
|
||||||
|
'relation_set',
|
||||||
|
'service_name',
|
||||||
|
'unit_get',
|
||||||
|
# charmhelpers.core.host
|
||||||
|
'apt_install',
|
||||||
|
'apt_update',
|
||||||
|
# charmhelpers.contrib.openstack.openstack_utils
|
||||||
|
'configure_installation_source',
|
||||||
|
'openstack_upgrade_available',
|
||||||
|
# charmhelpers.contrib.hahelpers.cluster_utils
|
||||||
|
'canonical_url',
|
||||||
|
'eligible_leader',
|
||||||
|
'is_leader',
|
||||||
|
'get_hacluster_config',
|
||||||
|
'execd_preinstall',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class TestInstallHook(CharmTestCase):
|
||||||
|
def setUp(self):
|
||||||
|
super(TestInstallHook, self).setUp(hooks, TO_PATCH)
|
||||||
|
self.config.side_effect = self.test_config.get_all
|
||||||
|
|
||||||
|
def test_install_precise_distro(self):
|
||||||
|
'''It redirects to cloud archive if setup to install precise+distro'''
|
||||||
|
self.lsb_release.return_value = {'DISTRIB_CODENAME': 'precise'}
|
||||||
|
hooks.hooks.execute(['hooks/install'])
|
||||||
|
ca = 'cloud:precise-folsom'
|
||||||
|
self.configure_installation_source.assert_called_with(ca)
|
||||||
|
|
||||||
|
def test_correct_install_packages(self):
|
||||||
|
'''It installs the correct packages based on what is determined'''
|
||||||
|
self.determine_packages.return_value = ['foo', 'bar', 'baz']
|
||||||
|
hooks.hooks.execute(['hooks/install'])
|
||||||
|
self.apt_install.assert_called_with(['foo', 'bar', 'baz'], fatal=True)
|
||||||
|
|
||||||
|
def test_storage_prepared(self):
|
||||||
|
'''It prepares local storage if volume service enabled'''
|
||||||
|
self.test_config.set('block-device', 'vdb')
|
||||||
|
self.test_config.set('volume-group', 'cinder')
|
||||||
|
self.test_config.set('overwrite', 'true')
|
||||||
|
self.service_enabled.return_value = True
|
||||||
|
self.ensure_block_device.return_value = '/dev/vdb'
|
||||||
|
hooks.hooks.execute(['hooks/install'])
|
||||||
|
self.ensure_block_device.assert_called_with('vdb')
|
||||||
|
self.prepare_lvm_storage.assert_called_with('/dev/vdb', 'cinder')
|
||||||
|
|
||||||
|
def test_storage_not_prepared(self):
|
||||||
|
'''It does not prepare storage when not necessary'''
|
||||||
|
self.service_enabled.return_value = False
|
||||||
|
hooks.hooks.execute(['hooks/install'])
|
||||||
|
self.assertFalse(self.ensure_block_device.called)
|
||||||
|
self.service_enabled.return_value = True
|
||||||
|
for none in ['None', 'none', None]:
|
||||||
|
self.test_config.set('block-device', none)
|
||||||
|
hooks.hooks.execute(['hooks/install'])
|
||||||
|
self.assertFalse(self.ensure_block_device.called)
|
||||||
|
|
||||||
|
def test_storage_is_cleaned(self):
|
||||||
|
'''It cleans storage when configured to do so'''
|
||||||
|
self.ensure_block_device.return_value = '/dev/foo'
|
||||||
|
for true in ['True', 'true', True]:
|
||||||
|
self.test_config.set('overwrite', true)
|
||||||
|
hooks.hooks.execute(['hooks/install'])
|
||||||
|
self.clean_storage.assert_called_with('/dev/foo')
|
||||||
|
|
||||||
|
def test_storage_is_not_cleaned(self):
|
||||||
|
'''It does not clean storage when not configured to'''
|
||||||
|
self.ensure_block_device.return_value = '/dev/foo'
|
||||||
|
for true in ['False', 'false', False]:
|
||||||
|
self.test_config.set('overwrite', true)
|
||||||
|
hooks.hooks.execute(['hooks/install'])
|
||||||
|
self.assertFalse(self.clean_storage.called)
|
||||||
|
|
||||||
|
|
||||||
|
class TestChangedHooks(CharmTestCase):
|
||||||
|
def setUp(self):
|
||||||
|
super(TestChangedHooks, self).setUp(hooks, TO_PATCH)
|
||||||
|
self.config.side_effect = self.test_config.get_all
|
||||||
|
|
||||||
|
@patch.object(hooks, 'configure_https')
|
||||||
|
def test_config_changed(self, conf_https):
|
||||||
|
'''It writes out all config'''
|
||||||
|
self.openstack_upgrade_available.return_value = False
|
||||||
|
hooks.hooks.execute(['hooks/config-changed'])
|
||||||
|
self.assertTrue(self.CONFIGS.write_all.called)
|
||||||
|
self.assertTrue(conf_https.called)
|
||||||
|
|
||||||
|
@patch.object(hooks, 'configure_https')
|
||||||
|
def test_config_changed_upgrade_available(self, conf_https):
|
||||||
|
'''It writes out all config with an available OS upgrade'''
|
||||||
|
self.openstack_upgrade_available.return_value = True
|
||||||
|
hooks.hooks.execute(['hooks/config-changed'])
|
||||||
|
self.do_openstack_upgrade.assert_called_with(configs=self.CONFIGS)
|
||||||
|
|
||||||
|
def test_db_changed(self):
|
||||||
|
'''It writes out cinder.conf on db changed'''
|
||||||
|
self.CONFIGS.complete_contexts.return_value = ['shared-db']
|
||||||
|
hooks.hooks.execute(['hooks/shared-db-relation-changed'])
|
||||||
|
self.CONFIGS.write.assert_called_with('/etc/cinder/cinder.conf')
|
||||||
|
self.assertTrue(self.migrate_database.called)
|
||||||
|
|
||||||
|
def test_db_changed_relation_incomplete(self):
|
||||||
|
'''It does not write out cinder.conf with incomplete shared-db rel'''
|
||||||
|
hooks.hooks.execute(['hooks/shared-db-relation-changed'])
|
||||||
|
self.assertFalse(self.CONFIGS.write.called)
|
||||||
|
self.assertFalse(self.migrate_database.called)
|
||||||
|
|
||||||
|
def test_db_changed_not_leader(self):
|
||||||
|
'''It does not migrate database when not leader'''
|
||||||
|
self.eligible_leader.return_value = False
|
||||||
|
self.CONFIGS.complete_contexts.return_value = ['shared-db']
|
||||||
|
hooks.hooks.execute(['hooks/shared-db-relation-changed'])
|
||||||
|
self.CONFIGS.write.assert_called_with('/etc/cinder/cinder.conf')
|
||||||
|
self.assertFalse(self.migrate_database.called)
|
||||||
|
|
||||||
|
def test_amqp_changed(self):
|
||||||
|
'''It writes out cinder.conf on amqp changed with complete relation'''
|
||||||
|
self.CONFIGS.complete_contexts.return_value = ['amqp']
|
||||||
|
hooks.hooks.execute(['hooks/amqp-relation-changed'])
|
||||||
|
self.CONFIGS.write.assert_called_with('/etc/cinder/cinder.conf')
|
||||||
|
|
||||||
|
def test_amqp_changed_incomplete(self):
|
||||||
|
'''It does not write out cinder.conf with incomplete relation'''
|
||||||
|
self.CONFIGS.complete_contexts.return_value = ['']
|
||||||
|
hooks.hooks.execute(['hooks/amqp-relation-changed'])
|
||||||
|
self.assertFalse(self.CONFIGS.write.called)
|
||||||
|
|
||||||
|
@patch.object(hooks, 'configure_https')
|
||||||
|
def test_identity_changed(self, conf_https):
|
||||||
|
'''It writes out api-paste.ini on identity-service changed'''
|
||||||
|
self.CONFIGS.complete_contexts.return_value = ['identity-service']
|
||||||
|
hooks.hooks.execute(['hooks/identity-service-relation-changed'])
|
||||||
|
self.CONFIGS.write.assert_called_with('/etc/cinder/api-paste.ini')
|
||||||
|
self.assertTrue(conf_https.called)
|
||||||
|
|
||||||
|
def test_identity_changed_incomplete(self):
|
||||||
|
'''It doesn't write api-paste.ini with incomplete identity-service'''
|
||||||
|
hooks.hooks.execute(['hooks/identity-service-relation-changed'])
|
||||||
|
self.assertFalse(self.CONFIGS.write.called)
|
||||||
|
|
||||||
|
@patch.object(hooks, 'identity_joined')
|
||||||
|
def test_configure_https_enable(self, identity_joined):
|
||||||
|
'''It enables https from hooks when we have https data'''
|
||||||
|
self.CONFIGS.complete_contexts.return_value = ['https']
|
||||||
|
self.relation_ids.return_value = ['identity-service:0']
|
||||||
|
hooks.configure_https()
|
||||||
|
cmd = ['a2ensite', 'openstack_https_frontend']
|
||||||
|
self.check_call.assert_called_with(cmd)
|
||||||
|
identity_joined.assert_called_with(rid='identity-service:0')
|
||||||
|
|
||||||
|
@patch.object(hooks, 'identity_joined')
|
||||||
|
def test_configure_https_disable(self, identity_joined):
|
||||||
|
'''It enables https from hooks when we have https data'''
|
||||||
|
self.CONFIGS.complete_contexts.return_value = []
|
||||||
|
self.relation_ids.return_value = ['identity-service:0']
|
||||||
|
hooks.configure_https()
|
||||||
|
cmd = ['a2dissite', 'openstack_https_frontend']
|
||||||
|
self.check_call.assert_called_with(cmd)
|
||||||
|
identity_joined.assert_called_with(rid='identity-service:0')
|
||||||
|
|
||||||
|
|
||||||
|
class TestJoinedHooks(CharmTestCase):
|
||||||
|
def setUp(self):
|
||||||
|
super(TestJoinedHooks, self).setUp(hooks, TO_PATCH)
|
||||||
|
self.config.side_effect = self.test_config.get_all
|
||||||
|
|
||||||
|
def test_db_joined(self):
|
||||||
|
'''It properly requests access to a shared-db service'''
|
||||||
|
self.unit_get.return_value = 'cindernode1'
|
||||||
|
hooks.hooks.execute(['hooks/shared-db-relation-joined'])
|
||||||
|
expected = {'username': 'cinder',
|
||||||
|
'hostname': 'cindernode1', 'database': 'cinder'}
|
||||||
|
self.relation_set.assert_called_with(**expected)
|
||||||
|
|
||||||
|
def test_amqp_joined(self):
|
||||||
|
'''It properly requests access to an amqp service'''
|
||||||
|
hooks.hooks.execute(['hooks/amqp-relation-joined'])
|
||||||
|
self.relation_set.assert_called_with(username='cinder', vhost='cinder')
|
||||||
|
|
||||||
|
def test_identity_service_joined(self):
|
||||||
|
'''It properly requests unclustered endpoint via identity-service'''
|
||||||
|
self.unit_get.return_value = 'cindernode1'
|
||||||
|
self.canonical_url.return_value = 'http://cindernode1'
|
||||||
|
hooks.hooks.execute(['hooks/identity-service-relation-joined'])
|
||||||
|
expected = {
|
||||||
|
'service': 'cinder',
|
||||||
|
'region': 'RegionOne',
|
||||||
|
'public_url': 'http://cindernode1:8776/v1/$(tenant_id)s',
|
||||||
|
'admin_url': 'http://cindernode1:8776/v1/$(tenant_id)s',
|
||||||
|
'internal_url': 'http://cindernode1:8776/v1/$(tenant_id)s',
|
||||||
|
'relation_id': None,
|
||||||
|
}
|
||||||
|
self.relation_set.assert_called_with(**expected)
|
||||||
|
|
||||||
|
def test_identity_service_joined_no_leadership(self):
|
||||||
|
'''It does nothing on identity-joined when not eligible leader'''
|
||||||
|
self.eligible_leader.return_value = False
|
||||||
|
self.assertFalse(self.relation_set.called)
|
||||||
|
|
||||||
|
@patch('os.mkdir')
|
||||||
|
def test_ceph_joined(self, mkdir):
|
||||||
|
'''It correctly prepares for a ceph changed hook'''
|
||||||
|
with patch('os.path.isdir') as isdir:
|
||||||
|
isdir.return_value = False
|
||||||
|
hooks.hooks.execute(['hooks/ceph-relation-joined'])
|
||||||
|
mkdir.assert_called_with('/etc/ceph')
|
||||||
|
self.apt_install.assert_called_with('ceph-common', fatal=True)
|
||||||
|
|
||||||
|
def test_ceph_changed_no_key(self):
|
||||||
|
'''It does nothing when ceph key is not available'''
|
||||||
|
self.CONFIGS.complete_contexts.return_value = ['']
|
||||||
|
hooks.hooks.execute(['hooks/ceph-relation-changed'])
|
||||||
|
m = 'ceph relation incomplete. Peer not ready?'
|
||||||
|
self.juju_log.assert_called_with(m)
|
||||||
|
|
||||||
|
def test_ceph_changed(self):
|
||||||
|
'''It ensures ceph assets created on ceph changed'''
|
||||||
|
self.CONFIGS.complete_contexts.return_value = ['ceph']
|
||||||
|
self.service_name.return_value = 'cinder'
|
||||||
|
self.ensure_ceph_keyring.return_value = True
|
||||||
|
hooks.hooks.execute(['hooks/ceph-relation-changed'])
|
||||||
|
self.ensure_ceph_keyring.assert_called_with(service='cinder',
|
||||||
|
user='cinder',
|
||||||
|
group='cinder')
|
||||||
|
self.ensure_ceph_pool.assert_called_with(service='cinder', replicas=2)
|
||||||
|
for c in [call('/etc/ceph/ceph.conf'),
|
||||||
|
call('/etc/cinder/cinder.conf')]:
|
||||||
|
self.assertIn(c, self.CONFIGS.write.call_args_list)
|
||||||
|
self.set_ceph_env_variables.assert_called_with(service='cinder')
|
||||||
|
|
||||||
|
def test_ceph_changed_no_leadership(self):
|
||||||
|
'''It does not attempt to create ceph pool if not leader'''
|
||||||
|
self.eligible_leader.return_value = False
|
||||||
|
self.service_name.return_value = 'cinder'
|
||||||
|
self.ensure_ceph_keyring.return_value = True
|
||||||
|
hooks.hooks.execute(['hooks/ceph-relation-changed'])
|
||||||
|
self.assertFalse(self.ensure_ceph_pool.called)
|
||||||
223
unit_tests/test_cinder_utils.py
Normal file
223
unit_tests/test_cinder_utils.py
Normal file
@@ -0,0 +1,223 @@
|
|||||||
|
from mock import patch
|
||||||
|
|
||||||
|
from collections import OrderedDict
|
||||||
|
|
||||||
|
import cinder_utils as cinder_utils
|
||||||
|
|
||||||
|
from test_utils import (
|
||||||
|
CharmTestCase,
|
||||||
|
)
|
||||||
|
|
||||||
|
TO_PATCH = [
|
||||||
|
# helpers.core.hookenv
|
||||||
|
'config',
|
||||||
|
'log',
|
||||||
|
# helpers.core.host
|
||||||
|
'mounts',
|
||||||
|
'umount',
|
||||||
|
# ceph utils
|
||||||
|
'ceph_create_pool',
|
||||||
|
'ceph_pool_exists',
|
||||||
|
# storage_utils
|
||||||
|
'create_lvm_physical_volume',
|
||||||
|
'create_lvm_volume_group',
|
||||||
|
'deactivate_lvm_volume_group',
|
||||||
|
'is_lvm_physical_volume',
|
||||||
|
'relation_ids',
|
||||||
|
'remove_lvm_physical_volume',
|
||||||
|
'ensure_loopback_device',
|
||||||
|
'is_block_device',
|
||||||
|
'zap_disk',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
MOUNTS = [
|
||||||
|
['/mnt', '/dev/vdb']
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class TestCinderUtils(CharmTestCase):
|
||||||
|
def setUp(self):
|
||||||
|
super(TestCinderUtils, self).setUp(cinder_utils, TO_PATCH)
|
||||||
|
self.config.side_effect = self.test_config.get_all
|
||||||
|
|
||||||
|
def svc_enabled(self, svc):
|
||||||
|
return svc in self.test_config.get('enabled-services')
|
||||||
|
|
||||||
|
def test_all_services_enabled(self):
|
||||||
|
'''It determines all services are enabled based on config'''
|
||||||
|
self.test_config.set('enabled-services', 'all')
|
||||||
|
enabled = []
|
||||||
|
for s in ['volume', 'api', 'scheduler']:
|
||||||
|
enabled.append(cinder_utils.service_enabled(s))
|
||||||
|
self.assertEquals(enabled, [True, True, True])
|
||||||
|
|
||||||
|
def test_service_enabled(self):
|
||||||
|
'''It determines services are enabled based on config'''
|
||||||
|
self.test_config.set('enabled-services', 'api,volume,scheduler')
|
||||||
|
self.assertTrue(cinder_utils.service_enabled('volume'))
|
||||||
|
|
||||||
|
def test_service_not_enabled(self):
|
||||||
|
'''It determines services are not enabled based on config'''
|
||||||
|
self.test_config.set('enabled-services', 'api,scheduler')
|
||||||
|
self.assertFalse(cinder_utils.service_enabled('volume'))
|
||||||
|
|
||||||
|
@patch('cinder_utils.service_enabled')
|
||||||
|
def test_determine_packages_all(self, service_enabled):
|
||||||
|
'''It determines all packages required when all services enabled'''
|
||||||
|
service_enabled.return_value = True
|
||||||
|
pkgs = cinder_utils.determine_packages()
|
||||||
|
self.assertEquals(sorted(pkgs),
|
||||||
|
sorted(cinder_utils.COMMON_PACKAGES +
|
||||||
|
cinder_utils.VOLUME_PACKAGES +
|
||||||
|
cinder_utils.API_PACKAGES +
|
||||||
|
cinder_utils.SCHEDULER_PACKAGES))
|
||||||
|
|
||||||
|
@patch('cinder_utils.service_enabled')
|
||||||
|
def test_determine_packages_subset(self, service_enabled):
|
||||||
|
'''It determines packages required for a subset of enabled services'''
|
||||||
|
service_enabled.side_effect = self.svc_enabled
|
||||||
|
|
||||||
|
self.test_config.set('enabled-services', 'api')
|
||||||
|
pkgs = cinder_utils.determine_packages()
|
||||||
|
common = cinder_utils.COMMON_PACKAGES
|
||||||
|
self.assertEquals(sorted(pkgs),
|
||||||
|
sorted(common + cinder_utils.API_PACKAGES))
|
||||||
|
self.test_config.set('enabled-services', 'volume')
|
||||||
|
pkgs = cinder_utils.determine_packages()
|
||||||
|
common = cinder_utils.COMMON_PACKAGES
|
||||||
|
self.assertEquals(sorted(pkgs),
|
||||||
|
sorted(common + cinder_utils.VOLUME_PACKAGES))
|
||||||
|
self.test_config.set('enabled-services', 'api,scheduler')
|
||||||
|
pkgs = cinder_utils.determine_packages()
|
||||||
|
common = cinder_utils.COMMON_PACKAGES
|
||||||
|
self.assertEquals(sorted(pkgs),
|
||||||
|
sorted(common + cinder_utils.API_PACKAGES +
|
||||||
|
cinder_utils.SCHEDULER_PACKAGES))
|
||||||
|
|
||||||
|
def test_creates_restart_map_all_enabled(self):
|
||||||
|
'''It creates correct restart map when all services enabled'''
|
||||||
|
ex_map = OrderedDict([
|
||||||
|
('/etc/cinder/cinder.conf', ['cinder-api', 'cinder-volume',
|
||||||
|
'cinder-scheduler', 'haproxy']),
|
||||||
|
('/etc/cinder/api-paste.ini', ['cinder-api']),
|
||||||
|
('/etc/ceph/ceph.conf', ['cinder-volume']),
|
||||||
|
('/etc/haproxy/haproxy.cfg', ['haproxy']),
|
||||||
|
('/etc/apache2/sites-available/openstack_https_frontend',
|
||||||
|
['apache2']),
|
||||||
|
('/etc/apache2/sites-available/openstack_https_frontend.conf',
|
||||||
|
['apache2']),
|
||||||
|
])
|
||||||
|
self.assertEquals(cinder_utils.restart_map(), ex_map)
|
||||||
|
|
||||||
|
@patch('cinder_utils.service_enabled')
|
||||||
|
def test_creates_restart_map_no_api(self, service_enabled):
|
||||||
|
'''It creates correct restart map with api disabled'''
|
||||||
|
service_enabled.side_effect = self.svc_enabled
|
||||||
|
self.test_config.set('enabled-services', 'scheduler,volume')
|
||||||
|
ex_map = OrderedDict([
|
||||||
|
('/etc/cinder/cinder.conf', ['cinder-volume', 'cinder-scheduler',
|
||||||
|
'haproxy']),
|
||||||
|
('/etc/ceph/ceph.conf', ['cinder-volume']),
|
||||||
|
('/etc/haproxy/haproxy.cfg', ['haproxy']),
|
||||||
|
('/etc/apache2/sites-available/openstack_https_frontend',
|
||||||
|
['apache2']),
|
||||||
|
('/etc/apache2/sites-available/openstack_https_frontend.conf',
|
||||||
|
['apache2']),
|
||||||
|
])
|
||||||
|
self.assertEquals(cinder_utils.restart_map(), ex_map)
|
||||||
|
|
||||||
|
@patch('cinder_utils.service_enabled')
|
||||||
|
def test_creates_restart_map_only_api(self, service_enabled):
|
||||||
|
'''It creates correct restart map with only api enabled'''
|
||||||
|
service_enabled.side_effect = self.svc_enabled
|
||||||
|
self.test_config.set('enabled-services', 'api')
|
||||||
|
ex_map = OrderedDict([
|
||||||
|
('/etc/cinder/cinder.conf', ['cinder-api', 'haproxy']),
|
||||||
|
('/etc/cinder/api-paste.ini', ['cinder-api']),
|
||||||
|
('/etc/haproxy/haproxy.cfg', ['haproxy']),
|
||||||
|
('/etc/apache2/sites-available/openstack_https_frontend',
|
||||||
|
['apache2']),
|
||||||
|
('/etc/apache2/sites-available/openstack_https_frontend.conf',
|
||||||
|
['apache2']),
|
||||||
|
])
|
||||||
|
self.assertEquals(cinder_utils.restart_map(), ex_map)
|
||||||
|
|
||||||
|
def test_ensure_block_device_bad_config(self):
|
||||||
|
'''It doesn't prepare storage with bad config'''
|
||||||
|
for none in ['None', 'none', None]:
|
||||||
|
self.assertRaises(cinder_utils.CinderCharmError,
|
||||||
|
cinder_utils.ensure_block_device,
|
||||||
|
block_device=none)
|
||||||
|
|
||||||
|
def test_ensure_block_device_loopback(self):
|
||||||
|
'''It ensures loopback device when checking block device'''
|
||||||
|
cinder_utils.ensure_block_device('/tmp/cinder.img')
|
||||||
|
ex_size = cinder_utils.DEFAULT_LOOPBACK_SIZE
|
||||||
|
self.ensure_loopback_device.assert_called_with('/tmp/cinder.img',
|
||||||
|
ex_size)
|
||||||
|
|
||||||
|
cinder_utils.ensure_block_device('/tmp/cinder-2.img|15G')
|
||||||
|
self.ensure_loopback_device.assert_called_with('/tmp/cinder-2.img',
|
||||||
|
'15G')
|
||||||
|
|
||||||
|
def test_ensure_standard_block_device(self):
|
||||||
|
'''It looks for storage at both relative and full device path'''
|
||||||
|
for dev in ['vdb', '/dev/vdb']:
|
||||||
|
cinder_utils.ensure_block_device(dev)
|
||||||
|
self.is_block_device.assert_called_with('/dev/vdb')
|
||||||
|
|
||||||
|
def test_ensure_nonexistent_block_device(self):
|
||||||
|
'''It will not ensure a non-existant block device'''
|
||||||
|
self.is_block_device.return_value = False
|
||||||
|
self.assertRaises(cinder_utils.CinderCharmError,
|
||||||
|
cinder_utils.ensure_block_device, 'foo')
|
||||||
|
|
||||||
|
def test_clean_storage_unmount(self):
|
||||||
|
'''It unmounts block device when cleaning storage'''
|
||||||
|
self.is_lvm_physical_volume.return_value = False
|
||||||
|
self.zap_disk.return_value = True
|
||||||
|
self.mounts.return_value = MOUNTS
|
||||||
|
cinder_utils.clean_storage('/dev/vdb')
|
||||||
|
self.umount.called_with('/dev/vdb', True)
|
||||||
|
|
||||||
|
def test_clean_storage_lvm_wipe(self):
|
||||||
|
'''It removes traces of LVM when cleaning storage'''
|
||||||
|
self.mounts.return_value = []
|
||||||
|
self.is_lvm_physical_volume.return_value = True
|
||||||
|
cinder_utils.clean_storage('/dev/vdb')
|
||||||
|
self.remove_lvm_physical_volume.assert_called_with('/dev/vdb')
|
||||||
|
self.deactivate_lvm_volume_group.assert_called_with('/dev/vdb')
|
||||||
|
|
||||||
|
def test_clean_storage_zap_disk(self):
|
||||||
|
'''It removes traces of LVM when cleaning storage'''
|
||||||
|
self.mounts.return_value = []
|
||||||
|
self.is_lvm_physical_volume.return_value = False
|
||||||
|
cinder_utils.clean_storage('/dev/vdb')
|
||||||
|
self.zap_disk.assert_called_with('/dev/vdb')
|
||||||
|
|
||||||
|
def test_prepare_lvm_storage_not_clean(self):
|
||||||
|
'''It errors when prepping non-clean LVM storage'''
|
||||||
|
self.is_lvm_physical_volume.return_value = True
|
||||||
|
self.assertRaises(cinder_utils.CinderCharmError,
|
||||||
|
cinder_utils.prepare_lvm_storage,
|
||||||
|
block_device='/dev/foobar',
|
||||||
|
volume_group='bar-vg')
|
||||||
|
|
||||||
|
def test_migrate_database(self):
|
||||||
|
'''It migrates database with cinder-manage'''
|
||||||
|
with patch('subprocess.check_call') as check_call:
|
||||||
|
cinder_utils.migrate_database()
|
||||||
|
check_call.assert_called_with(['cinder-manage', 'db', 'sync'])
|
||||||
|
|
||||||
|
def test_ensure_ceph_pool(self):
|
||||||
|
self.ceph_pool_exists.return_value = False
|
||||||
|
cinder_utils.ensure_ceph_pool(service='cinder', replicas=3)
|
||||||
|
self.ceph_create_pool.assert_called_with(service='cinder',
|
||||||
|
name='cinder',
|
||||||
|
replicas=3)
|
||||||
|
|
||||||
|
def test_ensure_ceph_pool_already_exists(self):
|
||||||
|
self.ceph_pool_exists.return_value = True
|
||||||
|
cinder_utils.ensure_ceph_pool(service='cinder', replicas=3)
|
||||||
|
self.assertFalse(self.ceph_create_pool.called)
|
||||||
107
unit_tests/test_cluster_hooks.py
Normal file
107
unit_tests/test_cluster_hooks.py
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
|
||||||
|
from mock import MagicMock, patch, call
|
||||||
|
|
||||||
|
import cinder_utils as utils
|
||||||
|
|
||||||
|
# Need to do some early patching to get the module loaded.
|
||||||
|
#_restart_map = utils.restart_map
|
||||||
|
_register_configs = utils.register_configs
|
||||||
|
_service_enabled = utils.service_enabled
|
||||||
|
utils.register_configs = MagicMock()
|
||||||
|
utils.service_enabled = MagicMock()
|
||||||
|
|
||||||
|
import cinder_hooks as hooks
|
||||||
|
|
||||||
|
# Unpatch it now that its loaded.
|
||||||
|
utils.register_configs = _register_configs
|
||||||
|
utils.service_enabled = _service_enabled
|
||||||
|
|
||||||
|
from test_utils import (
|
||||||
|
CharmTestCase,
|
||||||
|
RESTART_MAP,
|
||||||
|
)
|
||||||
|
|
||||||
|
TO_PATCH = [
|
||||||
|
# cinder_utils
|
||||||
|
'clean_storage',
|
||||||
|
'determine_packages',
|
||||||
|
'ensure_block_device',
|
||||||
|
'ensure_ceph_keyring',
|
||||||
|
'ensure_ceph_pool',
|
||||||
|
'juju_log',
|
||||||
|
'lsb_release',
|
||||||
|
'migrate_database',
|
||||||
|
'prepare_lvm_storage',
|
||||||
|
'register_configs',
|
||||||
|
'service_enabled',
|
||||||
|
'set_ceph_env_variables',
|
||||||
|
'CONFIGS',
|
||||||
|
'CLUSTER_RES',
|
||||||
|
# charmhelpers.core.hookenv
|
||||||
|
'config',
|
||||||
|
'relation_set',
|
||||||
|
'service_name',
|
||||||
|
'unit_get',
|
||||||
|
# charmhelpers.core.host
|
||||||
|
'apt_install',
|
||||||
|
'apt_update',
|
||||||
|
# charmhelpers.contrib.openstack.openstack_utils
|
||||||
|
'configure_installation_source',
|
||||||
|
# charmhelpers.contrib.hahelpers.cluster_utils
|
||||||
|
'eligible_leader',
|
||||||
|
'get_hacluster_config',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class TestClusterHooks(CharmTestCase):
|
||||||
|
def setUp(self):
|
||||||
|
super(TestClusterHooks, self).setUp(hooks, TO_PATCH)
|
||||||
|
self.config.side_effect = self.test_config.get_all
|
||||||
|
|
||||||
|
@patch('charmhelpers.core.host.service')
|
||||||
|
@patch('charmhelpers.core.host.file_hash')
|
||||||
|
def test_cluster_hook(self, file_hash, service):
|
||||||
|
'''Ensure API restart before haproxy on cluster changed'''
|
||||||
|
# set first hash lookup on all files
|
||||||
|
side_effects = []
|
||||||
|
# set first hash lookup on all configs in restart_on_change
|
||||||
|
[side_effects.append('foo') for f in RESTART_MAP.keys()]
|
||||||
|
# set second hash lookup on all configs in restart_on_change
|
||||||
|
[side_effects.append('bar') for f in RESTART_MAP.keys()]
|
||||||
|
file_hash.side_effect = side_effects
|
||||||
|
hooks.hooks.execute(['hooks/cluster-relation-changed'])
|
||||||
|
ex = [
|
||||||
|
call('restart', 'cinder-api'),
|
||||||
|
call('restart', 'cinder-volume'),
|
||||||
|
call('restart', 'cinder-scheduler'),
|
||||||
|
call('restart', 'haproxy'),
|
||||||
|
call('restart', 'apache2')]
|
||||||
|
self.assertEquals(ex, service.call_args_list)
|
||||||
|
|
||||||
|
def test_ha_joined_complete_config(self):
|
||||||
|
'''Ensure hacluster subordinate receives all relevant config'''
|
||||||
|
conf = {
|
||||||
|
'ha-bindiface': 'eth100',
|
||||||
|
'ha-mcastport': '37373',
|
||||||
|
'vip': '192.168.25.163',
|
||||||
|
'vip_iface': 'eth101',
|
||||||
|
'vip_cidr': '19',
|
||||||
|
}
|
||||||
|
self.get_hacluster_config.return_value = conf
|
||||||
|
hooks.hooks.execute(['hooks/ha-relation-joined'])
|
||||||
|
ex_args = {
|
||||||
|
'corosync_mcastport': '37373',
|
||||||
|
'init_services': {'res_cinder_haproxy': 'haproxy'},
|
||||||
|
'resource_params': {
|
||||||
|
'res_cinder_vip':
|
||||||
|
'params ip="192.168.25.163" cidr_netmask="19" nic="eth101"',
|
||||||
|
'res_cinder_haproxy': 'op monitor interval="5s"'
|
||||||
|
},
|
||||||
|
'corosync_bindiface': 'eth100',
|
||||||
|
'clones': {'cl_cinder_haproxy': 'res_cinder_haproxy'},
|
||||||
|
'resources': {
|
||||||
|
'res_cinder_vip': 'ocf:heartbeat:IPaddr2',
|
||||||
|
'res_cinder_haproxy': 'lsb:haproxy'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
self.relation_set.assert_called_with(**ex_args)
|
||||||
110
unit_tests/test_utils.py
Normal file
110
unit_tests/test_utils.py
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
import logging
|
||||||
|
import unittest
|
||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from collections import OrderedDict
|
||||||
|
|
||||||
|
from mock import patch
|
||||||
|
|
||||||
|
|
||||||
|
RESTART_MAP = OrderedDict([
|
||||||
|
('/etc/cinder/cinder.conf',
|
||||||
|
['cinder-api', 'cinder-volume', 'cinder-scheduler', 'haproxy']),
|
||||||
|
('/etc/cinder/api-paste.ini', ['cinder-api']),
|
||||||
|
('/etc/ceph/ceph.conf', ['cinder-volume']),
|
||||||
|
('/etc/haproxy/haproxy.cfg', ['haproxy']),
|
||||||
|
('/etc/apache2/sites-available/openstack_https_frontend', ['apache2']),
|
||||||
|
('/etc/apache2/sites-available/openstack_https_frontend.conf', ['apache2'])
|
||||||
|
])
|
||||||
|
|
||||||
|
|
||||||
|
def load_config():
|
||||||
|
'''
|
||||||
|
Walk backwords from __file__ looking for config.yaml, load and return the
|
||||||
|
'options' section'
|
||||||
|
'''
|
||||||
|
config = None
|
||||||
|
f = __file__
|
||||||
|
while config is None:
|
||||||
|
d = os.path.dirname(f)
|
||||||
|
if os.path.isfile(os.path.join(d, 'config.yaml')):
|
||||||
|
config = os.path.join(d, 'config.yaml')
|
||||||
|
break
|
||||||
|
f = d
|
||||||
|
|
||||||
|
if not config:
|
||||||
|
logging.error('Could not find config.yaml in any parent directory '
|
||||||
|
'of %s. ' % file)
|
||||||
|
raise Exception
|
||||||
|
|
||||||
|
return yaml.safe_load(open(config).read())['options']
|
||||||
|
|
||||||
|
|
||||||
|
def get_default_config():
    '''
    Load default charm config from config.yaml and return it as a dict.

    If an option sets no default in config.yaml, its value is None.
    '''
    default_config = {}
    config = load_config()
    # items() instead of the Python-2-only iteritems(): identical behavior
    # on 2.x and forward-compatible with Python 3.
    for k, v in config.items():
        # Missing 'default' key maps to None, matching dict.get semantics.
        default_config[k] = v.get('default')
    return default_config
|
||||||
|
|
||||||
|
|
||||||
|
class CharmTestCase(unittest.TestCase):
    '''Base test case that mock-patches a named set of attributes on a
    target module/object for the duration of each test.

    Each name in `patches` becomes an attribute of the test case holding
    the started mock; patches are undone automatically via addCleanup.
    '''

    def setUp(self, obj, patches):
        super(CharmTestCase, self).setUp()
        self.patches = patches
        self.obj = obj
        self.test_config = TestConfig()
        self.test_relation = TestRelation()
        self.patch_all()

    def patch(self, method):
        '''Patch `method` on self.obj and return the started mock.'''
        patcher = patch.object(self.obj, method)
        started = patcher.start()
        # Ensure the patch is reverted even if the test fails.
        self.addCleanup(patcher.stop)
        return started

    def patch_all(self):
        '''Apply every patch named in self.patches, exposing each mock
        as an attribute of the same name on the test case.'''
        for name in self.patches:
            setattr(self, name, self.patch(name))
|
||||||
|
|
||||||
|
|
||||||
|
class TestConfig(object):
    '''Dict-backed stand-in for charm config, seeded from config.yaml
    defaults. Reads of unknown keys yield None; writes of unknown keys
    raise KeyError (you may only set options the charm declares).'''

    def __init__(self):
        self.config = get_default_config()

    def get(self, attr):
        # dict.get already returns None for missing keys, matching the
        # original try/except KeyError behavior.
        return self.config.get(attr)

    def get_all(self):
        return self.config

    def set(self, attr, value):
        if attr not in self.config:
            raise KeyError
        self.config[attr] = value
|
||||||
|
|
||||||
|
|
||||||
|
class TestRelation(object):
    '''Dict-backed stand-in for juju relation data in unit tests.'''

    def __init__(self, relation_data=None):
        # BUG FIX: the original used a mutable default argument
        # (relation_data={}), so every instance created without an
        # argument shared one dict — mutations leaked between tests.
        self.relation_data = {} if relation_data is None else relation_data

    def set(self, relation_data):
        '''Replace the backing relation data wholesale.'''
        self.relation_data = relation_data

    def get(self, attr=None, unit=None, rid=None):
        '''Return the whole relation dict when attr is None, otherwise
        the value for attr (None if absent). unit/rid are accepted for
        signature compatibility with relation_get and ignored.'''
        if attr is None:
            return self.relation_data
        return self.relation_data.get(attr)
|
||||||
Reference in New Issue
Block a user