First commit of cinder-backup subordinate charm
This commit is contained in:
		
							
								
								
									
										2
									
								
								.bzrignore
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										2
									
								
								.bzrignore
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,2 @@
 | 
			
		||||
bin
 | 
			
		||||
.coverage
 | 
			
		||||
							
								
								
									
										6
									
								
								.coveragerc
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										6
									
								
								.coveragerc
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,6 @@
 | 
			
		||||
[report]
 | 
			
		||||
# Regexes for lines to exclude from consideration
 | 
			
		||||
exclude_lines =
 | 
			
		||||
    if __name__ == .__main__.:
 | 
			
		||||
include=
 | 
			
		||||
    hooks/cinder_*
 | 
			
		||||
							
								
								
									
										21
									
								
								Makefile
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										21
									
								
								Makefile
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,21 @@
 | 
			
		||||
#!/usr/bin/make
 | 
			
		||||
PYTHON := /usr/bin/env python
 | 
			
		||||
 | 
			
		||||
lint:
 | 
			
		||||
	@flake8 --exclude hooks/charmhelpers hooks unit_tests
 | 
			
		||||
	@charm proof
 | 
			
		||||
 | 
			
		||||
unit_test:
 | 
			
		||||
	@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
 | 
			
		||||
 | 
			
		||||
bin/charm_helpers_sync.py:
 | 
			
		||||
	@mkdir -p bin
 | 
			
		||||
	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
 | 
			
		||||
        > bin/charm_helpers_sync.py
 | 
			
		||||
 | 
			
		||||
sync: bin/charm_helpers_sync.py
 | 
			
		||||
	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
 | 
			
		||||
 | 
			
		||||
publish: lint unit_test
 | 
			
		||||
	bzr push lp:charms/cinder-backup
 | 
			
		||||
	bzr push lp:charms/trusty/cinder-backup
 | 
			
		||||
							
								
								
									
										18
									
								
								README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										18
									
								
								README.md
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,18 @@
 | 
			
		||||
Cinder Backup Service
 | 
			
		||||
-------------------------------
 | 
			
		||||
 | 
			
		||||
Overview
 | 
			
		||||
========
 | 
			
		||||
 | 
			
		||||
This charm provides a 
 | 
			
		||||
 | 
			
		||||
To use:
 | 
			
		||||
 | 
			
		||||
    juju deploy cinder
 | 
			
		||||
    juju deploy -n 3 ceph
 | 
			
		||||
    juju deploy cinder-backup
 | 
			
		||||
    juju add-relation cinder-backup cinder
 | 
			
		||||
    juju add-relation cinder-backup ceph
 | 
			
		||||
 | 
			
		||||
Configuration
 | 
			
		||||
=============
 | 
			
		||||
							
								
								
									
										11
									
								
								charm-helpers-hooks.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										11
									
								
								charm-helpers-hooks.yaml
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,11 @@
 | 
			
		||||
branch: lp:charm-helpers
 | 
			
		||||
destination: hooks/charmhelpers
 | 
			
		||||
include:
 | 
			
		||||
    - core
 | 
			
		||||
    - fetch
 | 
			
		||||
    - contrib.openstack|inc=*
 | 
			
		||||
    - contrib.storage
 | 
			
		||||
    - contrib.hahelpers
 | 
			
		||||
    - contrib.network.ip
 | 
			
		||||
    - contrib.python.packages
 | 
			
		||||
    - payload.execd
 | 
			
		||||
							
								
								
									
										12
									
								
								config.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										12
									
								
								config.yaml
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,12 @@
 | 
			
		||||
options:
 | 
			
		||||
  ceph-osd-replication-count:
 | 
			
		||||
    default: 3
 | 
			
		||||
    type: int
 | 
			
		||||
    description: |
 | 
			
		||||
      This value dictates the number of replicas ceph must make of any
 | 
			
		||||
      object it stores withing the cinder rbd pool. Of course, this only
 | 
			
		||||
      applies if using Ceph as a backend store. Note that once the cinder
 | 
			
		||||
      rbd pool has been created, changing this value will not have any
 | 
			
		||||
      effect (although it can be changed in ceph by manually configuring
 | 
			
		||||
      your ceph cluster).
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										17
									
								
								copyright
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										17
									
								
								copyright
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,17 @@
 | 
			
		||||
Format: http://dep.debian.net/deps/dep5/
 | 
			
		||||
 | 
			
		||||
Files: *
 | 
			
		||||
Copyright: Copyright 2015, Canonical Ltd., All Rights Reserved.
 | 
			
		||||
License: GPL-3
 | 
			
		||||
 This program is free software: you can redistribute it and/or modify
 | 
			
		||||
 it under the terms of the GNU General Public License as published by
 | 
			
		||||
 the Free Software Foundation, either version 3 of the License, or
 | 
			
		||||
 (at your option) any later version.
 | 
			
		||||
 .
 | 
			
		||||
 This program is distributed in the hope that it will be useful,
 | 
			
		||||
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
 GNU General Public License for more details.
 | 
			
		||||
 .
 | 
			
		||||
 You should have received a copy of the GNU General Public License
 | 
			
		||||
 along with this program.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
							
								
								
									
										0
									
								
								hooks/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										0
									
								
								hooks/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
								
								
									
										1
									
								
								hooks/backup-backend-relation-broken
									
									
									
									
									
										Symbolic link
									
								
							
							
						
						
									
										1
									
								
								hooks/backup-backend-relation-broken
									
									
									
									
									
										Symbolic link
									
								
							@@ -0,0 +1 @@
 | 
			
		||||
cinder_backup_hooks.py
 | 
			
		||||
							
								
								
									
										1
									
								
								hooks/backup-backend-relation-changed
									
									
									
									
									
										Symbolic link
									
								
							
							
						
						
									
										1
									
								
								hooks/backup-backend-relation-changed
									
									
									
									
									
										Symbolic link
									
								
							@@ -0,0 +1 @@
 | 
			
		||||
cinder_backup_hooks.py
 | 
			
		||||
							
								
								
									
										1
									
								
								hooks/backup-backend-relation-joined
									
									
									
									
									
										Symbolic link
									
								
							
							
						
						
									
										1
									
								
								hooks/backup-backend-relation-joined
									
									
									
									
									
										Symbolic link
									
								
							@@ -0,0 +1 @@
 | 
			
		||||
cinder_backup_hooks.py
 | 
			
		||||
							
								
								
									
										1
									
								
								hooks/ceph-relation-broken
									
									
									
									
									
										Symbolic link
									
								
							
							
						
						
									
										1
									
								
								hooks/ceph-relation-broken
									
									
									
									
									
										Symbolic link
									
								
							@@ -0,0 +1 @@
 | 
			
		||||
cinder_backup_hooks.py
 | 
			
		||||
							
								
								
									
										1
									
								
								hooks/ceph-relation-changed
									
									
									
									
									
										Symbolic link
									
								
							
							
						
						
									
										1
									
								
								hooks/ceph-relation-changed
									
									
									
									
									
										Symbolic link
									
								
							@@ -0,0 +1 @@
 | 
			
		||||
cinder_backup_hooks.py
 | 
			
		||||
							
								
								
									
										1
									
								
								hooks/ceph-relation-joined
									
									
									
									
									
										Symbolic link
									
								
							
							
						
						
									
										1
									
								
								hooks/ceph-relation-joined
									
									
									
									
									
										Symbolic link
									
								
							@@ -0,0 +1 @@
 | 
			
		||||
cinder_backup_hooks.py
 | 
			
		||||
							
								
								
									
										38
									
								
								hooks/charmhelpers/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										38
									
								
								hooks/charmhelpers/__init__.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,38 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
# Bootstrap charm-helpers, installing its dependencies if necessary using
 | 
			
		||||
# only standard libraries.
 | 
			
		||||
import subprocess
 | 
			
		||||
import sys
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    import six  # flake8: noqa
 | 
			
		||||
except ImportError:
 | 
			
		||||
    if sys.version_info.major == 2:
 | 
			
		||||
        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
 | 
			
		||||
    else:
 | 
			
		||||
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
 | 
			
		||||
    import six  # flake8: noqa
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    import yaml  # flake8: noqa
 | 
			
		||||
except ImportError:
 | 
			
		||||
    if sys.version_info.major == 2:
 | 
			
		||||
        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
 | 
			
		||||
    else:
 | 
			
		||||
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
 | 
			
		||||
    import yaml  # flake8: noqa
 | 
			
		||||
							
								
								
									
										15
									
								
								hooks/charmhelpers/contrib/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								hooks/charmhelpers/contrib/__init__.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,15 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
							
								
								
									
										15
									
								
								hooks/charmhelpers/contrib/hahelpers/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								hooks/charmhelpers/contrib/hahelpers/__init__.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,15 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
							
								
								
									
										82
									
								
								hooks/charmhelpers/contrib/hahelpers/apache.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										82
									
								
								hooks/charmhelpers/contrib/hahelpers/apache.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,82 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
#
 | 
			
		||||
# Copyright 2012 Canonical Ltd.
 | 
			
		||||
#
 | 
			
		||||
# This file is sourced from lp:openstack-charm-helpers
 | 
			
		||||
#
 | 
			
		||||
# Authors:
 | 
			
		||||
#  James Page <james.page@ubuntu.com>
 | 
			
		||||
#  Adam Gandelman <adamg@ubuntu.com>
 | 
			
		||||
#
 | 
			
		||||
 | 
			
		||||
import subprocess
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    config as config_get,
 | 
			
		||||
    relation_get,
 | 
			
		||||
    relation_ids,
 | 
			
		||||
    related_units as relation_list,
 | 
			
		||||
    log,
 | 
			
		||||
    INFO,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_cert(cn=None):
 | 
			
		||||
    # TODO: deal with multiple https endpoints via charm config
 | 
			
		||||
    cert = config_get('ssl_cert')
 | 
			
		||||
    key = config_get('ssl_key')
 | 
			
		||||
    if not (cert and key):
 | 
			
		||||
        log("Inspecting identity-service relations for SSL certificate.",
 | 
			
		||||
            level=INFO)
 | 
			
		||||
        cert = key = None
 | 
			
		||||
        if cn:
 | 
			
		||||
            ssl_cert_attr = 'ssl_cert_{}'.format(cn)
 | 
			
		||||
            ssl_key_attr = 'ssl_key_{}'.format(cn)
 | 
			
		||||
        else:
 | 
			
		||||
            ssl_cert_attr = 'ssl_cert'
 | 
			
		||||
            ssl_key_attr = 'ssl_key'
 | 
			
		||||
        for r_id in relation_ids('identity-service'):
 | 
			
		||||
            for unit in relation_list(r_id):
 | 
			
		||||
                if not cert:
 | 
			
		||||
                    cert = relation_get(ssl_cert_attr,
 | 
			
		||||
                                        rid=r_id, unit=unit)
 | 
			
		||||
                if not key:
 | 
			
		||||
                    key = relation_get(ssl_key_attr,
 | 
			
		||||
                                       rid=r_id, unit=unit)
 | 
			
		||||
    return (cert, key)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_ca_cert():
 | 
			
		||||
    ca_cert = config_get('ssl_ca')
 | 
			
		||||
    if ca_cert is None:
 | 
			
		||||
        log("Inspecting identity-service relations for CA SSL certificate.",
 | 
			
		||||
            level=INFO)
 | 
			
		||||
        for r_id in relation_ids('identity-service'):
 | 
			
		||||
            for unit in relation_list(r_id):
 | 
			
		||||
                if ca_cert is None:
 | 
			
		||||
                    ca_cert = relation_get('ca_cert',
 | 
			
		||||
                                           rid=r_id, unit=unit)
 | 
			
		||||
    return ca_cert
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def install_ca_cert(ca_cert):
 | 
			
		||||
    if ca_cert:
 | 
			
		||||
        with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
 | 
			
		||||
                  'w') as crt:
 | 
			
		||||
            crt.write(ca_cert)
 | 
			
		||||
        subprocess.check_call(['update-ca-certificates', '--fresh'])
 | 
			
		||||
							
								
								
									
										316
									
								
								hooks/charmhelpers/contrib/hahelpers/cluster.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										316
									
								
								hooks/charmhelpers/contrib/hahelpers/cluster.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,316 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
#
 | 
			
		||||
# Copyright 2012 Canonical Ltd.
 | 
			
		||||
#
 | 
			
		||||
# Authors:
 | 
			
		||||
#  James Page <james.page@ubuntu.com>
 | 
			
		||||
#  Adam Gandelman <adamg@ubuntu.com>
 | 
			
		||||
#
 | 
			
		||||
 | 
			
		||||
"""
 | 
			
		||||
Helpers for clustering and determining "cluster leadership" and other
 | 
			
		||||
clustering-related helpers.
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
import subprocess
 | 
			
		||||
import os
 | 
			
		||||
 | 
			
		||||
from socket import gethostname as get_unit_hostname
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    log,
 | 
			
		||||
    relation_ids,
 | 
			
		||||
    related_units as relation_list,
 | 
			
		||||
    relation_get,
 | 
			
		||||
    config as config_get,
 | 
			
		||||
    INFO,
 | 
			
		||||
    ERROR,
 | 
			
		||||
    WARNING,
 | 
			
		||||
    unit_get,
 | 
			
		||||
    is_leader as juju_is_leader
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.core.decorators import (
 | 
			
		||||
    retry_on_exception,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.core.strutils import (
 | 
			
		||||
    bool_from_string,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
DC_RESOURCE_NAME = 'DC'
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class HAIncompleteConfig(Exception):
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class CRMResourceNotFound(Exception):
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class CRMDCNotFound(Exception):
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_elected_leader(resource):
 | 
			
		||||
    """
 | 
			
		||||
    Returns True if the charm executing this is the elected cluster leader.
 | 
			
		||||
 | 
			
		||||
    It relies on two mechanisms to determine leadership:
 | 
			
		||||
        1. If juju is sufficiently new and leadership election is supported,
 | 
			
		||||
        the is_leader command will be used.
 | 
			
		||||
        2. If the charm is part of a corosync cluster, call corosync to
 | 
			
		||||
        determine leadership.
 | 
			
		||||
        3. If the charm is not part of a corosync cluster, the leader is
 | 
			
		||||
        determined as being "the alive unit with the lowest unit numer". In
 | 
			
		||||
        other words, the oldest surviving unit.
 | 
			
		||||
    """
 | 
			
		||||
    try:
 | 
			
		||||
        return juju_is_leader()
 | 
			
		||||
    except NotImplementedError:
 | 
			
		||||
        log('Juju leadership election feature not enabled'
 | 
			
		||||
            ', using fallback support',
 | 
			
		||||
            level=WARNING)
 | 
			
		||||
 | 
			
		||||
    if is_clustered():
 | 
			
		||||
        if not is_crm_leader(resource):
 | 
			
		||||
            log('Deferring action to CRM leader.', level=INFO)
 | 
			
		||||
            return False
 | 
			
		||||
    else:
 | 
			
		||||
        peers = peer_units()
 | 
			
		||||
        if peers and not oldest_peer(peers):
 | 
			
		||||
            log('Deferring action to oldest service unit.', level=INFO)
 | 
			
		||||
            return False
 | 
			
		||||
    return True
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_clustered():
 | 
			
		||||
    for r_id in (relation_ids('ha') or []):
 | 
			
		||||
        for unit in (relation_list(r_id) or []):
 | 
			
		||||
            clustered = relation_get('clustered',
 | 
			
		||||
                                     rid=r_id,
 | 
			
		||||
                                     unit=unit)
 | 
			
		||||
            if clustered:
 | 
			
		||||
                return True
 | 
			
		||||
    return False
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_crm_dc():
 | 
			
		||||
    """
 | 
			
		||||
    Determine leadership by querying the pacemaker Designated Controller
 | 
			
		||||
    """
 | 
			
		||||
    cmd = ['crm', 'status']
 | 
			
		||||
    try:
 | 
			
		||||
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
 | 
			
		||||
        if not isinstance(status, six.text_type):
 | 
			
		||||
            status = six.text_type(status, "utf-8")
 | 
			
		||||
    except subprocess.CalledProcessError as ex:
 | 
			
		||||
        raise CRMDCNotFound(str(ex))
 | 
			
		||||
 | 
			
		||||
    current_dc = ''
 | 
			
		||||
    for line in status.split('\n'):
 | 
			
		||||
        if line.startswith('Current DC'):
 | 
			
		||||
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
 | 
			
		||||
            current_dc = line.split(':')[1].split()[0]
 | 
			
		||||
    if current_dc == get_unit_hostname():
 | 
			
		||||
        return True
 | 
			
		||||
    elif current_dc == 'NONE':
 | 
			
		||||
        raise CRMDCNotFound('Current DC: NONE')
 | 
			
		||||
 | 
			
		||||
    return False
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@retry_on_exception(5, base_delay=2,
 | 
			
		||||
                    exc_type=(CRMResourceNotFound, CRMDCNotFound))
 | 
			
		||||
def is_crm_leader(resource, retry=False):
 | 
			
		||||
    """
 | 
			
		||||
    Returns True if the charm calling this is the elected corosync leader,
 | 
			
		||||
    as returned by calling the external "crm" command.
 | 
			
		||||
 | 
			
		||||
    We allow this operation to be retried to avoid the possibility of getting a
 | 
			
		||||
    false negative. See LP #1396246 for more info.
 | 
			
		||||
    """
 | 
			
		||||
    if resource == DC_RESOURCE_NAME:
 | 
			
		||||
        return is_crm_dc()
 | 
			
		||||
    cmd = ['crm', 'resource', 'show', resource]
 | 
			
		||||
    try:
 | 
			
		||||
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
 | 
			
		||||
        if not isinstance(status, six.text_type):
 | 
			
		||||
            status = six.text_type(status, "utf-8")
 | 
			
		||||
    except subprocess.CalledProcessError:
 | 
			
		||||
        status = None
 | 
			
		||||
 | 
			
		||||
    if status and get_unit_hostname() in status:
 | 
			
		||||
        return True
 | 
			
		||||
 | 
			
		||||
    if status and "resource %s is NOT running" % (resource) in status:
 | 
			
		||||
        raise CRMResourceNotFound("CRM resource %s not found" % (resource))
 | 
			
		||||
 | 
			
		||||
    return False
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_leader(resource):
 | 
			
		||||
    log("is_leader is deprecated. Please consider using is_crm_leader "
 | 
			
		||||
        "instead.", level=WARNING)
 | 
			
		||||
    return is_crm_leader(resource)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def peer_units(peer_relation="cluster"):
 | 
			
		||||
    peers = []
 | 
			
		||||
    for r_id in (relation_ids(peer_relation) or []):
 | 
			
		||||
        for unit in (relation_list(r_id) or []):
 | 
			
		||||
            peers.append(unit)
 | 
			
		||||
    return peers
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def peer_ips(peer_relation='cluster', addr_key='private-address'):
 | 
			
		||||
    '''Return a dict of peers and their private-address'''
 | 
			
		||||
    peers = {}
 | 
			
		||||
    for r_id in relation_ids(peer_relation):
 | 
			
		||||
        for unit in relation_list(r_id):
 | 
			
		||||
            peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
 | 
			
		||||
    return peers
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def oldest_peer(peers):
 | 
			
		||||
    """Determines who the oldest peer is by comparing unit numbers."""
 | 
			
		||||
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
 | 
			
		||||
    for peer in peers:
 | 
			
		||||
        remote_unit_no = int(peer.split('/')[1])
 | 
			
		||||
        if remote_unit_no < local_unit_no:
 | 
			
		||||
            return False
 | 
			
		||||
    return True
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def eligible_leader(resource):
 | 
			
		||||
    log("eligible_leader is deprecated. Please consider using "
 | 
			
		||||
        "is_elected_leader instead.", level=WARNING)
 | 
			
		||||
    return is_elected_leader(resource)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def https():
 | 
			
		||||
    '''
 | 
			
		||||
    Determines whether enough data has been provided in configuration
 | 
			
		||||
    or relation data to configure HTTPS
 | 
			
		||||
    .
 | 
			
		||||
    returns: boolean
 | 
			
		||||
    '''
 | 
			
		||||
    use_https = config_get('use-https')
 | 
			
		||||
    if use_https and bool_from_string(use_https):
 | 
			
		||||
        return True
 | 
			
		||||
    if config_get('ssl_cert') and config_get('ssl_key'):
 | 
			
		||||
        return True
 | 
			
		||||
    for r_id in relation_ids('identity-service'):
 | 
			
		||||
        for unit in relation_list(r_id):
 | 
			
		||||
            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
 | 
			
		||||
            rel_state = [
 | 
			
		||||
                relation_get('https_keystone', rid=r_id, unit=unit),
 | 
			
		||||
                relation_get('ca_cert', rid=r_id, unit=unit),
 | 
			
		||||
            ]
 | 
			
		||||
            # NOTE: works around (LP: #1203241)
 | 
			
		||||
            if (None not in rel_state) and ('' not in rel_state):
 | 
			
		||||
                return True
 | 
			
		||||
    return False
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def determine_api_port(public_port, singlenode_mode=False):
 | 
			
		||||
    '''
 | 
			
		||||
    Determine correct API server listening port based on
 | 
			
		||||
    existence of HTTPS reverse proxy and/or haproxy.
 | 
			
		||||
 | 
			
		||||
    public_port: int: standard public port for given service
 | 
			
		||||
 | 
			
		||||
    singlenode_mode: boolean: Shuffle ports when only a single unit is present
 | 
			
		||||
 | 
			
		||||
    returns: int: the correct listening port for the API service
 | 
			
		||||
    '''
 | 
			
		||||
    i = 0
 | 
			
		||||
    if singlenode_mode:
 | 
			
		||||
        i += 1
 | 
			
		||||
    elif len(peer_units()) > 0 or is_clustered():
 | 
			
		||||
        i += 1
 | 
			
		||||
    if https():
 | 
			
		||||
        i += 1
 | 
			
		||||
    return public_port - (i * 10)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def determine_apache_port(public_port, singlenode_mode=False):
 | 
			
		||||
    '''
 | 
			
		||||
    Description: Determine correct apache listening port based on public IP +
 | 
			
		||||
    state of the cluster.
 | 
			
		||||
 | 
			
		||||
    public_port: int: standard public port for given service
 | 
			
		||||
 | 
			
		||||
    singlenode_mode: boolean: Shuffle ports when only a single unit is present
 | 
			
		||||
 | 
			
		||||
    returns: int: the correct listening port for the HAProxy service
 | 
			
		||||
    '''
 | 
			
		||||
    i = 0
 | 
			
		||||
    if singlenode_mode:
 | 
			
		||||
        i += 1
 | 
			
		||||
    elif len(peer_units()) > 0 or is_clustered():
 | 
			
		||||
        i += 1
 | 
			
		||||
    return public_port - (i * 10)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_hacluster_config(exclude_keys=None):
 | 
			
		||||
    '''
 | 
			
		||||
    Obtains all relevant configuration from charm configuration required
 | 
			
		||||
    for initiating a relation to hacluster:
 | 
			
		||||
 | 
			
		||||
        ha-bindiface, ha-mcastport, vip
 | 
			
		||||
 | 
			
		||||
    param: exclude_keys: list of setting key(s) to be excluded.
 | 
			
		||||
    returns: dict: A dict containing settings keyed by setting name.
 | 
			
		||||
    raises: HAIncompleteConfig if settings are missing.
 | 
			
		||||
    '''
 | 
			
		||||
    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
 | 
			
		||||
    conf = {}
 | 
			
		||||
    for setting in settings:
 | 
			
		||||
        if exclude_keys and setting in exclude_keys:
 | 
			
		||||
            continue
 | 
			
		||||
 | 
			
		||||
        conf[setting] = config_get(setting)
 | 
			
		||||
    missing = []
 | 
			
		||||
    [missing.append(s) for s, v in six.iteritems(conf) if v is None]
 | 
			
		||||
    if missing:
 | 
			
		||||
        log('Insufficient config data to configure hacluster.', level=ERROR)
 | 
			
		||||
        raise HAIncompleteConfig
 | 
			
		||||
    return conf
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def canonical_url(configs, vip_setting='vip'):
 | 
			
		||||
    '''
 | 
			
		||||
    Returns the correct HTTP URL to this host given the state of HTTPS
 | 
			
		||||
    configuration and hacluster.
 | 
			
		||||
 | 
			
		||||
    :configs    : OSTemplateRenderer: A config tempating object to inspect for
 | 
			
		||||
                                      a complete https context.
 | 
			
		||||
 | 
			
		||||
    :vip_setting:                str: Setting in charm config that specifies
 | 
			
		||||
                                      VIP address.
 | 
			
		||||
    '''
 | 
			
		||||
    scheme = 'http'
 | 
			
		||||
    if 'https' in configs.complete_contexts():
 | 
			
		||||
        scheme = 'https'
 | 
			
		||||
    if is_clustered():
 | 
			
		||||
        addr = config_get(vip_setting)
 | 
			
		||||
    else:
 | 
			
		||||
        addr = unit_get('private-address')
 | 
			
		||||
    return '%s://%s' % (scheme, addr)
 | 
			
		||||
							
								
								
									
										15
									
								
								hooks/charmhelpers/contrib/network/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								hooks/charmhelpers/contrib/network/__init__.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,15 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
							
								
								
									
										456
									
								
								hooks/charmhelpers/contrib/network/ip.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										456
									
								
								hooks/charmhelpers/contrib/network/ip.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,456 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import glob
 | 
			
		||||
import re
 | 
			
		||||
import subprocess
 | 
			
		||||
import six
 | 
			
		||||
import socket
 | 
			
		||||
 | 
			
		||||
from functools import partial
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.hookenv import unit_get
 | 
			
		||||
from charmhelpers.fetch import apt_install, apt_update
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    log,
 | 
			
		||||
    WARNING,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    import netifaces
 | 
			
		||||
except ImportError:
 | 
			
		||||
    apt_update(fatal=True)
 | 
			
		||||
    apt_install('python-netifaces', fatal=True)
 | 
			
		||||
    import netifaces
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    import netaddr
 | 
			
		||||
except ImportError:
 | 
			
		||||
    apt_update(fatal=True)
 | 
			
		||||
    apt_install('python-netaddr', fatal=True)
 | 
			
		||||
    import netaddr
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _validate_cidr(network):
 | 
			
		||||
    try:
 | 
			
		||||
        netaddr.IPNetwork(network)
 | 
			
		||||
    except (netaddr.core.AddrFormatError, ValueError):
 | 
			
		||||
        raise ValueError("Network (%s) is not in CIDR presentation format" %
 | 
			
		||||
                         network)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def no_ip_found_error_out(network):
 | 
			
		||||
    errmsg = ("No IP address found in network: %s" % network)
 | 
			
		||||
    raise ValueError(errmsg)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_address_in_network(network, fallback=None, fatal=False):
 | 
			
		||||
    """Get an IPv4 or IPv6 address within the network from the host.
 | 
			
		||||
 | 
			
		||||
    :param network (str): CIDR presentation format. For example,
 | 
			
		||||
        '192.168.1.0/24'.
 | 
			
		||||
    :param fallback (str): If no address is found, return fallback.
 | 
			
		||||
    :param fatal (boolean): If no address is found, fallback is not
 | 
			
		||||
        set and fatal is True then exit(1).
 | 
			
		||||
    """
 | 
			
		||||
    if network is None:
 | 
			
		||||
        if fallback is not None:
 | 
			
		||||
            return fallback
 | 
			
		||||
 | 
			
		||||
        if fatal:
 | 
			
		||||
            no_ip_found_error_out(network)
 | 
			
		||||
        else:
 | 
			
		||||
            return None
 | 
			
		||||
 | 
			
		||||
    _validate_cidr(network)
 | 
			
		||||
    network = netaddr.IPNetwork(network)
 | 
			
		||||
    for iface in netifaces.interfaces():
 | 
			
		||||
        addresses = netifaces.ifaddresses(iface)
 | 
			
		||||
        if network.version == 4 and netifaces.AF_INET in addresses:
 | 
			
		||||
            addr = addresses[netifaces.AF_INET][0]['addr']
 | 
			
		||||
            netmask = addresses[netifaces.AF_INET][0]['netmask']
 | 
			
		||||
            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
 | 
			
		||||
            if cidr in network:
 | 
			
		||||
                return str(cidr.ip)
 | 
			
		||||
 | 
			
		||||
        if network.version == 6 and netifaces.AF_INET6 in addresses:
 | 
			
		||||
            for addr in addresses[netifaces.AF_INET6]:
 | 
			
		||||
                if not addr['addr'].startswith('fe80'):
 | 
			
		||||
                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
 | 
			
		||||
                                                        addr['netmask']))
 | 
			
		||||
                    if cidr in network:
 | 
			
		||||
                        return str(cidr.ip)
 | 
			
		||||
 | 
			
		||||
    if fallback is not None:
 | 
			
		||||
        return fallback
 | 
			
		||||
 | 
			
		||||
    if fatal:
 | 
			
		||||
        no_ip_found_error_out(network)
 | 
			
		||||
 | 
			
		||||
    return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_ipv6(address):
 | 
			
		||||
    """Determine whether provided address is IPv6 or not."""
 | 
			
		||||
    try:
 | 
			
		||||
        address = netaddr.IPAddress(address)
 | 
			
		||||
    except netaddr.AddrFormatError:
 | 
			
		||||
        # probably a hostname - so not an address at all!
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
    return address.version == 6
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_address_in_network(network, address):
 | 
			
		||||
    """
 | 
			
		||||
    Determine whether the provided address is within a network range.
 | 
			
		||||
 | 
			
		||||
    :param network (str): CIDR presentation format. For example,
 | 
			
		||||
        '192.168.1.0/24'.
 | 
			
		||||
    :param address: An individual IPv4 or IPv6 address without a net
 | 
			
		||||
        mask or subnet prefix. For example, '192.168.1.1'.
 | 
			
		||||
    :returns boolean: Flag indicating whether address is in network.
 | 
			
		||||
    """
 | 
			
		||||
    try:
 | 
			
		||||
        network = netaddr.IPNetwork(network)
 | 
			
		||||
    except (netaddr.core.AddrFormatError, ValueError):
 | 
			
		||||
        raise ValueError("Network (%s) is not in CIDR presentation format" %
 | 
			
		||||
                         network)
 | 
			
		||||
 | 
			
		||||
    try:
 | 
			
		||||
        address = netaddr.IPAddress(address)
 | 
			
		||||
    except (netaddr.core.AddrFormatError, ValueError):
 | 
			
		||||
        raise ValueError("Address (%s) is not in correct presentation format" %
 | 
			
		||||
                         address)
 | 
			
		||||
 | 
			
		||||
    if address in network:
 | 
			
		||||
        return True
 | 
			
		||||
    else:
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _get_for_address(address, key):
 | 
			
		||||
    """Retrieve an attribute of or the physical interface that
 | 
			
		||||
    the IP address provided could be bound to.
 | 
			
		||||
 | 
			
		||||
    :param address (str): An individual IPv4 or IPv6 address without a net
 | 
			
		||||
        mask or subnet prefix. For example, '192.168.1.1'.
 | 
			
		||||
    :param key: 'iface' for the physical interface name or an attribute
 | 
			
		||||
        of the configured interface, for example 'netmask'.
 | 
			
		||||
    :returns str: Requested attribute or None if address is not bindable.
 | 
			
		||||
    """
 | 
			
		||||
    address = netaddr.IPAddress(address)
 | 
			
		||||
    for iface in netifaces.interfaces():
 | 
			
		||||
        addresses = netifaces.ifaddresses(iface)
 | 
			
		||||
        if address.version == 4 and netifaces.AF_INET in addresses:
 | 
			
		||||
            addr = addresses[netifaces.AF_INET][0]['addr']
 | 
			
		||||
            netmask = addresses[netifaces.AF_INET][0]['netmask']
 | 
			
		||||
            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
 | 
			
		||||
            cidr = network.cidr
 | 
			
		||||
            if address in cidr:
 | 
			
		||||
                if key == 'iface':
 | 
			
		||||
                    return iface
 | 
			
		||||
                else:
 | 
			
		||||
                    return addresses[netifaces.AF_INET][0][key]
 | 
			
		||||
 | 
			
		||||
        if address.version == 6 and netifaces.AF_INET6 in addresses:
 | 
			
		||||
            for addr in addresses[netifaces.AF_INET6]:
 | 
			
		||||
                if not addr['addr'].startswith('fe80'):
 | 
			
		||||
                    network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
 | 
			
		||||
                                                           addr['netmask']))
 | 
			
		||||
                    cidr = network.cidr
 | 
			
		||||
                    if address in cidr:
 | 
			
		||||
                        if key == 'iface':
 | 
			
		||||
                            return iface
 | 
			
		||||
                        elif key == 'netmask' and cidr:
 | 
			
		||||
                            return str(cidr).split('/')[1]
 | 
			
		||||
                        else:
 | 
			
		||||
                            return addr[key]
 | 
			
		||||
 | 
			
		||||
    return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
get_iface_for_address = partial(_get_for_address, key='iface')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
get_netmask_for_address = partial(_get_for_address, key='netmask')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def format_ipv6_addr(address):
 | 
			
		||||
    """If address is IPv6, wrap it in '[]' otherwise return None.
 | 
			
		||||
 | 
			
		||||
    This is required by most configuration files when specifying IPv6
 | 
			
		||||
    addresses.
 | 
			
		||||
    """
 | 
			
		||||
    if is_ipv6(address):
 | 
			
		||||
        return "[%s]" % address
 | 
			
		||||
 | 
			
		||||
    return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
 | 
			
		||||
                   fatal=True, exc_list=None):
 | 
			
		||||
    """Return the assigned IP address for a given interface, if any."""
 | 
			
		||||
    # Extract nic if passed /dev/ethX
 | 
			
		||||
    if '/' in iface:
 | 
			
		||||
        iface = iface.split('/')[-1]
 | 
			
		||||
 | 
			
		||||
    if not exc_list:
 | 
			
		||||
        exc_list = []
 | 
			
		||||
 | 
			
		||||
    try:
 | 
			
		||||
        inet_num = getattr(netifaces, inet_type)
 | 
			
		||||
    except AttributeError:
 | 
			
		||||
        raise Exception("Unknown inet type '%s'" % str(inet_type))
 | 
			
		||||
 | 
			
		||||
    interfaces = netifaces.interfaces()
 | 
			
		||||
    if inc_aliases:
 | 
			
		||||
        ifaces = []
 | 
			
		||||
        for _iface in interfaces:
 | 
			
		||||
            if iface == _iface or _iface.split(':')[0] == iface:
 | 
			
		||||
                ifaces.append(_iface)
 | 
			
		||||
 | 
			
		||||
        if fatal and not ifaces:
 | 
			
		||||
            raise Exception("Invalid interface '%s'" % iface)
 | 
			
		||||
 | 
			
		||||
        ifaces.sort()
 | 
			
		||||
    else:
 | 
			
		||||
        if iface not in interfaces:
 | 
			
		||||
            if fatal:
 | 
			
		||||
                raise Exception("Interface '%s' not found " % (iface))
 | 
			
		||||
            else:
 | 
			
		||||
                return []
 | 
			
		||||
 | 
			
		||||
        else:
 | 
			
		||||
            ifaces = [iface]
 | 
			
		||||
 | 
			
		||||
    addresses = []
 | 
			
		||||
    for netiface in ifaces:
 | 
			
		||||
        net_info = netifaces.ifaddresses(netiface)
 | 
			
		||||
        if inet_num in net_info:
 | 
			
		||||
            for entry in net_info[inet_num]:
 | 
			
		||||
                if 'addr' in entry and entry['addr'] not in exc_list:
 | 
			
		||||
                    addresses.append(entry['addr'])
 | 
			
		||||
 | 
			
		||||
    if fatal and not addresses:
 | 
			
		||||
        raise Exception("Interface '%s' doesn't have any %s addresses." %
 | 
			
		||||
                        (iface, inet_type))
 | 
			
		||||
 | 
			
		||||
    return sorted(addresses)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_iface_from_addr(addr):
 | 
			
		||||
    """Work out on which interface the provided address is configured."""
 | 
			
		||||
    for iface in netifaces.interfaces():
 | 
			
		||||
        addresses = netifaces.ifaddresses(iface)
 | 
			
		||||
        for inet_type in addresses:
 | 
			
		||||
            for _addr in addresses[inet_type]:
 | 
			
		||||
                _addr = _addr['addr']
 | 
			
		||||
                # link local
 | 
			
		||||
                ll_key = re.compile("(.+)%.*")
 | 
			
		||||
                raw = re.match(ll_key, _addr)
 | 
			
		||||
                if raw:
 | 
			
		||||
                    _addr = raw.group(1)
 | 
			
		||||
 | 
			
		||||
                if _addr == addr:
 | 
			
		||||
                    log("Address '%s' is configured on iface '%s'" %
 | 
			
		||||
                        (addr, iface))
 | 
			
		||||
                    return iface
 | 
			
		||||
 | 
			
		||||
    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
 | 
			
		||||
    raise Exception(msg)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def sniff_iface(f):
 | 
			
		||||
    """Ensure decorated function is called with a value for iface.
 | 
			
		||||
 | 
			
		||||
    If no iface provided, inject net iface inferred from unit private address.
 | 
			
		||||
    """
 | 
			
		||||
    def iface_sniffer(*args, **kwargs):
 | 
			
		||||
        if not kwargs.get('iface', None):
 | 
			
		||||
            kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
 | 
			
		||||
 | 
			
		||||
        return f(*args, **kwargs)
 | 
			
		||||
 | 
			
		||||
    return iface_sniffer
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@sniff_iface
 | 
			
		||||
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
 | 
			
		||||
                  dynamic_only=True):
 | 
			
		||||
    """Get assigned IPv6 address for a given interface.
 | 
			
		||||
 | 
			
		||||
    Returns list of addresses found. If no address found, returns empty list.
 | 
			
		||||
 | 
			
		||||
    If iface is None, we infer the current primary interface by doing a reverse
 | 
			
		||||
    lookup on the unit private-address.
 | 
			
		||||
 | 
			
		||||
    We currently only support scope global IPv6 addresses i.e. non-temporary
 | 
			
		||||
    addresses. If no global IPv6 address is found, return the first one found
 | 
			
		||||
    in the ipv6 address list.
 | 
			
		||||
    """
 | 
			
		||||
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
 | 
			
		||||
                               inc_aliases=inc_aliases, fatal=fatal,
 | 
			
		||||
                               exc_list=exc_list)
 | 
			
		||||
 | 
			
		||||
    if addresses:
 | 
			
		||||
        global_addrs = []
 | 
			
		||||
        for addr in addresses:
 | 
			
		||||
            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
 | 
			
		||||
            m = re.match(key_scope_link_local, addr)
 | 
			
		||||
            if m:
 | 
			
		||||
                eui_64_mac = m.group(1)
 | 
			
		||||
                iface = m.group(2)
 | 
			
		||||
            else:
 | 
			
		||||
                global_addrs.append(addr)
 | 
			
		||||
 | 
			
		||||
        if global_addrs:
 | 
			
		||||
            # Make sure any found global addresses are not temporary
 | 
			
		||||
            cmd = ['ip', 'addr', 'show', iface]
 | 
			
		||||
            out = subprocess.check_output(cmd).decode('UTF-8')
 | 
			
		||||
            if dynamic_only:
 | 
			
		||||
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
 | 
			
		||||
            else:
 | 
			
		||||
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
 | 
			
		||||
 | 
			
		||||
            addrs = []
 | 
			
		||||
            for line in out.split('\n'):
 | 
			
		||||
                line = line.strip()
 | 
			
		||||
                m = re.match(key, line)
 | 
			
		||||
                if m and 'temporary' not in line:
 | 
			
		||||
                    # Return the first valid address we find
 | 
			
		||||
                    for addr in global_addrs:
 | 
			
		||||
                        if m.group(1) == addr:
 | 
			
		||||
                            if not dynamic_only or \
 | 
			
		||||
                                    m.group(1).endswith(eui_64_mac):
 | 
			
		||||
                                addrs.append(addr)
 | 
			
		||||
 | 
			
		||||
            if addrs:
 | 
			
		||||
                return addrs
 | 
			
		||||
 | 
			
		||||
    if fatal:
 | 
			
		||||
        raise Exception("Interface '%s' does not have a scope global "
 | 
			
		||||
                        "non-temporary ipv6 address." % iface)
 | 
			
		||||
 | 
			
		||||
    return []
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
 | 
			
		||||
    """Return a list of bridges on the system."""
 | 
			
		||||
    b_regex = "%s/*/bridge" % vnic_dir
 | 
			
		||||
    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
 | 
			
		||||
    """Return a list of nics comprising a given bridge on the system."""
 | 
			
		||||
    brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
 | 
			
		||||
    return [x.split('/')[-1] for x in glob.glob(brif_regex)]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_bridge_member(nic):
 | 
			
		||||
    """Check if a given nic is a member of a bridge."""
 | 
			
		||||
    for bridge in get_bridges():
 | 
			
		||||
        if nic in get_bridge_nics(bridge):
 | 
			
		||||
            return True
 | 
			
		||||
 | 
			
		||||
    return False
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_ip(address):
 | 
			
		||||
    """
 | 
			
		||||
    Returns True if address is a valid IP address.
 | 
			
		||||
    """
 | 
			
		||||
    try:
 | 
			
		||||
        # Test to see if already an IPv4 address
 | 
			
		||||
        socket.inet_aton(address)
 | 
			
		||||
        return True
 | 
			
		||||
    except socket.error:
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def ns_query(address):
 | 
			
		||||
    try:
 | 
			
		||||
        import dns.resolver
 | 
			
		||||
    except ImportError:
 | 
			
		||||
        apt_install('python-dnspython')
 | 
			
		||||
        import dns.resolver
 | 
			
		||||
 | 
			
		||||
    if isinstance(address, dns.name.Name):
 | 
			
		||||
        rtype = 'PTR'
 | 
			
		||||
    elif isinstance(address, six.string_types):
 | 
			
		||||
        rtype = 'A'
 | 
			
		||||
    else:
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
    answers = dns.resolver.query(address, rtype)
 | 
			
		||||
    if answers:
 | 
			
		||||
        return str(answers[0])
 | 
			
		||||
    return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_host_ip(hostname, fallback=None):
 | 
			
		||||
    """
 | 
			
		||||
    Resolves the IP for a given hostname, or returns
 | 
			
		||||
    the input if it is already an IP.
 | 
			
		||||
    """
 | 
			
		||||
    if is_ip(hostname):
 | 
			
		||||
        return hostname
 | 
			
		||||
 | 
			
		||||
    ip_addr = ns_query(hostname)
 | 
			
		||||
    if not ip_addr:
 | 
			
		||||
        try:
 | 
			
		||||
            ip_addr = socket.gethostbyname(hostname)
 | 
			
		||||
        except:
 | 
			
		||||
            log("Failed to resolve hostname '%s'" % (hostname),
 | 
			
		||||
                level=WARNING)
 | 
			
		||||
            return fallback
 | 
			
		||||
    return ip_addr
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_hostname(address, fqdn=True):
 | 
			
		||||
    """
 | 
			
		||||
    Resolves hostname for given IP, or returns the input
 | 
			
		||||
    if it is already a hostname.
 | 
			
		||||
    """
 | 
			
		||||
    if is_ip(address):
 | 
			
		||||
        try:
 | 
			
		||||
            import dns.reversename
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            apt_install("python-dnspython")
 | 
			
		||||
            import dns.reversename
 | 
			
		||||
 | 
			
		||||
        rev = dns.reversename.from_address(address)
 | 
			
		||||
        result = ns_query(rev)
 | 
			
		||||
 | 
			
		||||
        if not result:
 | 
			
		||||
            try:
 | 
			
		||||
                result = socket.gethostbyaddr(address)[0]
 | 
			
		||||
            except:
 | 
			
		||||
                return None
 | 
			
		||||
    else:
 | 
			
		||||
        result = address
 | 
			
		||||
 | 
			
		||||
    if fqdn:
 | 
			
		||||
        # strip trailing .
 | 
			
		||||
        if result.endswith('.'):
 | 
			
		||||
            return result[:-1]
 | 
			
		||||
        else:
 | 
			
		||||
            return result
 | 
			
		||||
    else:
 | 
			
		||||
        return result.split('.')[0]
 | 
			
		||||
							
								
								
									
										15
									
								
								hooks/charmhelpers/contrib/openstack/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								hooks/charmhelpers/contrib/openstack/__init__.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,15 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
							
								
								
									
										33
									
								
								hooks/charmhelpers/contrib/openstack/alternatives.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										33
									
								
								hooks/charmhelpers/contrib/openstack/alternatives.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,33 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
''' Helper for managing alternatives for file conflict resolution '''
 | 
			
		||||
 | 
			
		||||
import subprocess
 | 
			
		||||
import shutil
 | 
			
		||||
import os
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def install_alternative(name, target, source, priority=50):
 | 
			
		||||
    ''' Install alternative configuration '''
 | 
			
		||||
    if (os.path.exists(target) and not os.path.islink(target)):
 | 
			
		||||
        # Move existing file/directory away before installing
 | 
			
		||||
        shutil.move(target, '{}.bak'.format(target))
 | 
			
		||||
    cmd = [
 | 
			
		||||
        'update-alternatives', '--force', '--install',
 | 
			
		||||
        target, name, source, str(priority)
 | 
			
		||||
    ]
 | 
			
		||||
    subprocess.check_call(cmd)
 | 
			
		||||
							
								
								
									
										15
									
								
								hooks/charmhelpers/contrib/openstack/amulet/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								hooks/charmhelpers/contrib/openstack/amulet/__init__.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,15 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
							
								
								
									
										197
									
								
								hooks/charmhelpers/contrib/openstack/amulet/deployment.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										197
									
								
								hooks/charmhelpers/contrib/openstack/amulet/deployment.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,197 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
from collections import OrderedDict
 | 
			
		||||
from charmhelpers.contrib.amulet.deployment import (
 | 
			
		||||
    AmuletDeployment
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class OpenStackAmuletDeployment(AmuletDeployment):
 | 
			
		||||
    """OpenStack amulet deployment.
 | 
			
		||||
 | 
			
		||||
       This class inherits from AmuletDeployment and has additional support
 | 
			
		||||
       that is specifically for use by OpenStack charms.
 | 
			
		||||
       """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, series=None, openstack=None, source=None, stable=True):
 | 
			
		||||
        """Initialize the deployment environment."""
 | 
			
		||||
        super(OpenStackAmuletDeployment, self).__init__(series)
 | 
			
		||||
        self.openstack = openstack
 | 
			
		||||
        self.source = source
 | 
			
		||||
        self.stable = stable
 | 
			
		||||
        # Note(coreycb): this needs to be changed when new next branches come
 | 
			
		||||
        # out.
 | 
			
		||||
        self.current_next = "trusty"
 | 
			
		||||
 | 
			
		||||
    def _determine_branch_locations(self, other_services):
 | 
			
		||||
        """Determine the branch locations for the other services.
 | 
			
		||||
 | 
			
		||||
           Determine if the local branch being tested is derived from its
 | 
			
		||||
           stable or next (dev) branch, and based on this, use the corresonding
 | 
			
		||||
           stable or next branches for the other_services."""
 | 
			
		||||
 | 
			
		||||
        # Charms outside the lp:~openstack-charmers namespace
 | 
			
		||||
        base_charms = ['mysql', 'mongodb', 'nrpe']
 | 
			
		||||
 | 
			
		||||
        # Force these charms to current series even when using an older series.
 | 
			
		||||
        # ie. Use trusty/nrpe even when series is precise, as the P charm
 | 
			
		||||
        # does not possess the necessary external master config and hooks.
 | 
			
		||||
        force_series_current = ['nrpe']
 | 
			
		||||
 | 
			
		||||
        if self.series in ['precise', 'trusty']:
 | 
			
		||||
            base_series = self.series
 | 
			
		||||
        else:
 | 
			
		||||
            base_series = self.current_next
 | 
			
		||||
 | 
			
		||||
        for svc in other_services:
 | 
			
		||||
            if svc['name'] in force_series_current:
 | 
			
		||||
                base_series = self.current_next
 | 
			
		||||
            # If a location has been explicitly set, use it
 | 
			
		||||
            if svc.get('location'):
 | 
			
		||||
                continue
 | 
			
		||||
            if self.stable:
 | 
			
		||||
                temp = 'lp:charms/{}/{}'
 | 
			
		||||
                svc['location'] = temp.format(base_series,
 | 
			
		||||
                                              svc['name'])
 | 
			
		||||
            else:
 | 
			
		||||
                if svc['name'] in base_charms:
 | 
			
		||||
                    temp = 'lp:charms/{}/{}'
 | 
			
		||||
                    svc['location'] = temp.format(base_series,
 | 
			
		||||
                                                  svc['name'])
 | 
			
		||||
                else:
 | 
			
		||||
                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
 | 
			
		||||
                    svc['location'] = temp.format(self.current_next,
 | 
			
		||||
                                                  svc['name'])
 | 
			
		||||
 | 
			
		||||
        return other_services
 | 
			
		||||
 | 
			
		||||
    def _add_services(self, this_service, other_services):
 | 
			
		||||
        """Add services to the deployment and set openstack-origin/source."""
 | 
			
		||||
        other_services = self._determine_branch_locations(other_services)
 | 
			
		||||
 | 
			
		||||
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
 | 
			
		||||
                                                             other_services)
 | 
			
		||||
 | 
			
		||||
        services = other_services
 | 
			
		||||
        services.append(this_service)
 | 
			
		||||
 | 
			
		||||
        # Charms which should use the source config option
 | 
			
		||||
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
 | 
			
		||||
                      'ceph-osd', 'ceph-radosgw']
 | 
			
		||||
 | 
			
		||||
        # Charms which can not use openstack-origin, ie. many subordinates
 | 
			
		||||
        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
 | 
			
		||||
 | 
			
		||||
        if self.openstack:
 | 
			
		||||
            for svc in services:
 | 
			
		||||
                if svc['name'] not in use_source + no_origin:
 | 
			
		||||
                    config = {'openstack-origin': self.openstack}
 | 
			
		||||
                    self.d.configure(svc['name'], config)
 | 
			
		||||
 | 
			
		||||
        if self.source:
 | 
			
		||||
            for svc in services:
 | 
			
		||||
                if svc['name'] in use_source and svc['name'] not in no_origin:
 | 
			
		||||
                    config = {'source': self.source}
 | 
			
		||||
                    self.d.configure(svc['name'], config)
 | 
			
		||||
 | 
			
		||||
    def _configure_services(self, configs):
 | 
			
		||||
        """Configure all of the services."""
 | 
			
		||||
        for service, config in six.iteritems(configs):
 | 
			
		||||
            self.d.configure(service, config)
 | 
			
		||||
 | 
			
		||||
    def _get_openstack_release(self):
 | 
			
		||||
        """Get openstack release.
 | 
			
		||||
 | 
			
		||||
           Return an integer representing the enum value of the openstack
 | 
			
		||||
           release.
 | 
			
		||||
           """
 | 
			
		||||
        # Must be ordered by OpenStack release (not by Ubuntu release):
 | 
			
		||||
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
 | 
			
		||||
         self.precise_havana, self.precise_icehouse,
 | 
			
		||||
         self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
 | 
			
		||||
         self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
 | 
			
		||||
         self.wily_liberty) = range(12)
 | 
			
		||||
 | 
			
		||||
        releases = {
 | 
			
		||||
            ('precise', None): self.precise_essex,
 | 
			
		||||
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
 | 
			
		||||
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
 | 
			
		||||
            ('precise', 'cloud:precise-havana'): self.precise_havana,
 | 
			
		||||
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
 | 
			
		||||
            ('trusty', None): self.trusty_icehouse,
 | 
			
		||||
            ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
 | 
			
		||||
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
 | 
			
		||||
            ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
 | 
			
		||||
            ('utopic', None): self.utopic_juno,
 | 
			
		||||
            ('vivid', None): self.vivid_kilo,
 | 
			
		||||
            ('wily', None): self.wily_liberty}
 | 
			
		||||
        return releases[(self.series, self.openstack)]
 | 
			
		||||
 | 
			
		||||
    def _get_openstack_release_string(self):
 | 
			
		||||
        """Get openstack release string.
 | 
			
		||||
 | 
			
		||||
           Return a string representing the openstack release.
 | 
			
		||||
           """
 | 
			
		||||
        releases = OrderedDict([
 | 
			
		||||
            ('precise', 'essex'),
 | 
			
		||||
            ('quantal', 'folsom'),
 | 
			
		||||
            ('raring', 'grizzly'),
 | 
			
		||||
            ('saucy', 'havana'),
 | 
			
		||||
            ('trusty', 'icehouse'),
 | 
			
		||||
            ('utopic', 'juno'),
 | 
			
		||||
            ('vivid', 'kilo'),
 | 
			
		||||
            ('wily', 'liberty'),
 | 
			
		||||
        ])
 | 
			
		||||
        if self.openstack:
 | 
			
		||||
            os_origin = self.openstack.split(':')[1]
 | 
			
		||||
            return os_origin.split('%s-' % self.series)[1].split('/')[0]
 | 
			
		||||
        else:
 | 
			
		||||
            return releases[self.series]
 | 
			
		||||
 | 
			
		||||
    def get_ceph_expected_pools(self, radosgw=False):
 | 
			
		||||
        """Return a list of expected ceph pools in a ceph + cinder + glance
 | 
			
		||||
        test scenario, based on OpenStack release and whether ceph radosgw
 | 
			
		||||
        is flagged as present or not."""
 | 
			
		||||
 | 
			
		||||
        if self._get_openstack_release() >= self.trusty_kilo:
 | 
			
		||||
            # Kilo or later
 | 
			
		||||
            pools = [
 | 
			
		||||
                'rbd',
 | 
			
		||||
                'cinder',
 | 
			
		||||
                'glance'
 | 
			
		||||
            ]
 | 
			
		||||
        else:
 | 
			
		||||
            # Juno or earlier
 | 
			
		||||
            pools = [
 | 
			
		||||
                'data',
 | 
			
		||||
                'metadata',
 | 
			
		||||
                'rbd',
 | 
			
		||||
                'cinder',
 | 
			
		||||
                'glance'
 | 
			
		||||
            ]
 | 
			
		||||
 | 
			
		||||
        if radosgw:
 | 
			
		||||
            pools.extend([
 | 
			
		||||
                '.rgw.root',
 | 
			
		||||
                '.rgw.control',
 | 
			
		||||
                '.rgw',
 | 
			
		||||
                '.rgw.gc',
 | 
			
		||||
                '.users.uid'
 | 
			
		||||
            ])
 | 
			
		||||
 | 
			
		||||
        return pools
 | 
			
		||||
							
								
								
									
										963
									
								
								hooks/charmhelpers/contrib/openstack/amulet/utils.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										963
									
								
								hooks/charmhelpers/contrib/openstack/amulet/utils.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,963 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import amulet
 | 
			
		||||
import json
 | 
			
		||||
import logging
 | 
			
		||||
import os
 | 
			
		||||
import six
 | 
			
		||||
import time
 | 
			
		||||
import urllib
 | 
			
		||||
 | 
			
		||||
import cinderclient.v1.client as cinder_client
 | 
			
		||||
import glanceclient.v1.client as glance_client
 | 
			
		||||
import heatclient.v1.client as heat_client
 | 
			
		||||
import keystoneclient.v2_0 as keystone_client
 | 
			
		||||
import novaclient.v1_1.client as nova_client
 | 
			
		||||
import pika
 | 
			
		||||
import swiftclient
 | 
			
		||||
 | 
			
		||||
from charmhelpers.contrib.amulet.utils import (
 | 
			
		||||
    AmuletUtils
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
DEBUG = logging.DEBUG
 | 
			
		||||
ERROR = logging.ERROR
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class OpenStackAmuletUtils(AmuletUtils):
 | 
			
		||||
    """OpenStack amulet utilities.
 | 
			
		||||
 | 
			
		||||
       This class inherits from AmuletUtils and has additional support
 | 
			
		||||
       that is specifically for use by OpenStack charm tests.
 | 
			
		||||
       """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, log_level=ERROR):
 | 
			
		||||
        """Initialize the deployment environment."""
 | 
			
		||||
        super(OpenStackAmuletUtils, self).__init__(log_level)
 | 
			
		||||
 | 
			
		||||
    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
 | 
			
		||||
                               public_port, expected):
 | 
			
		||||
        """Validate endpoint data.
 | 
			
		||||
 | 
			
		||||
           Validate actual endpoint data vs expected endpoint data. The ports
 | 
			
		||||
           are used to find the matching endpoint.
 | 
			
		||||
           """
 | 
			
		||||
        self.log.debug('Validating endpoint data...')
 | 
			
		||||
        self.log.debug('actual: {}'.format(repr(endpoints)))
 | 
			
		||||
        found = False
 | 
			
		||||
        for ep in endpoints:
 | 
			
		||||
            self.log.debug('endpoint: {}'.format(repr(ep)))
 | 
			
		||||
            if (admin_port in ep.adminurl and
 | 
			
		||||
                    internal_port in ep.internalurl and
 | 
			
		||||
                    public_port in ep.publicurl):
 | 
			
		||||
                found = True
 | 
			
		||||
                actual = {'id': ep.id,
 | 
			
		||||
                          'region': ep.region,
 | 
			
		||||
                          'adminurl': ep.adminurl,
 | 
			
		||||
                          'internalurl': ep.internalurl,
 | 
			
		||||
                          'publicurl': ep.publicurl,
 | 
			
		||||
                          'service_id': ep.service_id}
 | 
			
		||||
                ret = self._validate_dict_data(expected, actual)
 | 
			
		||||
                if ret:
 | 
			
		||||
                    return 'unexpected endpoint data - {}'.format(ret)
 | 
			
		||||
 | 
			
		||||
        if not found:
 | 
			
		||||
            return 'endpoint not found'
 | 
			
		||||
 | 
			
		||||
    def validate_svc_catalog_endpoint_data(self, expected, actual):
 | 
			
		||||
        """Validate service catalog endpoint data.
 | 
			
		||||
 | 
			
		||||
           Validate a list of actual service catalog endpoints vs a list of
 | 
			
		||||
           expected service catalog endpoints.
 | 
			
		||||
           """
 | 
			
		||||
        self.log.debug('Validating service catalog endpoint data...')
 | 
			
		||||
        self.log.debug('actual: {}'.format(repr(actual)))
 | 
			
		||||
        for k, v in six.iteritems(expected):
 | 
			
		||||
            if k in actual:
 | 
			
		||||
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
 | 
			
		||||
                if ret:
 | 
			
		||||
                    return self.endpoint_error(k, ret)
 | 
			
		||||
            else:
 | 
			
		||||
                return "endpoint {} does not exist".format(k)
 | 
			
		||||
        return ret
 | 
			
		||||
 | 
			
		||||
    def validate_tenant_data(self, expected, actual):
 | 
			
		||||
        """Validate tenant data.
 | 
			
		||||
 | 
			
		||||
           Validate a list of actual tenant data vs list of expected tenant
 | 
			
		||||
           data.
 | 
			
		||||
           """
 | 
			
		||||
        self.log.debug('Validating tenant data...')
 | 
			
		||||
        self.log.debug('actual: {}'.format(repr(actual)))
 | 
			
		||||
        for e in expected:
 | 
			
		||||
            found = False
 | 
			
		||||
            for act in actual:
 | 
			
		||||
                a = {'enabled': act.enabled, 'description': act.description,
 | 
			
		||||
                     'name': act.name, 'id': act.id}
 | 
			
		||||
                if e['name'] == a['name']:
 | 
			
		||||
                    found = True
 | 
			
		||||
                    ret = self._validate_dict_data(e, a)
 | 
			
		||||
                    if ret:
 | 
			
		||||
                        return "unexpected tenant data - {}".format(ret)
 | 
			
		||||
            if not found:
 | 
			
		||||
                return "tenant {} does not exist".format(e['name'])
 | 
			
		||||
        return ret
 | 
			
		||||
 | 
			
		||||
    def validate_role_data(self, expected, actual):
 | 
			
		||||
        """Validate role data.
 | 
			
		||||
 | 
			
		||||
           Validate a list of actual role data vs a list of expected role
 | 
			
		||||
           data.
 | 
			
		||||
           """
 | 
			
		||||
        self.log.debug('Validating role data...')
 | 
			
		||||
        self.log.debug('actual: {}'.format(repr(actual)))
 | 
			
		||||
        for e in expected:
 | 
			
		||||
            found = False
 | 
			
		||||
            for act in actual:
 | 
			
		||||
                a = {'name': act.name, 'id': act.id}
 | 
			
		||||
                if e['name'] == a['name']:
 | 
			
		||||
                    found = True
 | 
			
		||||
                    ret = self._validate_dict_data(e, a)
 | 
			
		||||
                    if ret:
 | 
			
		||||
                        return "unexpected role data - {}".format(ret)
 | 
			
		||||
            if not found:
 | 
			
		||||
                return "role {} does not exist".format(e['name'])
 | 
			
		||||
        return ret
 | 
			
		||||
 | 
			
		||||
    def validate_user_data(self, expected, actual):
 | 
			
		||||
        """Validate user data.
 | 
			
		||||
 | 
			
		||||
           Validate a list of actual user data vs a list of expected user
 | 
			
		||||
           data.
 | 
			
		||||
           """
 | 
			
		||||
        self.log.debug('Validating user data...')
 | 
			
		||||
        self.log.debug('actual: {}'.format(repr(actual)))
 | 
			
		||||
        for e in expected:
 | 
			
		||||
            found = False
 | 
			
		||||
            for act in actual:
 | 
			
		||||
                a = {'enabled': act.enabled, 'name': act.name,
 | 
			
		||||
                     'email': act.email, 'tenantId': act.tenantId,
 | 
			
		||||
                     'id': act.id}
 | 
			
		||||
                if e['name'] == a['name']:
 | 
			
		||||
                    found = True
 | 
			
		||||
                    ret = self._validate_dict_data(e, a)
 | 
			
		||||
                    if ret:
 | 
			
		||||
                        return "unexpected user data - {}".format(ret)
 | 
			
		||||
            if not found:
 | 
			
		||||
                return "user {} does not exist".format(e['name'])
 | 
			
		||||
        return ret
 | 
			
		||||
 | 
			
		||||
    def validate_flavor_data(self, expected, actual):
 | 
			
		||||
        """Validate flavor data.
 | 
			
		||||
 | 
			
		||||
           Validate a list of actual flavors vs a list of expected flavors.
 | 
			
		||||
           """
 | 
			
		||||
        self.log.debug('Validating flavor data...')
 | 
			
		||||
        self.log.debug('actual: {}'.format(repr(actual)))
 | 
			
		||||
        act = [a.name for a in actual]
 | 
			
		||||
        return self._validate_list_data(expected, act)
 | 
			
		||||
 | 
			
		||||
    def tenant_exists(self, keystone, tenant):
 | 
			
		||||
        """Return True if tenant exists."""
 | 
			
		||||
        self.log.debug('Checking if tenant exists ({})...'.format(tenant))
 | 
			
		||||
        return tenant in [t.name for t in keystone.tenants.list()]
 | 
			
		||||
 | 
			
		||||
    def authenticate_cinder_admin(self, keystone_sentry, username,
 | 
			
		||||
                                  password, tenant):
 | 
			
		||||
        """Authenticates admin user with cinder."""
 | 
			
		||||
        # NOTE(beisner): cinder python client doesn't accept tokens.
 | 
			
		||||
        service_ip = \
 | 
			
		||||
            keystone_sentry.relation('shared-db',
 | 
			
		||||
                                     'mysql:shared-db')['private-address']
 | 
			
		||||
        ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
 | 
			
		||||
        return cinder_client.Client(username, password, tenant, ept)
 | 
			
		||||
 | 
			
		||||
    def authenticate_keystone_admin(self, keystone_sentry, user, password,
 | 
			
		||||
                                    tenant):
 | 
			
		||||
        """Authenticates admin user with the keystone admin endpoint."""
 | 
			
		||||
        self.log.debug('Authenticating keystone admin...')
 | 
			
		||||
        unit = keystone_sentry
 | 
			
		||||
        service_ip = unit.relation('shared-db',
 | 
			
		||||
                                   'mysql:shared-db')['private-address']
 | 
			
		||||
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
 | 
			
		||||
        return keystone_client.Client(username=user, password=password,
 | 
			
		||||
                                      tenant_name=tenant, auth_url=ep)
 | 
			
		||||
 | 
			
		||||
    def authenticate_keystone_user(self, keystone, user, password, tenant):
 | 
			
		||||
        """Authenticates a regular user with the keystone public endpoint."""
 | 
			
		||||
        self.log.debug('Authenticating keystone user ({})...'.format(user))
 | 
			
		||||
        ep = keystone.service_catalog.url_for(service_type='identity',
 | 
			
		||||
                                              endpoint_type='publicURL')
 | 
			
		||||
        return keystone_client.Client(username=user, password=password,
 | 
			
		||||
                                      tenant_name=tenant, auth_url=ep)
 | 
			
		||||
 | 
			
		||||
    def authenticate_glance_admin(self, keystone):
 | 
			
		||||
        """Authenticates admin user with glance."""
 | 
			
		||||
        self.log.debug('Authenticating glance admin...')
 | 
			
		||||
        ep = keystone.service_catalog.url_for(service_type='image',
 | 
			
		||||
                                              endpoint_type='adminURL')
 | 
			
		||||
        return glance_client.Client(ep, token=keystone.auth_token)
 | 
			
		||||
 | 
			
		||||
    def authenticate_heat_admin(self, keystone):
 | 
			
		||||
        """Authenticates the admin user with heat."""
 | 
			
		||||
        self.log.debug('Authenticating heat admin...')
 | 
			
		||||
        ep = keystone.service_catalog.url_for(service_type='orchestration',
 | 
			
		||||
                                              endpoint_type='publicURL')
 | 
			
		||||
        return heat_client.Client(endpoint=ep, token=keystone.auth_token)
 | 
			
		||||
 | 
			
		||||
    def authenticate_nova_user(self, keystone, user, password, tenant):
 | 
			
		||||
        """Authenticates a regular user with nova-api."""
 | 
			
		||||
        self.log.debug('Authenticating nova user ({})...'.format(user))
 | 
			
		||||
        ep = keystone.service_catalog.url_for(service_type='identity',
 | 
			
		||||
                                              endpoint_type='publicURL')
 | 
			
		||||
        return nova_client.Client(username=user, api_key=password,
 | 
			
		||||
                                  project_id=tenant, auth_url=ep)
 | 
			
		||||
 | 
			
		||||
    def authenticate_swift_user(self, keystone, user, password, tenant):
 | 
			
		||||
        """Authenticates a regular user with swift api."""
 | 
			
		||||
        self.log.debug('Authenticating swift user ({})...'.format(user))
 | 
			
		||||
        ep = keystone.service_catalog.url_for(service_type='identity',
 | 
			
		||||
                                              endpoint_type='publicURL')
 | 
			
		||||
        return swiftclient.Connection(authurl=ep,
 | 
			
		||||
                                      user=user,
 | 
			
		||||
                                      key=password,
 | 
			
		||||
                                      tenant_name=tenant,
 | 
			
		||||
                                      auth_version='2.0')
 | 
			
		||||
 | 
			
		||||
    def create_cirros_image(self, glance, image_name):
 | 
			
		||||
        """Download the latest cirros image and upload it to glance,
 | 
			
		||||
        validate and return a resource pointer.
 | 
			
		||||
 | 
			
		||||
        :param glance: pointer to authenticated glance connection
 | 
			
		||||
        :param image_name: display name for new image
 | 
			
		||||
        :returns: glance image pointer
 | 
			
		||||
        """
 | 
			
		||||
        self.log.debug('Creating glance cirros image '
 | 
			
		||||
                       '({})...'.format(image_name))
 | 
			
		||||
 | 
			
		||||
        # Download cirros image
 | 
			
		||||
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
 | 
			
		||||
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
 | 
			
		||||
        if http_proxy:
 | 
			
		||||
            proxies = {'http': http_proxy}
 | 
			
		||||
            opener = urllib.FancyURLopener(proxies)
 | 
			
		||||
        else:
 | 
			
		||||
            opener = urllib.FancyURLopener()
 | 
			
		||||
 | 
			
		||||
        f = opener.open('http://download.cirros-cloud.net/version/released')
 | 
			
		||||
        version = f.read().strip()
 | 
			
		||||
        cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
 | 
			
		||||
        local_path = os.path.join('tests', cirros_img)
 | 
			
		||||
 | 
			
		||||
        if not os.path.exists(local_path):
 | 
			
		||||
            cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
 | 
			
		||||
                                                  version, cirros_img)
 | 
			
		||||
            opener.retrieve(cirros_url, local_path)
 | 
			
		||||
        f.close()
 | 
			
		||||
 | 
			
		||||
        # Create glance image
 | 
			
		||||
        with open(local_path) as f:
 | 
			
		||||
            image = glance.images.create(name=image_name, is_public=True,
 | 
			
		||||
                                         disk_format='qcow2',
 | 
			
		||||
                                         container_format='bare', data=f)
 | 
			
		||||
 | 
			
		||||
        # Wait for image to reach active status
 | 
			
		||||
        img_id = image.id
 | 
			
		||||
        ret = self.resource_reaches_status(glance.images, img_id,
 | 
			
		||||
                                           expected_stat='active',
 | 
			
		||||
                                           msg='Image status wait')
 | 
			
		||||
        if not ret:
 | 
			
		||||
            msg = 'Glance image failed to reach expected state.'
 | 
			
		||||
            amulet.raise_status(amulet.FAIL, msg=msg)
 | 
			
		||||
 | 
			
		||||
        # Re-validate new image
 | 
			
		||||
        self.log.debug('Validating image attributes...')
 | 
			
		||||
        val_img_name = glance.images.get(img_id).name
 | 
			
		||||
        val_img_stat = glance.images.get(img_id).status
 | 
			
		||||
        val_img_pub = glance.images.get(img_id).is_public
 | 
			
		||||
        val_img_cfmt = glance.images.get(img_id).container_format
 | 
			
		||||
        val_img_dfmt = glance.images.get(img_id).disk_format
 | 
			
		||||
        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
 | 
			
		||||
                    'container fmt:{} disk fmt:{}'.format(
 | 
			
		||||
                        val_img_name, val_img_pub, img_id,
 | 
			
		||||
                        val_img_stat, val_img_cfmt, val_img_dfmt))
 | 
			
		||||
 | 
			
		||||
        if val_img_name == image_name and val_img_stat == 'active' \
 | 
			
		||||
                and val_img_pub is True and val_img_cfmt == 'bare' \
 | 
			
		||||
                and val_img_dfmt == 'qcow2':
 | 
			
		||||
            self.log.debug(msg_attr)
 | 
			
		||||
        else:
 | 
			
		||||
            msg = ('Volume validation failed, {}'.format(msg_attr))
 | 
			
		||||
            amulet.raise_status(amulet.FAIL, msg=msg)
 | 
			
		||||
 | 
			
		||||
        return image
 | 
			
		||||
 | 
			
		||||
    def delete_image(self, glance, image):
 | 
			
		||||
        """Delete the specified image."""
 | 
			
		||||
 | 
			
		||||
        # /!\ DEPRECATION WARNING
 | 
			
		||||
        self.log.warn('/!\\ DEPRECATION WARNING:  use '
 | 
			
		||||
                      'delete_resource instead of delete_image.')
 | 
			
		||||
        self.log.debug('Deleting glance image ({})...'.format(image))
 | 
			
		||||
        return self.delete_resource(glance.images, image, msg='glance image')
 | 
			
		||||
 | 
			
		||||
    def create_instance(self, nova, image_name, instance_name, flavor):
 | 
			
		||||
        """Create the specified instance."""
 | 
			
		||||
        self.log.debug('Creating instance '
 | 
			
		||||
                       '({}|{}|{})'.format(instance_name, image_name, flavor))
 | 
			
		||||
        image = nova.images.find(name=image_name)
 | 
			
		||||
        flavor = nova.flavors.find(name=flavor)
 | 
			
		||||
        instance = nova.servers.create(name=instance_name, image=image,
 | 
			
		||||
                                       flavor=flavor)
 | 
			
		||||
 | 
			
		||||
        count = 1
 | 
			
		||||
        status = instance.status
 | 
			
		||||
        while status != 'ACTIVE' and count < 60:
 | 
			
		||||
            time.sleep(3)
 | 
			
		||||
            instance = nova.servers.get(instance.id)
 | 
			
		||||
            status = instance.status
 | 
			
		||||
            self.log.debug('instance status: {}'.format(status))
 | 
			
		||||
            count += 1
 | 
			
		||||
 | 
			
		||||
        if status != 'ACTIVE':
 | 
			
		||||
            self.log.error('instance creation timed out')
 | 
			
		||||
            return None
 | 
			
		||||
 | 
			
		||||
        return instance
 | 
			
		||||
 | 
			
		||||
    def delete_instance(self, nova, instance):
 | 
			
		||||
        """Delete the specified instance."""
 | 
			
		||||
 | 
			
		||||
        # /!\ DEPRECATION WARNING
 | 
			
		||||
        self.log.warn('/!\\ DEPRECATION WARNING:  use '
 | 
			
		||||
                      'delete_resource instead of delete_instance.')
 | 
			
		||||
        self.log.debug('Deleting instance ({})...'.format(instance))
 | 
			
		||||
        return self.delete_resource(nova.servers, instance,
 | 
			
		||||
                                    msg='nova instance')
 | 
			
		||||
 | 
			
		||||
    def create_or_get_keypair(self, nova, keypair_name="testkey"):
 | 
			
		||||
        """Create a new keypair, or return pointer if it already exists."""
 | 
			
		||||
        try:
 | 
			
		||||
            _keypair = nova.keypairs.get(keypair_name)
 | 
			
		||||
            self.log.debug('Keypair ({}) already exists, '
 | 
			
		||||
                           'using it.'.format(keypair_name))
 | 
			
		||||
            return _keypair
 | 
			
		||||
        except:
 | 
			
		||||
            self.log.debug('Keypair ({}) does not exist, '
 | 
			
		||||
                           'creating it.'.format(keypair_name))
 | 
			
		||||
 | 
			
		||||
        _keypair = nova.keypairs.create(name=keypair_name)
 | 
			
		||||
        return _keypair
 | 
			
		||||
 | 
			
		||||
    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
 | 
			
		||||
                             img_id=None, src_vol_id=None, snap_id=None):
 | 
			
		||||
        """Create cinder volume, optionally from a glance image, OR
 | 
			
		||||
        optionally as a clone of an existing volume, OR optionally
 | 
			
		||||
        from a snapshot.  Wait for the new volume status to reach
 | 
			
		||||
        the expected status, validate and return a resource pointer.
 | 
			
		||||
 | 
			
		||||
        :param vol_name: cinder volume display name
 | 
			
		||||
        :param vol_size: size in gigabytes
 | 
			
		||||
        :param img_id: optional glance image id
 | 
			
		||||
        :param src_vol_id: optional source volume id to clone
 | 
			
		||||
        :param snap_id: optional snapshot id to use
 | 
			
		||||
        :returns: cinder volume pointer
 | 
			
		||||
        """
 | 
			
		||||
        # Handle parameter input and avoid impossible combinations
 | 
			
		||||
        if img_id and not src_vol_id and not snap_id:
 | 
			
		||||
            # Create volume from image
 | 
			
		||||
            self.log.debug('Creating cinder volume from glance image...')
 | 
			
		||||
            bootable = 'true'
 | 
			
		||||
        elif src_vol_id and not img_id and not snap_id:
 | 
			
		||||
            # Clone an existing volume
 | 
			
		||||
            self.log.debug('Cloning cinder volume...')
 | 
			
		||||
            bootable = cinder.volumes.get(src_vol_id).bootable
 | 
			
		||||
        elif snap_id and not src_vol_id and not img_id:
 | 
			
		||||
            # Create volume from snapshot
 | 
			
		||||
            self.log.debug('Creating cinder volume from snapshot...')
 | 
			
		||||
            snap = cinder.volume_snapshots.find(id=snap_id)
 | 
			
		||||
            vol_size = snap.size
 | 
			
		||||
            snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
 | 
			
		||||
            bootable = cinder.volumes.get(snap_vol_id).bootable
 | 
			
		||||
        elif not img_id and not src_vol_id and not snap_id:
 | 
			
		||||
            # Create volume
 | 
			
		||||
            self.log.debug('Creating cinder volume...')
 | 
			
		||||
            bootable = 'false'
 | 
			
		||||
        else:
 | 
			
		||||
            # Impossible combination of parameters
 | 
			
		||||
            msg = ('Invalid method use - name:{} size:{} img_id:{} '
 | 
			
		||||
                   'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
 | 
			
		||||
                                                     img_id, src_vol_id,
 | 
			
		||||
                                                     snap_id))
 | 
			
		||||
            amulet.raise_status(amulet.FAIL, msg=msg)
 | 
			
		||||
 | 
			
		||||
        # Create new volume
 | 
			
		||||
        try:
 | 
			
		||||
            vol_new = cinder.volumes.create(display_name=vol_name,
 | 
			
		||||
                                            imageRef=img_id,
 | 
			
		||||
                                            size=vol_size,
 | 
			
		||||
                                            source_volid=src_vol_id,
 | 
			
		||||
                                            snapshot_id=snap_id)
 | 
			
		||||
            vol_id = vol_new.id
 | 
			
		||||
        except Exception as e:
 | 
			
		||||
            msg = 'Failed to create volume: {}'.format(e)
 | 
			
		||||
            amulet.raise_status(amulet.FAIL, msg=msg)
 | 
			
		||||
 | 
			
		||||
        # Wait for volume to reach available status
 | 
			
		||||
        ret = self.resource_reaches_status(cinder.volumes, vol_id,
 | 
			
		||||
                                           expected_stat="available",
 | 
			
		||||
                                           msg="Volume status wait")
 | 
			
		||||
        if not ret:
 | 
			
		||||
            msg = 'Cinder volume failed to reach expected state.'
 | 
			
		||||
            amulet.raise_status(amulet.FAIL, msg=msg)
 | 
			
		||||
 | 
			
		||||
        # Re-validate new volume
 | 
			
		||||
        self.log.debug('Validating volume attributes...')
 | 
			
		||||
        val_vol_name = cinder.volumes.get(vol_id).display_name
 | 
			
		||||
        val_vol_boot = cinder.volumes.get(vol_id).bootable
 | 
			
		||||
        val_vol_stat = cinder.volumes.get(vol_id).status
 | 
			
		||||
        val_vol_size = cinder.volumes.get(vol_id).size
 | 
			
		||||
        msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
 | 
			
		||||
                    '{} size:{}'.format(val_vol_name, vol_id,
 | 
			
		||||
                                        val_vol_stat, val_vol_boot,
 | 
			
		||||
                                        val_vol_size))
 | 
			
		||||
 | 
			
		||||
        if val_vol_boot == bootable and val_vol_stat == 'available' \
 | 
			
		||||
                and val_vol_name == vol_name and val_vol_size == vol_size:
 | 
			
		||||
            self.log.debug(msg_attr)
 | 
			
		||||
        else:
 | 
			
		||||
            msg = ('Volume validation failed, {}'.format(msg_attr))
 | 
			
		||||
            amulet.raise_status(amulet.FAIL, msg=msg)
 | 
			
		||||
 | 
			
		||||
        return vol_new
 | 
			
		||||
 | 
			
		||||
    def delete_resource(self, resource, resource_id,
 | 
			
		||||
                        msg="resource", max_wait=120):
 | 
			
		||||
        """Delete one openstack resource, such as one instance, keypair,
 | 
			
		||||
        image, volume, stack, etc., and confirm deletion within max wait time.
 | 
			
		||||
 | 
			
		||||
        :param resource: pointer to os resource type, ex:glance_client.images
 | 
			
		||||
        :param resource_id: unique name or id for the openstack resource
 | 
			
		||||
        :param msg: text to identify purpose in logging
 | 
			
		||||
        :param max_wait: maximum wait time in seconds
 | 
			
		||||
        :returns: True if successful, otherwise False
 | 
			
		||||
        """
 | 
			
		||||
        self.log.debug('Deleting OpenStack resource '
 | 
			
		||||
                       '{} ({})'.format(resource_id, msg))
 | 
			
		||||
        num_before = len(list(resource.list()))
 | 
			
		||||
        resource.delete(resource_id)
 | 
			
		||||
 | 
			
		||||
        tries = 0
 | 
			
		||||
        num_after = len(list(resource.list()))
 | 
			
		||||
        while num_after != (num_before - 1) and tries < (max_wait / 4):
 | 
			
		||||
            self.log.debug('{} delete check: '
 | 
			
		||||
                           '{} [{}:{}] {}'.format(msg, tries,
 | 
			
		||||
                                                  num_before,
 | 
			
		||||
                                                  num_after,
 | 
			
		||||
                                                  resource_id))
 | 
			
		||||
            time.sleep(4)
 | 
			
		||||
            num_after = len(list(resource.list()))
 | 
			
		||||
            tries += 1
 | 
			
		||||
 | 
			
		||||
        self.log.debug('{}:  expected, actual count = {}, '
 | 
			
		||||
                       '{}'.format(msg, num_before - 1, num_after))
 | 
			
		||||
 | 
			
		||||
        if num_after == (num_before - 1):
 | 
			
		||||
            return True
 | 
			
		||||
        else:
 | 
			
		||||
            self.log.error('{} delete timed out'.format(msg))
 | 
			
		||||
            return False
 | 
			
		||||
 | 
			
		||||
    def resource_reaches_status(self, resource, resource_id,
 | 
			
		||||
                                expected_stat='available',
 | 
			
		||||
                                msg='resource', max_wait=120):
 | 
			
		||||
        """Wait for an openstack resources status to reach an
 | 
			
		||||
           expected status within a specified time.  Useful to confirm that
 | 
			
		||||
           nova instances, cinder vols, snapshots, glance images, heat stacks
 | 
			
		||||
           and other resources eventually reach the expected status.
 | 
			
		||||
 | 
			
		||||
        :param resource: pointer to os resource type, ex: heat_client.stacks
 | 
			
		||||
        :param resource_id: unique id for the openstack resource
 | 
			
		||||
        :param expected_stat: status to expect resource to reach
 | 
			
		||||
        :param msg: text to identify purpose in logging
 | 
			
		||||
        :param max_wait: maximum wait time in seconds
 | 
			
		||||
        :returns: True if successful, False if status is not reached
 | 
			
		||||
        """
 | 
			
		||||
 | 
			
		||||
        tries = 0
 | 
			
		||||
        resource_stat = resource.get(resource_id).status
 | 
			
		||||
        while resource_stat != expected_stat and tries < (max_wait / 4):
 | 
			
		||||
            self.log.debug('{} status check: '
 | 
			
		||||
                           '{} [{}:{}] {}'.format(msg, tries,
 | 
			
		||||
                                                  resource_stat,
 | 
			
		||||
                                                  expected_stat,
 | 
			
		||||
                                                  resource_id))
 | 
			
		||||
            time.sleep(4)
 | 
			
		||||
            resource_stat = resource.get(resource_id).status
 | 
			
		||||
            tries += 1
 | 
			
		||||
 | 
			
		||||
        self.log.debug('{}:  expected, actual status = {}, '
 | 
			
		||||
                       '{}'.format(msg, resource_stat, expected_stat))
 | 
			
		||||
 | 
			
		||||
        if resource_stat == expected_stat:
 | 
			
		||||
            return True
 | 
			
		||||
        else:
 | 
			
		||||
            self.log.debug('{} never reached expected status: '
 | 
			
		||||
                           '{}'.format(resource_id, expected_stat))
 | 
			
		||||
            return False
 | 
			
		||||
 | 
			
		||||
    def get_ceph_osd_id_cmd(self, index):
 | 
			
		||||
        """Produce a shell command that will return a ceph-osd id."""
 | 
			
		||||
        return ("`initctl list | grep 'ceph-osd ' | "
 | 
			
		||||
                "awk 'NR=={} {{ print $2 }}' | "
 | 
			
		||||
                "grep -o '[0-9]*'`".format(index + 1))
 | 
			
		||||
 | 
			
		||||
    def get_ceph_pools(self, sentry_unit):
 | 
			
		||||
        """Return a dict of ceph pools from a single ceph unit, with
 | 
			
		||||
        pool name as keys, pool id as vals."""
 | 
			
		||||
        pools = {}
 | 
			
		||||
        cmd = 'sudo ceph osd lspools'
 | 
			
		||||
        output, code = sentry_unit.run(cmd)
 | 
			
		||||
        if code != 0:
 | 
			
		||||
            msg = ('{} `{}` returned {} '
 | 
			
		||||
                   '{}'.format(sentry_unit.info['unit_name'],
 | 
			
		||||
                               cmd, code, output))
 | 
			
		||||
            amulet.raise_status(amulet.FAIL, msg=msg)
 | 
			
		||||
 | 
			
		||||
        # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
 | 
			
		||||
        for pool in str(output).split(','):
 | 
			
		||||
            pool_id_name = pool.split(' ')
 | 
			
		||||
            if len(pool_id_name) == 2:
 | 
			
		||||
                pool_id = pool_id_name[0]
 | 
			
		||||
                pool_name = pool_id_name[1]
 | 
			
		||||
                pools[pool_name] = int(pool_id)
 | 
			
		||||
 | 
			
		||||
        self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
 | 
			
		||||
                                                pools))
 | 
			
		||||
        return pools
 | 
			
		||||
 | 
			
		||||
    def get_ceph_df(self, sentry_unit):
 | 
			
		||||
        """Return dict of ceph df json output, including ceph pool state.
 | 
			
		||||
 | 
			
		||||
        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
 | 
			
		||||
        :returns: Dict of ceph df output
 | 
			
		||||
        """
 | 
			
		||||
        cmd = 'sudo ceph df --format=json'
 | 
			
		||||
        output, code = sentry_unit.run(cmd)
 | 
			
		||||
        if code != 0:
 | 
			
		||||
            msg = ('{} `{}` returned {} '
 | 
			
		||||
                   '{}'.format(sentry_unit.info['unit_name'],
 | 
			
		||||
                               cmd, code, output))
 | 
			
		||||
            amulet.raise_status(amulet.FAIL, msg=msg)
 | 
			
		||||
        return json.loads(output)
 | 
			
		||||
 | 
			
		||||
    def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
 | 
			
		||||
        """Take a sample of attributes of a ceph pool, returning ceph
 | 
			
		||||
        pool name, object count and disk space used for the specified
 | 
			
		||||
        pool ID number.
 | 
			
		||||
 | 
			
		||||
        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
 | 
			
		||||
        :param pool_id: Ceph pool ID
 | 
			
		||||
        :returns: List of pool name, object count, kb disk space used
 | 
			
		||||
        """
 | 
			
		||||
        df = self.get_ceph_df(sentry_unit)
 | 
			
		||||
        pool_name = df['pools'][pool_id]['name']
 | 
			
		||||
        obj_count = df['pools'][pool_id]['stats']['objects']
 | 
			
		||||
        kb_used = df['pools'][pool_id]['stats']['kb_used']
 | 
			
		||||
        self.log.debug('Ceph {} pool (ID {}): {} objects, '
 | 
			
		||||
                       '{} kb used'.format(pool_name, pool_id,
 | 
			
		||||
                                           obj_count, kb_used))
 | 
			
		||||
        return pool_name, obj_count, kb_used
 | 
			
		||||
 | 
			
		||||
    def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
 | 
			
		||||
        """Validate ceph pool samples taken over time, such as pool
 | 
			
		||||
        object counts or pool kb used, before adding, after adding, and
 | 
			
		||||
        after deleting items which affect those pool attributes.  The
 | 
			
		||||
        2nd element is expected to be greater than the 1st; 3rd is expected
 | 
			
		||||
        to be less than the 2nd.
 | 
			
		||||
 | 
			
		||||
        :param samples: List containing 3 data samples
 | 
			
		||||
        :param sample_type: String for logging and usage context
 | 
			
		||||
        :returns: None if successful, Failure message otherwise
 | 
			
		||||
        """
 | 
			
		||||
        original, created, deleted = range(3)
 | 
			
		||||
        if samples[created] <= samples[original] or \
 | 
			
		||||
                samples[deleted] >= samples[created]:
 | 
			
		||||
            return ('Ceph {} samples ({}) '
 | 
			
		||||
                    'unexpected.'.format(sample_type, samples))
 | 
			
		||||
        else:
 | 
			
		||||
            self.log.debug('Ceph {} samples (OK): '
 | 
			
		||||
                           '{}'.format(sample_type, samples))
 | 
			
		||||
            return None
 | 
			
		||||
 | 
			
		||||
# rabbitmq/amqp specific helpers:
 | 
			
		||||
    def add_rmq_test_user(self, sentry_units,
 | 
			
		||||
                          username="testuser1", password="changeme"):
 | 
			
		||||
        """Add a test user via the first rmq juju unit, check connection as
 | 
			
		||||
        the new user against all sentry units.
 | 
			
		||||
 | 
			
		||||
        :param sentry_units: list of sentry unit pointers
 | 
			
		||||
        :param username: amqp user name, default to testuser1
 | 
			
		||||
        :param password: amqp user password
 | 
			
		||||
        :returns: None if successful.  Raise on error.
 | 
			
		||||
        """
 | 
			
		||||
        self.log.debug('Adding rmq user ({})...'.format(username))
 | 
			
		||||
 | 
			
		||||
        # Check that user does not already exist
 | 
			
		||||
        cmd_user_list = 'rabbitmqctl list_users'
 | 
			
		||||
        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
 | 
			
		||||
        if username in output:
 | 
			
		||||
            self.log.warning('User ({}) already exists, returning '
 | 
			
		||||
                             'gracefully.'.format(username))
 | 
			
		||||
            return
 | 
			
		||||
 | 
			
		||||
        perms = '".*" ".*" ".*"'
 | 
			
		||||
        cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
 | 
			
		||||
                'rabbitmqctl set_permissions {} {}'.format(username, perms)]
 | 
			
		||||
 | 
			
		||||
        # Add user via first unit
 | 
			
		||||
        for cmd in cmds:
 | 
			
		||||
            output, _ = self.run_cmd_unit(sentry_units[0], cmd)
 | 
			
		||||
 | 
			
		||||
        # Check connection against the other sentry_units
 | 
			
		||||
        self.log.debug('Checking user connect against units...')
 | 
			
		||||
        for sentry_unit in sentry_units:
 | 
			
		||||
            connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
 | 
			
		||||
                                                   username=username,
 | 
			
		||||
                                                   password=password)
 | 
			
		||||
            connection.close()
 | 
			
		||||
 | 
			
		||||
    def delete_rmq_test_user(self, sentry_units, username="testuser1"):
 | 
			
		||||
        """Delete a rabbitmq user via the first rmq juju unit.
 | 
			
		||||
 | 
			
		||||
        :param sentry_units: list of sentry unit pointers
 | 
			
		||||
        :param username: amqp user name, default to testuser1
 | 
			
		||||
        :param password: amqp user password
 | 
			
		||||
        :returns: None if successful or no such user.
 | 
			
		||||
        """
 | 
			
		||||
        self.log.debug('Deleting rmq user ({})...'.format(username))
 | 
			
		||||
 | 
			
		||||
        # Check that the user exists
 | 
			
		||||
        cmd_user_list = 'rabbitmqctl list_users'
 | 
			
		||||
        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
 | 
			
		||||
 | 
			
		||||
        if username not in output:
 | 
			
		||||
            self.log.warning('User ({}) does not exist, returning '
 | 
			
		||||
                             'gracefully.'.format(username))
 | 
			
		||||
            return
 | 
			
		||||
 | 
			
		||||
        # Delete the user
 | 
			
		||||
        cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
 | 
			
		||||
        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
 | 
			
		||||
 | 
			
		||||
    def get_rmq_cluster_status(self, sentry_unit):
 | 
			
		||||
        """Execute rabbitmq cluster status command on a unit and return
 | 
			
		||||
        the full output.
 | 
			
		||||
 | 
			
		||||
        :param unit: sentry unit
 | 
			
		||||
        :returns: String containing console output of cluster status command
 | 
			
		||||
        """
 | 
			
		||||
        cmd = 'rabbitmqctl cluster_status'
 | 
			
		||||
        output, _ = self.run_cmd_unit(sentry_unit, cmd)
 | 
			
		||||
        self.log.debug('{} cluster_status:\n{}'.format(
 | 
			
		||||
            sentry_unit.info['unit_name'], output))
 | 
			
		||||
        return str(output)
 | 
			
		||||
 | 
			
		||||
    def get_rmq_cluster_running_nodes(self, sentry_unit):
 | 
			
		||||
        """Parse rabbitmqctl cluster_status output string, return list of
 | 
			
		||||
        running rabbitmq cluster nodes.
 | 
			
		||||
 | 
			
		||||
        :param unit: sentry unit
 | 
			
		||||
        :returns: List containing node names of running nodes
 | 
			
		||||
        """
 | 
			
		||||
        # NOTE(beisner): rabbitmqctl cluster_status output is not
 | 
			
		||||
        # json-parsable, do string chop foo, then json.loads that.
 | 
			
		||||
        str_stat = self.get_rmq_cluster_status(sentry_unit)
 | 
			
		||||
        if 'running_nodes' in str_stat:
 | 
			
		||||
            pos_start = str_stat.find("{running_nodes,") + 15
 | 
			
		||||
            pos_end = str_stat.find("]},", pos_start) + 1
 | 
			
		||||
            str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
 | 
			
		||||
            run_nodes = json.loads(str_run_nodes)
 | 
			
		||||
            return run_nodes
 | 
			
		||||
        else:
 | 
			
		||||
            return []
 | 
			
		||||
 | 
			
		||||
    def validate_rmq_cluster_running_nodes(self, sentry_units):
 | 
			
		||||
        """Check that all rmq unit hostnames are represented in the
 | 
			
		||||
        cluster_status output of all units.
 | 
			
		||||
 | 
			
		||||
        :param sentry_units: list of sentry unit pointers (all rmq units)
 | 
			
		||||
        :returns: None if successful, otherwise return error message
 | 
			
		||||
        """
 | 
			
		||||
        host_names = self.get_unit_hostnames(sentry_units)
 | 
			
		||||
        errors = []
 | 
			
		||||
 | 
			
		||||
        # Query every unit for cluster_status running nodes
 | 
			
		||||
        for query_unit in sentry_units:
 | 
			
		||||
            query_unit_name = query_unit.info['unit_name']
 | 
			
		||||
            running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
 | 
			
		||||
 | 
			
		||||
            # Confirm that every unit is represented in the queried unit's
 | 
			
		||||
            # cluster_status running nodes output.
 | 
			
		||||
            for validate_unit in sentry_units:
 | 
			
		||||
                val_host_name = host_names[validate_unit.info['unit_name']]
 | 
			
		||||
                val_node_name = 'rabbit@{}'.format(val_host_name)
 | 
			
		||||
 | 
			
		||||
                if val_node_name not in running_nodes:
 | 
			
		||||
                    errors.append('Cluster member check failed on {}: {} not '
 | 
			
		||||
                                  'in {}\n'.format(query_unit_name,
 | 
			
		||||
                                                   val_node_name,
 | 
			
		||||
                                                   running_nodes))
 | 
			
		||||
        if errors:
 | 
			
		||||
            return ''.join(errors)
 | 
			
		||||
 | 
			
		||||
    def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
 | 
			
		||||
        """Check a single juju rmq unit for ssl and port in the config file."""
 | 
			
		||||
        host = sentry_unit.info['public-address']
 | 
			
		||||
        unit_name = sentry_unit.info['unit_name']
 | 
			
		||||
 | 
			
		||||
        conf_file = '/etc/rabbitmq/rabbitmq.config'
 | 
			
		||||
        conf_contents = str(self.file_contents_safe(sentry_unit,
 | 
			
		||||
                                                    conf_file, max_wait=16))
 | 
			
		||||
        # Checks
 | 
			
		||||
        conf_ssl = 'ssl' in conf_contents
 | 
			
		||||
        conf_port = str(port) in conf_contents
 | 
			
		||||
 | 
			
		||||
        # Port explicitly checked in config
 | 
			
		||||
        if port and conf_port and conf_ssl:
 | 
			
		||||
            self.log.debug('SSL is enabled  @{}:{} '
 | 
			
		||||
                           '({})'.format(host, port, unit_name))
 | 
			
		||||
            return True
 | 
			
		||||
        elif port and not conf_port and conf_ssl:
 | 
			
		||||
            self.log.debug('SSL is enabled @{} but not on port {} '
 | 
			
		||||
                           '({})'.format(host, port, unit_name))
 | 
			
		||||
            return False
 | 
			
		||||
        # Port not checked (useful when checking that ssl is disabled)
 | 
			
		||||
        elif not port and conf_ssl:
 | 
			
		||||
            self.log.debug('SSL is enabled  @{}:{} '
 | 
			
		||||
                           '({})'.format(host, port, unit_name))
 | 
			
		||||
            return True
 | 
			
		||||
        elif not conf_ssl:
 | 
			
		||||
            self.log.debug('SSL not enabled @{}:{} '
 | 
			
		||||
                           '({})'.format(host, port, unit_name))
 | 
			
		||||
            return False
 | 
			
		||||
        else:
 | 
			
		||||
            msg = ('Unknown condition when checking SSL status @{}:{} '
 | 
			
		||||
                   '({})'.format(host, port, unit_name))
 | 
			
		||||
            amulet.raise_status(amulet.FAIL, msg)
 | 
			
		||||
 | 
			
		||||
    def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
 | 
			
		||||
        """Check that ssl is enabled on rmq juju sentry units.
 | 
			
		||||
 | 
			
		||||
        :param sentry_units: list of all rmq sentry units
 | 
			
		||||
        :param port: optional ssl port override to validate
 | 
			
		||||
        :returns: None if successful, otherwise return error message
 | 
			
		||||
        """
 | 
			
		||||
        for sentry_unit in sentry_units:
 | 
			
		||||
            if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
 | 
			
		||||
                return ('Unexpected condition:  ssl is disabled on unit '
 | 
			
		||||
                        '({})'.format(sentry_unit.info['unit_name']))
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
    def validate_rmq_ssl_disabled_units(self, sentry_units):
 | 
			
		||||
        """Check that ssl is enabled on listed rmq juju sentry units.
 | 
			
		||||
 | 
			
		||||
        :param sentry_units: list of all rmq sentry units
 | 
			
		||||
        :returns: None if successful, otherwise return error message
 | 
			
		||||
        """
 | 
			
		||||
        for sentry_unit in sentry_units:
 | 
			
		||||
            if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
 | 
			
		||||
                return ('Unexpected condition:  ssl is enabled on unit '
 | 
			
		||||
                        '({})'.format(sentry_unit.info['unit_name']))
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
    def configure_rmq_ssl_on(self, sentry_units, deployment,
 | 
			
		||||
                             port=None, max_wait=60):
 | 
			
		||||
        """Turn ssl charm config option on, with optional non-default
 | 
			
		||||
        ssl port specification.  Confirm that it is enabled on every
 | 
			
		||||
        unit.
 | 
			
		||||
 | 
			
		||||
        :param sentry_units: list of sentry units
 | 
			
		||||
        :param deployment: amulet deployment object pointer
 | 
			
		||||
        :param port: amqp port, use defaults if None
 | 
			
		||||
        :param max_wait: maximum time to wait in seconds to confirm
 | 
			
		||||
        :returns: None if successful.  Raise on error.
 | 
			
		||||
        """
 | 
			
		||||
        self.log.debug('Setting ssl charm config option:  on')
 | 
			
		||||
 | 
			
		||||
        # Enable RMQ SSL
 | 
			
		||||
        config = {'ssl': 'on'}
 | 
			
		||||
        if port:
 | 
			
		||||
            config['ssl_port'] = port
 | 
			
		||||
 | 
			
		||||
        deployment.configure('rabbitmq-server', config)
 | 
			
		||||
 | 
			
		||||
        # Confirm
 | 
			
		||||
        tries = 0
 | 
			
		||||
        ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
 | 
			
		||||
        while ret and tries < (max_wait / 4):
 | 
			
		||||
            time.sleep(4)
 | 
			
		||||
            self.log.debug('Attempt {}: {}'.format(tries, ret))
 | 
			
		||||
            ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
 | 
			
		||||
            tries += 1
 | 
			
		||||
 | 
			
		||||
        if ret:
 | 
			
		||||
            amulet.raise_status(amulet.FAIL, ret)
 | 
			
		||||
 | 
			
		||||
    def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
 | 
			
		||||
        """Turn ssl charm config option off, confirm that it is disabled
 | 
			
		||||
        on every unit.
 | 
			
		||||
 | 
			
		||||
        :param sentry_units: list of sentry units
 | 
			
		||||
        :param deployment: amulet deployment object pointer
 | 
			
		||||
        :param max_wait: maximum time to wait in seconds to confirm
 | 
			
		||||
        :returns: None if successful.  Raise on error.
 | 
			
		||||
        """
 | 
			
		||||
        self.log.debug('Setting ssl charm config option:  off')
 | 
			
		||||
 | 
			
		||||
        # Disable RMQ SSL
 | 
			
		||||
        config = {'ssl': 'off'}
 | 
			
		||||
        deployment.configure('rabbitmq-server', config)
 | 
			
		||||
 | 
			
		||||
        # Confirm
 | 
			
		||||
        tries = 0
 | 
			
		||||
        ret = self.validate_rmq_ssl_disabled_units(sentry_units)
 | 
			
		||||
        while ret and tries < (max_wait / 4):
 | 
			
		||||
            time.sleep(4)
 | 
			
		||||
            self.log.debug('Attempt {}: {}'.format(tries, ret))
 | 
			
		||||
            ret = self.validate_rmq_ssl_disabled_units(sentry_units)
 | 
			
		||||
            tries += 1
 | 
			
		||||
 | 
			
		||||
        if ret:
 | 
			
		||||
            amulet.raise_status(amulet.FAIL, ret)
 | 
			
		||||
 | 
			
		||||
    def connect_amqp_by_unit(self, sentry_unit, ssl=False,
 | 
			
		||||
                             port=None, fatal=True,
 | 
			
		||||
                             username="testuser1", password="changeme"):
 | 
			
		||||
        """Establish and return a pika amqp connection to the rabbitmq service
 | 
			
		||||
        running on a rmq juju unit.
 | 
			
		||||
 | 
			
		||||
        :param sentry_unit: sentry unit pointer
 | 
			
		||||
        :param ssl: boolean, default to False
 | 
			
		||||
        :param port: amqp port, use defaults if None
 | 
			
		||||
        :param fatal: boolean, default to True (raises on connect error)
 | 
			
		||||
        :param username: amqp user name, default to testuser1
 | 
			
		||||
        :param password: amqp user password
 | 
			
		||||
        :returns: pika amqp connection pointer or None if failed and non-fatal
 | 
			
		||||
        """
 | 
			
		||||
        host = sentry_unit.info['public-address']
 | 
			
		||||
        unit_name = sentry_unit.info['unit_name']
 | 
			
		||||
 | 
			
		||||
        # Default port logic if port is not specified
 | 
			
		||||
        if ssl and not port:
 | 
			
		||||
            port = 5671
 | 
			
		||||
        elif not ssl and not port:
 | 
			
		||||
            port = 5672
 | 
			
		||||
 | 
			
		||||
        self.log.debug('Connecting to amqp on {}:{} ({}) as '
 | 
			
		||||
                       '{}...'.format(host, port, unit_name, username))
 | 
			
		||||
 | 
			
		||||
        try:
 | 
			
		||||
            credentials = pika.PlainCredentials(username, password)
 | 
			
		||||
            parameters = pika.ConnectionParameters(host=host, port=port,
 | 
			
		||||
                                                   credentials=credentials,
 | 
			
		||||
                                                   ssl=ssl,
 | 
			
		||||
                                                   connection_attempts=3,
 | 
			
		||||
                                                   retry_delay=5,
 | 
			
		||||
                                                   socket_timeout=1)
 | 
			
		||||
            connection = pika.BlockingConnection(parameters)
 | 
			
		||||
            assert connection.server_properties['product'] == 'RabbitMQ'
 | 
			
		||||
            self.log.debug('Connect OK')
 | 
			
		||||
            return connection
 | 
			
		||||
        except Exception as e:
 | 
			
		||||
            msg = ('amqp connection failed to {}:{} as '
 | 
			
		||||
                   '{} ({})'.format(host, port, username, str(e)))
 | 
			
		||||
            if fatal:
 | 
			
		||||
                amulet.raise_status(amulet.FAIL, msg)
 | 
			
		||||
            else:
 | 
			
		||||
                self.log.warn(msg)
 | 
			
		||||
                return None
 | 
			
		||||
 | 
			
		||||
    def publish_amqp_message_by_unit(self, sentry_unit, message,
 | 
			
		||||
                                     queue="test", ssl=False,
 | 
			
		||||
                                     username="testuser1",
 | 
			
		||||
                                     password="changeme",
 | 
			
		||||
                                     port=None):
 | 
			
		||||
        """Publish an amqp message to a rmq juju unit.
 | 
			
		||||
 | 
			
		||||
        :param sentry_unit: sentry unit pointer
 | 
			
		||||
        :param message: amqp message string
 | 
			
		||||
        :param queue: message queue, default to test
 | 
			
		||||
        :param username: amqp user name, default to testuser1
 | 
			
		||||
        :param password: amqp user password
 | 
			
		||||
        :param ssl: boolean, default to False
 | 
			
		||||
        :param port: amqp port, use defaults if None
 | 
			
		||||
        :returns: None.  Raises exception if publish failed.
 | 
			
		||||
        """
 | 
			
		||||
        self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
 | 
			
		||||
                                                                    message))
 | 
			
		||||
        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
 | 
			
		||||
                                               port=port,
 | 
			
		||||
                                               username=username,
 | 
			
		||||
                                               password=password)
 | 
			
		||||
 | 
			
		||||
        # NOTE(beisner): extra debug here re: pika hang potential:
 | 
			
		||||
        #   https://github.com/pika/pika/issues/297
 | 
			
		||||
        #   https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
 | 
			
		||||
        self.log.debug('Defining channel...')
 | 
			
		||||
        channel = connection.channel()
 | 
			
		||||
        self.log.debug('Declaring queue...')
 | 
			
		||||
        channel.queue_declare(queue=queue, auto_delete=False, durable=True)
 | 
			
		||||
        self.log.debug('Publishing message...')
 | 
			
		||||
        channel.basic_publish(exchange='', routing_key=queue, body=message)
 | 
			
		||||
        self.log.debug('Closing channel...')
 | 
			
		||||
        channel.close()
 | 
			
		||||
        self.log.debug('Closing connection...')
 | 
			
		||||
        connection.close()
 | 
			
		||||
 | 
			
		||||
    def get_amqp_message_by_unit(self, sentry_unit, queue="test",
 | 
			
		||||
                                 username="testuser1",
 | 
			
		||||
                                 password="changeme",
 | 
			
		||||
                                 ssl=False, port=None):
 | 
			
		||||
        """Get an amqp message from a rmq juju unit.
 | 
			
		||||
 | 
			
		||||
        :param sentry_unit: sentry unit pointer
 | 
			
		||||
        :param queue: message queue, default to test
 | 
			
		||||
        :param username: amqp user name, default to testuser1
 | 
			
		||||
        :param password: amqp user password
 | 
			
		||||
        :param ssl: boolean, default to False
 | 
			
		||||
        :param port: amqp port, use defaults if None
 | 
			
		||||
        :returns: amqp message body as string.  Raise if get fails.
 | 
			
		||||
        """
 | 
			
		||||
        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
 | 
			
		||||
                                               port=port,
 | 
			
		||||
                                               username=username,
 | 
			
		||||
                                               password=password)
 | 
			
		||||
        channel = connection.channel()
 | 
			
		||||
        method_frame, _, body = channel.basic_get(queue)
 | 
			
		||||
 | 
			
		||||
        if method_frame:
 | 
			
		||||
            self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
 | 
			
		||||
                                                                         body))
 | 
			
		||||
            channel.basic_ack(method_frame.delivery_tag)
 | 
			
		||||
            channel.close()
 | 
			
		||||
            connection.close()
 | 
			
		||||
            return body
 | 
			
		||||
        else:
 | 
			
		||||
            msg = 'No message retrieved.'
 | 
			
		||||
            amulet.raise_status(amulet.FAIL, msg)
 | 
			
		||||
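Taken together, the helpers above are intended to be driven from an amulet test case. A minimal sketch of that flow (illustrative only; `u` stands for the utils object that exposes these methods and `sentries` for the list of rabbitmq-server sentry units, neither of which is defined in this commit):

    def exercise_rmq(u, sentries):
        """Illustrative only: drive the rmq helpers above end-to-end."""
        u.add_rmq_test_user(sentries)                        # creates testuser1, checks connects
        assert u.validate_rmq_cluster_running_nodes(sentries) is None
        u.publish_amqp_message_by_unit(sentries[0], 'ping')  # publish via the first unit
        assert u.get_amqp_message_by_unit(sentries[1]) == 'ping'  # consume via another unit
        u.delete_rmq_test_user(sentries)                     # clean up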
							
								
								
									
1427    hooks/charmhelpers/contrib/openstack/context.py    Normal file
        (File diff suppressed because it is too large)

18      hooks/charmhelpers/contrib/openstack/files/__init__.py    Normal file
@@ -0,0 +1,18 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
# dummy __init__.py to fool syncer into thinking this is a syncable python
 | 
			
		||||
# module
 | 
			
		||||
							
								
								
									
32      hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh    Executable file
@@ -0,0 +1,32 @@
 | 
			
		||||
#!/bin/bash
 | 
			
		||||
#--------------------------------------------
 | 
			
		||||
# This file is managed by Juju
 | 
			
		||||
#--------------------------------------------
 | 
			
		||||
#
 | 
			
		||||
# Copyright 2009,2012 Canonical Ltd.
 | 
			
		||||
# Author: Tom Haddon
 | 
			
		||||
 | 
			
		||||
CRITICAL=0
 | 
			
		||||
NOTACTIVE=''
 | 
			
		||||
LOGFILE=/var/log/nagios/check_haproxy.log
 | 
			
		||||
AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
 | 
			
		||||
 | 
			
		||||
for appserver in $(grep '    server' /etc/haproxy/haproxy.cfg | awk '{print $2}');
 | 
			
		||||
do
 | 
			
		||||
    output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
 | 
			
		||||
    if [ $? != 0 ]; then
 | 
			
		||||
        date >> $LOGFILE
 | 
			
		||||
        echo $output >> $LOGFILE
 | 
			
		||||
        /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
 | 
			
		||||
        CRITICAL=1
 | 
			
		||||
        NOTACTIVE="${NOTACTIVE} $appserver"
 | 
			
		||||
    fi
 | 
			
		||||
done
 | 
			
		||||
 | 
			
		||||
if [ $CRITICAL = 1 ]; then
 | 
			
		||||
    echo "CRITICAL:${NOTACTIVE}"
 | 
			
		||||
    exit 2
 | 
			
		||||
fi
 | 
			
		||||
 | 
			
		||||
echo "OK: All haproxy instances looking good"
 | 
			
		||||
exit 0
 | 
			
		||||
							
								
								
									
30      hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh    Executable file
@@ -0,0 +1,30 @@
 | 
			
		||||
#!/bin/bash
 | 
			
		||||
#--------------------------------------------
 | 
			
		||||
# This file is managed by Juju
 | 
			
		||||
#--------------------------------------------
 | 
			
		||||
#                                       
 | 
			
		||||
# Copyright 2009,2012 Canonical Ltd.
 | 
			
		||||
# Author: Tom Haddon
 | 
			
		||||
 | 
			
		||||
# These should be config options at some stage
 | 
			
		||||
CURRQthrsh=0
 | 
			
		||||
MAXQthrsh=100
 | 
			
		||||
 | 
			
		||||
AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
 | 
			
		||||
 | 
			
		||||
HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
 | 
			
		||||
 | 
			
		||||
for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
 | 
			
		||||
do
 | 
			
		||||
    CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
 | 
			
		||||
    MAXQ=$(echo "$HAPROXYSTATS"  | grep $BACKEND | grep BACKEND | cut -d , -f 4)
 | 
			
		||||
 | 
			
		||||
    if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
 | 
			
		||||
        echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
 | 
			
		||||
        exit 2
 | 
			
		||||
    fi
 | 
			
		||||
done
 | 
			
		||||
 | 
			
		||||
echo "OK: All haproxy queue depths looking good"
 | 
			
		||||
exit 0
 | 
			
		||||
 | 
			
		||||
							
								
								
									
151     hooks/charmhelpers/contrib/openstack/ip.py    Normal file
@@ -0,0 +1,151 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    config,
 | 
			
		||||
    unit_get,
 | 
			
		||||
    service_name,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.contrib.network.ip import (
 | 
			
		||||
    get_address_in_network,
 | 
			
		||||
    is_address_in_network,
 | 
			
		||||
    is_ipv6,
 | 
			
		||||
    get_ipv6_addr,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.contrib.hahelpers.cluster import is_clustered
 | 
			
		||||
 | 
			
		||||
PUBLIC = 'public'
 | 
			
		||||
INTERNAL = 'int'
 | 
			
		||||
ADMIN = 'admin'
 | 
			
		||||
 | 
			
		||||
ADDRESS_MAP = {
 | 
			
		||||
    PUBLIC: {
 | 
			
		||||
        'config': 'os-public-network',
 | 
			
		||||
        'fallback': 'public-address',
 | 
			
		||||
        'override': 'os-public-hostname',
 | 
			
		||||
    },
 | 
			
		||||
    INTERNAL: {
 | 
			
		||||
        'config': 'os-internal-network',
 | 
			
		||||
        'fallback': 'private-address',
 | 
			
		||||
        'override': 'os-internal-hostname',
 | 
			
		||||
    },
 | 
			
		||||
    ADMIN: {
 | 
			
		||||
        'config': 'os-admin-network',
 | 
			
		||||
        'fallback': 'private-address',
 | 
			
		||||
        'override': 'os-admin-hostname',
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def canonical_url(configs, endpoint_type=PUBLIC):
 | 
			
		||||
    """Returns the correct HTTP URL to this host given the state of HTTPS
 | 
			
		||||
    configuration, hacluster and charm configuration.
 | 
			
		||||
 | 
			
		||||
    :param configs: OSTemplateRenderer config templating object to inspect
 | 
			
		||||
                    for a complete https context.
 | 
			
		||||
    :param endpoint_type: str endpoint type to resolve.
 | 
			
		||||
    :returns: str base URL for services on the current service unit.
 | 
			
		||||
    """
 | 
			
		||||
    scheme = _get_scheme(configs)
 | 
			
		||||
 | 
			
		||||
    address = resolve_address(endpoint_type)
 | 
			
		||||
    if is_ipv6(address):
 | 
			
		||||
        address = "[{}]".format(address)
 | 
			
		||||
 | 
			
		||||
    return '%s://%s' % (scheme, address)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _get_scheme(configs):
 | 
			
		||||
    """Returns the scheme to use for the url (either http or https)
 | 
			
		||||
    depending upon whether https is in the configs value.
 | 
			
		||||
 | 
			
		||||
    :param configs: OSTemplateRenderer config templating object to inspect
 | 
			
		||||
                    for a complete https context.
 | 
			
		||||
    :returns: either 'http' or 'https' depending on whether https is
 | 
			
		||||
              configured within the configs context.
 | 
			
		||||
    """
 | 
			
		||||
    scheme = 'http'
 | 
			
		||||
    if configs and 'https' in configs.complete_contexts():
 | 
			
		||||
        scheme = 'https'
 | 
			
		||||
    return scheme
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _get_address_override(endpoint_type=PUBLIC):
 | 
			
		||||
    """Returns any address overrides that the user has defined based on the
 | 
			
		||||
    endpoint type.
 | 
			
		||||
 | 
			
		||||
    Note: this function allows for the service name to be inserted into the
 | 
			
		||||
    address if the user specifies {service_name}.somehost.org.
 | 
			
		||||
 | 
			
		||||
    :param endpoint_type: the type of endpoint to retrieve the override
 | 
			
		||||
                          value for.
 | 
			
		||||
    :returns: any endpoint address or hostname that the user has overridden
 | 
			
		||||
              or None if an override is not present.
 | 
			
		||||
    """
 | 
			
		||||
    override_key = ADDRESS_MAP[endpoint_type]['override']
 | 
			
		||||
    addr_override = config(override_key)
 | 
			
		||||
    if not addr_override:
 | 
			
		||||
        return None
 | 
			
		||||
    else:
 | 
			
		||||
        return addr_override.format(service_name=service_name())
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def resolve_address(endpoint_type=PUBLIC):
 | 
			
		||||
    """Return unit address depending on net config.
 | 
			
		||||
 | 
			
		||||
    If unit is clustered with vip(s) and has net splits defined, return vip on
 | 
			
		||||
    correct network. If clustered with no nets defined, return primary vip.
 | 
			
		||||
 | 
			
		||||
    If not clustered, return unit address ensuring address is on configured net
 | 
			
		||||
    split if one is configured.
 | 
			
		||||
 | 
			
		||||
    :param endpoint_type: Network endpoint type
 | 
			
		||||
    """
 | 
			
		||||
    resolved_address = _get_address_override(endpoint_type)
 | 
			
		||||
    if resolved_address:
 | 
			
		||||
        return resolved_address
 | 
			
		||||
 | 
			
		||||
    vips = config('vip')
 | 
			
		||||
    if vips:
 | 
			
		||||
        vips = vips.split()
 | 
			
		||||
 | 
			
		||||
    net_type = ADDRESS_MAP[endpoint_type]['config']
 | 
			
		||||
    net_addr = config(net_type)
 | 
			
		||||
    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
 | 
			
		||||
    clustered = is_clustered()
 | 
			
		||||
    if clustered:
 | 
			
		||||
        if not net_addr:
 | 
			
		||||
            # If no net-splits defined, we expect a single vip
 | 
			
		||||
            resolved_address = vips[0]
 | 
			
		||||
        else:
 | 
			
		||||
            for vip in vips:
 | 
			
		||||
                if is_address_in_network(net_addr, vip):
 | 
			
		||||
                    resolved_address = vip
 | 
			
		||||
                    break
 | 
			
		||||
    else:
 | 
			
		||||
        if config('prefer-ipv6'):
 | 
			
		||||
            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
 | 
			
		||||
        else:
 | 
			
		||||
            fallback_addr = unit_get(net_fallback)
 | 
			
		||||
 | 
			
		||||
        resolved_address = get_address_in_network(net_addr, fallback_addr)
 | 
			
		||||
 | 
			
		||||
    if resolved_address is None:
 | 
			
		||||
        raise ValueError("Unable to resolve a suitable IP address based on "
 | 
			
		||||
                         "charm state and configuration. (net_type=%s, "
 | 
			
		||||
                         "clustered=%s)" % (net_type, clustered))
 | 
			
		||||
 | 
			
		||||
    return resolved_address
 | 
			
		||||
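For orientation, a hedged sketch of how a charm hook might consume these helpers to build an endpoint URL (illustrative only; `configs` would be the charm's OSConfigRenderer instance, and the port and path here are assumptions rather than part of this commit):

    from charmhelpers.contrib.openstack.ip import canonical_url, ADMIN

    def build_admin_endpoint(configs, api_port=8776):
        # canonical_url() picks http/https from the rendered contexts and wraps
        # IPv6 addresses in brackets; resolve_address() underneath honours vips
        # and the os-*-network splits.
        return '{}:{}/v1/$(tenant_id)s'.format(canonical_url(configs, ADMIN), api_port)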
							
								
								
									
356     hooks/charmhelpers/contrib/openstack/neutron.py    Normal file
@@ -0,0 +1,356 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
# Various utilities for dealing with Neutron and the renaming from Quantum.
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
from subprocess import check_output
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    config,
 | 
			
		||||
    log,
 | 
			
		||||
    ERROR,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
from charmhelpers.contrib.openstack.utils import os_release
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def headers_package():
 | 
			
		||||
    """Ensures correct linux-headers for running kernel are installed,
 | 
			
		||||
    for building DKMS package"""
 | 
			
		||||
    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
 | 
			
		||||
    return 'linux-headers-%s' % kver
 | 
			
		||||
 | 
			
		||||
QUANTUM_CONF_DIR = '/etc/quantum'
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def kernel_version():
 | 
			
		||||
    """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
 | 
			
		||||
    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
 | 
			
		||||
    kver = kver.split('.')
 | 
			
		||||
    return (int(kver[0]), int(kver[1]))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def determine_dkms_package():
 | 
			
		||||
    """ Determine which DKMS package should be used based on kernel version """
 | 
			
		||||
    # NOTE: 3.13 kernels have support for GRE and VXLAN native
 | 
			
		||||
    if kernel_version() >= (3, 13):
 | 
			
		||||
        return []
 | 
			
		||||
    else:
 | 
			
		||||
        return ['openvswitch-datapath-dkms']
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# legacy
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def quantum_plugins():
 | 
			
		||||
    from charmhelpers.contrib.openstack import context
 | 
			
		||||
    return {
 | 
			
		||||
        'ovs': {
 | 
			
		||||
            'config': '/etc/quantum/plugins/openvswitch/'
 | 
			
		||||
                      'ovs_quantum_plugin.ini',
 | 
			
		||||
            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
 | 
			
		||||
                      'OVSQuantumPluginV2',
 | 
			
		||||
            'contexts': [
 | 
			
		||||
                context.SharedDBContext(user=config('neutron-database-user'),
 | 
			
		||||
                                        database=config('neutron-database'),
 | 
			
		||||
                                        relation_prefix='neutron',
 | 
			
		||||
                                        ssl_dir=QUANTUM_CONF_DIR)],
 | 
			
		||||
            'services': ['quantum-plugin-openvswitch-agent'],
 | 
			
		||||
            'packages': [[headers_package()] + determine_dkms_package(),
 | 
			
		||||
                         ['quantum-plugin-openvswitch-agent']],
 | 
			
		||||
            'server_packages': ['quantum-server',
 | 
			
		||||
                                'quantum-plugin-openvswitch'],
 | 
			
		||||
            'server_services': ['quantum-server']
 | 
			
		||||
        },
 | 
			
		||||
        'nvp': {
 | 
			
		||||
            'config': '/etc/quantum/plugins/nicira/nvp.ini',
 | 
			
		||||
            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
 | 
			
		||||
                      'QuantumPlugin.NvpPluginV2',
 | 
			
		||||
            'contexts': [
 | 
			
		||||
                context.SharedDBContext(user=config('neutron-database-user'),
 | 
			
		||||
                                        database=config('neutron-database'),
 | 
			
		||||
                                        relation_prefix='neutron',
 | 
			
		||||
                                        ssl_dir=QUANTUM_CONF_DIR)],
 | 
			
		||||
            'services': [],
 | 
			
		||||
            'packages': [],
 | 
			
		||||
            'server_packages': ['quantum-server',
 | 
			
		||||
                                'quantum-plugin-nicira'],
 | 
			
		||||
            'server_services': ['quantum-server']
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
NEUTRON_CONF_DIR = '/etc/neutron'
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def neutron_plugins():
 | 
			
		||||
    from charmhelpers.contrib.openstack import context
 | 
			
		||||
    release = os_release('nova-common')
 | 
			
		||||
    plugins = {
 | 
			
		||||
        'ovs': {
 | 
			
		||||
            'config': '/etc/neutron/plugins/openvswitch/'
 | 
			
		||||
                      'ovs_neutron_plugin.ini',
 | 
			
		||||
            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
 | 
			
		||||
                      'OVSNeutronPluginV2',
 | 
			
		||||
            'contexts': [
 | 
			
		||||
                context.SharedDBContext(user=config('neutron-database-user'),
 | 
			
		||||
                                        database=config('neutron-database'),
 | 
			
		||||
                                        relation_prefix='neutron',
 | 
			
		||||
                                        ssl_dir=NEUTRON_CONF_DIR)],
 | 
			
		||||
            'services': ['neutron-plugin-openvswitch-agent'],
 | 
			
		||||
            'packages': [[headers_package()] + determine_dkms_package(),
 | 
			
		||||
                         ['neutron-plugin-openvswitch-agent']],
 | 
			
		||||
            'server_packages': ['neutron-server',
 | 
			
		||||
                                'neutron-plugin-openvswitch'],
 | 
			
		||||
            'server_services': ['neutron-server']
 | 
			
		||||
        },
 | 
			
		||||
        'nvp': {
 | 
			
		||||
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
 | 
			
		||||
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
 | 
			
		||||
                      'NeutronPlugin.NvpPluginV2',
 | 
			
		||||
            'contexts': [
 | 
			
		||||
                context.SharedDBContext(user=config('neutron-database-user'),
 | 
			
		||||
                                        database=config('neutron-database'),
 | 
			
		||||
                                        relation_prefix='neutron',
 | 
			
		||||
                                        ssl_dir=NEUTRON_CONF_DIR)],
 | 
			
		||||
            'services': [],
 | 
			
		||||
            'packages': [],
 | 
			
		||||
            'server_packages': ['neutron-server',
 | 
			
		||||
                                'neutron-plugin-nicira'],
 | 
			
		||||
            'server_services': ['neutron-server']
 | 
			
		||||
        },
 | 
			
		||||
        'nsx': {
 | 
			
		||||
            'config': '/etc/neutron/plugins/vmware/nsx.ini',
 | 
			
		||||
            'driver': 'vmware',
 | 
			
		||||
            'contexts': [
 | 
			
		||||
                context.SharedDBContext(user=config('neutron-database-user'),
 | 
			
		||||
                                        database=config('neutron-database'),
 | 
			
		||||
                                        relation_prefix='neutron',
 | 
			
		||||
                                        ssl_dir=NEUTRON_CONF_DIR)],
 | 
			
		||||
            'services': [],
 | 
			
		||||
            'packages': [],
 | 
			
		||||
            'server_packages': ['neutron-server',
 | 
			
		||||
                                'neutron-plugin-vmware'],
 | 
			
		||||
            'server_services': ['neutron-server']
 | 
			
		||||
        },
 | 
			
		||||
        'n1kv': {
 | 
			
		||||
            'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
 | 
			
		||||
            'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
 | 
			
		||||
            'contexts': [
 | 
			
		||||
                context.SharedDBContext(user=config('neutron-database-user'),
 | 
			
		||||
                                        database=config('neutron-database'),
 | 
			
		||||
                                        relation_prefix='neutron',
 | 
			
		||||
                                        ssl_dir=NEUTRON_CONF_DIR)],
 | 
			
		||||
            'services': [],
 | 
			
		||||
            'packages': [[headers_package()] + determine_dkms_package(),
 | 
			
		||||
                         ['neutron-plugin-cisco']],
 | 
			
		||||
            'server_packages': ['neutron-server',
 | 
			
		||||
                                'neutron-plugin-cisco'],
 | 
			
		||||
            'server_services': ['neutron-server']
 | 
			
		||||
        },
 | 
			
		||||
        'Calico': {
 | 
			
		||||
            'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
 | 
			
		||||
            'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
 | 
			
		||||
            'contexts': [
 | 
			
		||||
                context.SharedDBContext(user=config('neutron-database-user'),
 | 
			
		||||
                                        database=config('neutron-database'),
 | 
			
		||||
                                        relation_prefix='neutron',
 | 
			
		||||
                                        ssl_dir=NEUTRON_CONF_DIR)],
 | 
			
		||||
            'services': ['calico-felix',
 | 
			
		||||
                         'bird',
 | 
			
		||||
                         'neutron-dhcp-agent',
 | 
			
		||||
                         'nova-api-metadata',
 | 
			
		||||
                         'etcd'],
 | 
			
		||||
            'packages': [[headers_package()] + determine_dkms_package(),
 | 
			
		||||
                         ['calico-compute',
 | 
			
		||||
                          'bird',
 | 
			
		||||
                          'neutron-dhcp-agent',
 | 
			
		||||
                          'nova-api-metadata',
 | 
			
		||||
                          'etcd']],
 | 
			
		||||
            'server_packages': ['neutron-server', 'calico-control', 'etcd'],
 | 
			
		||||
            'server_services': ['neutron-server', 'etcd']
 | 
			
		||||
        },
 | 
			
		||||
        'vsp': {
 | 
			
		||||
            'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
 | 
			
		||||
            'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
 | 
			
		||||
            'contexts': [
 | 
			
		||||
                context.SharedDBContext(user=config('neutron-database-user'),
 | 
			
		||||
                                        database=config('neutron-database'),
 | 
			
		||||
                                        relation_prefix='neutron',
 | 
			
		||||
                                        ssl_dir=NEUTRON_CONF_DIR)],
 | 
			
		||||
            'services': [],
 | 
			
		||||
            'packages': [],
 | 
			
		||||
            'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
 | 
			
		||||
            'server_services': ['neutron-server']
 | 
			
		||||
        },
 | 
			
		||||
        'plumgrid': {
 | 
			
		||||
            'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
 | 
			
		||||
            'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
 | 
			
		||||
            'contexts': [
 | 
			
		||||
                context.SharedDBContext(user=config('database-user'),
 | 
			
		||||
                                        database=config('database'),
 | 
			
		||||
                                        ssl_dir=NEUTRON_CONF_DIR)],
 | 
			
		||||
            'services': [],
 | 
			
		||||
            'packages': [['plumgrid-lxc'],
 | 
			
		||||
                         ['iovisor-dkms']],
 | 
			
		||||
            'server_packages': ['neutron-server',
 | 
			
		||||
                                'neutron-plugin-plumgrid'],
 | 
			
		||||
            'server_services': ['neutron-server']
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
    if release >= 'icehouse':
 | 
			
		||||
        # NOTE: patch in ml2 plugin for icehouse onwards
 | 
			
		||||
        plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
 | 
			
		||||
        plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
 | 
			
		||||
        plugins['ovs']['server_packages'] = ['neutron-server',
 | 
			
		||||
                                             'neutron-plugin-ml2']
 | 
			
		||||
        # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
 | 
			
		||||
        plugins['nvp'] = plugins['nsx']
 | 
			
		||||
    return plugins
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def neutron_plugin_attribute(plugin, attr, net_manager=None):
 | 
			
		||||
    manager = net_manager or network_manager()
 | 
			
		||||
    if manager == 'quantum':
 | 
			
		||||
        plugins = quantum_plugins()
 | 
			
		||||
    elif manager == 'neutron':
 | 
			
		||||
        plugins = neutron_plugins()
 | 
			
		||||
    else:
 | 
			
		||||
        log("Network manager '%s' does not support plugins." % (manager),
 | 
			
		||||
            level=ERROR)
 | 
			
		||||
        raise Exception
 | 
			
		||||
 | 
			
		||||
    try:
 | 
			
		||||
        _plugin = plugins[plugin]
 | 
			
		||||
    except KeyError:
 | 
			
		||||
        log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
 | 
			
		||||
        raise Exception
 | 
			
		||||
 | 
			
		||||
    try:
 | 
			
		||||
        return _plugin[attr]
 | 
			
		||||
    except KeyError:
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def network_manager():
 | 
			
		||||
    '''
 | 
			
		||||
    Deals with the renaming of Quantum to Neutron in H and any situations
 | 
			
		||||
    that require compatibility (e.g. deploying H with network-manager=quantum,
 | 
			
		||||
    upgrading from G).
 | 
			
		||||
    '''
 | 
			
		||||
    release = os_release('nova-common')
 | 
			
		||||
    manager = config('network-manager').lower()
 | 
			
		||||
 | 
			
		||||
    if manager not in ['quantum', 'neutron']:
 | 
			
		||||
        return manager
 | 
			
		||||
 | 
			
		||||
    if release in ['essex']:
 | 
			
		||||
        # E does not support neutron
 | 
			
		||||
        log('Neutron networking not supported in Essex.', level=ERROR)
 | 
			
		||||
        raise Exception
 | 
			
		||||
    elif release in ['folsom', 'grizzly']:
 | 
			
		||||
        # neutron is named quantum in F and G
 | 
			
		||||
        return 'quantum'
 | 
			
		||||
    else:
 | 
			
		||||
        # ensure accurate naming for all releases post-H
 | 
			
		||||
        return 'neutron'
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def parse_mappings(mappings, key_rvalue=False):
 | 
			
		||||
    """By default mappings are lvalue keyed.
 | 
			
		||||
 | 
			
		||||
    If key_rvalue is True, the mapping will be reversed to allow multiple
 | 
			
		||||
    configs for the same lvalue.
 | 
			
		||||
    """
 | 
			
		||||
    parsed = {}
 | 
			
		||||
    if mappings:
 | 
			
		||||
        mappings = mappings.split()
 | 
			
		||||
        for m in mappings:
 | 
			
		||||
            p = m.partition(':')
 | 
			
		||||
 | 
			
		||||
            if key_rvalue:
 | 
			
		||||
                key_index = 2
 | 
			
		||||
                val_index = 0
 | 
			
		||||
                # if there is no rvalue skip to next
 | 
			
		||||
                if not p[1]:
 | 
			
		||||
                    continue
 | 
			
		||||
            else:
 | 
			
		||||
                key_index = 0
 | 
			
		||||
                val_index = 2
 | 
			
		||||
 | 
			
		||||
            key = p[key_index].strip()
 | 
			
		||||
            parsed[key] = p[val_index].strip()
 | 
			
		||||
 | 
			
		||||
    return parsed
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def parse_bridge_mappings(mappings):
 | 
			
		||||
    """Parse bridge mappings.
 | 
			
		||||
 | 
			
		||||
    Mappings must be a space-delimited list of provider:bridge mappings.
 | 
			
		||||
 | 
			
		||||
    Returns dict of the form {provider:bridge}.
 | 
			
		||||
    """
 | 
			
		||||
    return parse_mappings(mappings)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def parse_data_port_mappings(mappings, default_bridge='br-data'):
 | 
			
		||||
    """Parse data port mappings.
 | 
			
		||||
 | 
			
		||||
    Mappings must be a space-delimited list of bridge:port.
 | 
			
		||||
 | 
			
		||||
    Returns dict of the form {port:bridge} where ports may be mac addresses or
 | 
			
		||||
    interface names.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
 | 
			
		||||
    # proposed for <port> since it may be a mac address which will differ
 | 
			
		||||
    # across units, thus allowing first-known-good to be chosen.
 | 
			
		||||
    _mappings = parse_mappings(mappings, key_rvalue=True)
 | 
			
		||||
    if not _mappings or list(_mappings.values()) == ['']:
 | 
			
		||||
        if not mappings:
 | 
			
		||||
            return {}
 | 
			
		||||
 | 
			
		||||
        # For backwards-compatibility we need to support port-only provided in
 | 
			
		||||
        # config.
 | 
			
		||||
        _mappings = {mappings.split()[0]: default_bridge}
 | 
			
		||||
 | 
			
		||||
    ports = _mappings.keys()
 | 
			
		||||
    if len(set(ports)) != len(ports):
 | 
			
		||||
        raise Exception("It is not allowed to have the same port configured "
 | 
			
		||||
                        "on more than one bridge")
 | 
			
		||||
 | 
			
		||||
    return _mappings
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def parse_vlan_range_mappings(mappings):
 | 
			
		||||
    """Parse vlan range mappings.
 | 
			
		||||
 | 
			
		||||
    Mappings must be a space-delimited list of provider:start:end mappings.
 | 
			
		||||
 | 
			
		||||
    The start:end range is optional and may be omitted.
 | 
			
		||||
 | 
			
		||||
    Returns dict of the form {provider: (start, end)}.
 | 
			
		||||
    """
 | 
			
		||||
    _mappings = parse_mappings(mappings)
 | 
			
		||||
    if not _mappings:
 | 
			
		||||
        return {}
 | 
			
		||||
 | 
			
		||||
    mappings = {}
 | 
			
		||||
    for p, r in six.iteritems(_mappings):
 | 
			
		||||
        mappings[p] = tuple(r.split(':'))
 | 
			
		||||
 | 
			
		||||
    return mappings
 | 
			
		||||
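Illustrative only, a short sketch of the behaviour the mapping parsers above are expected to exhibit (the example values are assumptions, not part of this commit):

    from charmhelpers.contrib.openstack.neutron import (
        parse_bridge_mappings,
        parse_data_port_mappings,
        parse_vlan_range_mappings,
    )

    # bridge mappings stay lvalue-keyed: {provider: bridge}
    assert parse_bridge_mappings('physnet1:br-ex') == {'physnet1': 'br-ex'}
    # data-port mappings are rvalue-keyed, so ports (names or MACs) become keys
    assert parse_data_port_mappings('br-ex:eth1 br-data:aa:bb:cc:dd:ee:ff') == \
        {'eth1': 'br-ex', 'aa:bb:cc:dd:ee:ff': 'br-data'}
    # vlan ranges come back as {provider: (start, end)}
    assert parse_vlan_range_mappings('physnet1:1000:2000') == {'physnet1': ('1000', '2000')}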
							
								
								
									
18      hooks/charmhelpers/contrib/openstack/templates/__init__.py    Normal file
@@ -0,0 +1,18 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
# dummy __init__.py to fool syncer into thinking this is a syncable python
 | 
			
		||||
# module
 | 
			
		||||
							
								
								
									
21      hooks/charmhelpers/contrib/openstack/templates/ceph.conf    Normal file
@@ -0,0 +1,21 @@
 | 
			
		||||
###############################################################################
 | 
			
		||||
# [ WARNING ]
 | 
			
		||||
# cinder configuration file maintained by Juju
 | 
			
		||||
# local changes may be overwritten.
 | 
			
		||||
###############################################################################
 | 
			
		||||
[global]
 | 
			
		||||
{% if auth -%}
 | 
			
		||||
auth_supported = {{ auth }}
 | 
			
		||||
keyring = /etc/ceph/$cluster.$name.keyring
 | 
			
		||||
mon host = {{ mon_hosts }}
 | 
			
		||||
{% endif -%}
 | 
			
		||||
log to syslog = {{ use_syslog }}
 | 
			
		||||
err to syslog = {{ use_syslog }}
 | 
			
		||||
clog to syslog = {{ use_syslog }}
 | 
			
		||||
 | 
			
		||||
[client]
 | 
			
		||||
{% if rbd_client_cache_settings -%}
 | 
			
		||||
{% for key, value in rbd_client_cache_settings.iteritems() -%}
 | 
			
		||||
{{ key }} = {{ value }}
 | 
			
		||||
{% endfor -%}
 | 
			
		||||
{%- endif %}
 | 
			
		||||
							
								
								
									
17      hooks/charmhelpers/contrib/openstack/templates/git.upstart    Normal file
@@ -0,0 +1,17 @@
 | 
			
		||||
description "{{ service_description }}"
 | 
			
		||||
author "Juju {{ service_name }} Charm <juju@localhost>"
 | 
			
		||||
 | 
			
		||||
start on runlevel [2345]
 | 
			
		||||
stop on runlevel [!2345]
 | 
			
		||||
 | 
			
		||||
respawn
 | 
			
		||||
 | 
			
		||||
exec start-stop-daemon --start --chuid {{ user_name }} \
 | 
			
		||||
            --chdir {{ start_dir }} --name {{ process_name }} \
 | 
			
		||||
            --exec {{ executable_name }} -- \
 | 
			
		||||
            {% for config_file in config_files -%}
 | 
			
		||||
            --config-file={{ config_file }} \
 | 
			
		||||
            {% endfor -%}
 | 
			
		||||
            {% if log_file -%}
 | 
			
		||||
            --log-file={{ log_file }}
 | 
			
		||||
            {% endif -%}
 | 
			
		||||
							
								
								
									
58      hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg    Normal file
@@ -0,0 +1,58 @@
 | 
			
		||||
global
 | 
			
		||||
    log {{ local_host }} local0
 | 
			
		||||
    log {{ local_host }} local1 notice
 | 
			
		||||
    maxconn 20000
 | 
			
		||||
    user haproxy
 | 
			
		||||
    group haproxy
 | 
			
		||||
    spread-checks 0
 | 
			
		||||
 | 
			
		||||
defaults
 | 
			
		||||
    log global
 | 
			
		||||
    mode tcp
 | 
			
		||||
    option tcplog
 | 
			
		||||
    option dontlognull
 | 
			
		||||
    retries 3
 | 
			
		||||
    timeout queue 1000
 | 
			
		||||
    timeout connect 1000
 | 
			
		||||
{% if haproxy_client_timeout -%}
 | 
			
		||||
    timeout client {{ haproxy_client_timeout }}
 | 
			
		||||
{% else -%}
 | 
			
		||||
    timeout client 30000
 | 
			
		||||
{% endif -%}
 | 
			
		||||
 | 
			
		||||
{% if haproxy_server_timeout -%}
 | 
			
		||||
    timeout server {{ haproxy_server_timeout }}
 | 
			
		||||
{% else -%}
 | 
			
		||||
    timeout server 30000
 | 
			
		||||
{% endif -%}
 | 
			
		||||
 | 
			
		||||
listen stats {{ stat_port }}
 | 
			
		||||
    mode http
 | 
			
		||||
    stats enable
 | 
			
		||||
    stats hide-version
 | 
			
		||||
    stats realm Haproxy\ Statistics
 | 
			
		||||
    stats uri /
 | 
			
		||||
    stats auth admin:password
 | 
			
		||||
 | 
			
		||||
{% if frontends -%}
 | 
			
		||||
{% for service, ports in service_ports.items() -%}
 | 
			
		||||
frontend tcp-in_{{ service }}
 | 
			
		||||
    bind *:{{ ports[0] }}
 | 
			
		||||
    {% if ipv6 -%}
 | 
			
		||||
    bind :::{{ ports[0] }}
 | 
			
		||||
    {% endif -%}
 | 
			
		||||
    {% for frontend in frontends -%}
 | 
			
		||||
    acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
 | 
			
		||||
    use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
 | 
			
		||||
    {% endfor -%}
 | 
			
		||||
    default_backend {{ service }}_{{ default_backend }}
 | 
			
		||||
 | 
			
		||||
{% for frontend in frontends -%}
 | 
			
		||||
backend {{ service }}_{{ frontend }}
 | 
			
		||||
    balance leastconn
 | 
			
		||||
    {% for unit, address in frontends[frontend]['backends'].items() -%}
 | 
			
		||||
    server {{ unit }} {{ address }}:{{ ports[1] }} check
 | 
			
		||||
    {% endfor %}
 | 
			
		||||
{% endfor -%}
 | 
			
		||||
{% endfor -%}
 | 
			
		||||
{% endif -%}
 | 
			
		||||
@@ -0,0 +1,24 @@
{% if endpoints -%}
{% for ext_port in ext_ports -%}
Listen {{ ext_port }}
{% endfor -%}
{% for address, endpoint, ext, int in endpoints -%}
<VirtualHost {{ address }}:{{ ext }}>
    ServerName {{ endpoint }}
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
    ProxyPass / http://localhost:{{ int }}/
    ProxyPassReverse / http://localhost:{{ int }}/
    ProxyPreserveHost on
</VirtualHost>
{% endfor -%}
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
{% endif -%}
@@ -0,0 +1,24 @@
{% if endpoints -%}
{% for ext_port in ext_ports -%}
Listen {{ ext_port }}
{% endfor -%}
{% for address, endpoint, ext, int in endpoints -%}
<VirtualHost {{ address }}:{{ ext }}>
    ServerName {{ endpoint }}
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
    ProxyPass / http://localhost:{{ int }}/
    ProxyPassReverse / http://localhost:{{ int }}/
    ProxyPreserveHost on
</VirtualHost>
{% endfor -%}
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
{% endif -%}
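The two identical 24-line hunks above are the Apache SSL frontend template, which reverse-proxies an external HTTPS endpoint to the local service port. The context it consumes is a list of endpoint tuples plus a certificate namespace; a sketch with illustrative values only::

    ctxt = {
        'namespace': 'cinder',        # keys/certs live under /etc/apache2/ssl/<namespace>/
        'ext_ports': [8776],
        'endpoints': [
            # (listen address, server name, external port, internal port)
            ('10.0.0.10', '10.0.0.10', 8776, 8766),
        ],
    }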
@@ -0,0 +1,9 @@
{% if auth_host -%}
[keystone_authtoken]
identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
admin_tenant_name = {{ admin_tenant_name }}
admin_user = {{ admin_user }}
admin_password = {{ admin_password }}
signing_dir = {{ signing_dir }}
{% endif -%}
@@ -0,0 +1,22 @@
{% if rabbitmq_host or rabbitmq_hosts -%}
[oslo_messaging_rabbit]
rabbit_userid = {{ rabbitmq_user }}
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
rabbit_password = {{ rabbitmq_password }}
{% if rabbitmq_hosts -%}
rabbit_hosts = {{ rabbitmq_hosts }}
{% if rabbitmq_ha_queues -%}
rabbit_ha_queues = True
rabbit_durable_queues = False
{% endif -%}
{% else -%}
rabbit_host = {{ rabbitmq_host }}
{% endif -%}
{% if rabbit_ssl_port -%}
rabbit_use_ssl = True
rabbit_port = {{ rabbit_ssl_port }}
{% if rabbit_ssl_ca -%}
kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
{% endif -%}
{% endif -%}
{% endif -%}
@@ -0,0 +1,14 @@
{% if zmq_host -%}
# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
rpc_backend = zmq
rpc_zmq_host = {{ zmq_host }}
{% if zmq_redis_address -%}
rpc_zmq_matchmaker = redis
matchmaker_heartbeat_freq = 15
matchmaker_heartbeat_ttl = 30
[matchmaker_redis]
host = {{ zmq_redis_address }}
{% else -%}
rpc_zmq_matchmaker = ring
{% endif -%}
{% endif -%}
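These section fragments (identity, RabbitMQ and ZeroMQ settings) are written to be pulled into a service's main configuration template rather than rendered on their own. As a sketch, assuming the fragment file names used by charm-helpers (e.g. section-zeromq) and a loader pointed at this templates directory, a cinder.conf template could include one like this::

    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader(
        'hooks/charmhelpers/contrib/openstack/templates'))
    cinder_conf = env.from_string(
        '[DEFAULT]\n'
        'verbose = True\n'
        '{% include "section-zeromq" %}\n')
    # With zmq_host unset the included fragment renders to nothing.
    print(cinder_conf.render(zmq_host=None))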
							
								
								
									
323  hooks/charmhelpers/contrib/openstack/templating.py  Normal file
@@ -0,0 +1,323 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
 | 
			
		||||
from charmhelpers.fetch import apt_install, apt_update
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    log,
 | 
			
		||||
    ERROR,
 | 
			
		||||
    INFO
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
 | 
			
		||||
except ImportError:
 | 
			
		||||
    apt_update(fatal=True)
 | 
			
		||||
    apt_install('python-jinja2', fatal=True)
 | 
			
		||||
    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class OSConfigException(Exception):
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_loader(templates_dir, os_release):
 | 
			
		||||
    """
 | 
			
		||||
    Create a jinja2.ChoiceLoader containing template dirs up to
 | 
			
		||||
    and including os_release.  If a release's template directory
    is missing at templates_dir, it will be omitted from the loader.
 | 
			
		||||
    templates_dir is added to the bottom of the search list as a base
 | 
			
		||||
    loading dir.
 | 
			
		||||
 | 
			
		||||
    A charm may also ship a templates dir with this module
 | 
			
		||||
    and it will be appended to the bottom of the search list, eg::
 | 
			
		||||
 | 
			
		||||
        hooks/charmhelpers/contrib/openstack/templates
 | 
			
		||||
 | 
			
		||||
    :param templates_dir (str): Base template directory containing release
 | 
			
		||||
        sub-directories.
 | 
			
		||||
    :param os_release (str): OpenStack release codename to construct template
 | 
			
		||||
        loader.
 | 
			
		||||
    :returns: jinja2.ChoiceLoader constructed with a list of
 | 
			
		||||
        jinja2.FilesystemLoaders, ordered in descending
 | 
			
		||||
        order by OpenStack release.
 | 
			
		||||
    """
 | 
			
		||||
    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
 | 
			
		||||
                 for rel in six.itervalues(OPENSTACK_CODENAMES)]
 | 
			
		||||
 | 
			
		||||
    if not os.path.isdir(templates_dir):
 | 
			
		||||
        log('Templates directory not found @ %s.' % templates_dir,
 | 
			
		||||
            level=ERROR)
 | 
			
		||||
        raise OSConfigException
 | 
			
		||||
 | 
			
		||||
    # the bottom contains templates_dir and possibly a common templates dir
 | 
			
		||||
    # shipped with the helper.
 | 
			
		||||
    loaders = [FileSystemLoader(templates_dir)]
 | 
			
		||||
    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
 | 
			
		||||
    if os.path.isdir(helper_templates):
 | 
			
		||||
        loaders.append(FileSystemLoader(helper_templates))
 | 
			
		||||
 | 
			
		||||
    for rel, tmpl_dir in tmpl_dirs:
 | 
			
		||||
        if os.path.isdir(tmpl_dir):
 | 
			
		||||
            loaders.insert(0, FileSystemLoader(tmpl_dir))
 | 
			
		||||
        if rel == os_release:
 | 
			
		||||
            break
 | 
			
		||||
    log('Creating choice loader with dirs: %s' %
 | 
			
		||||
        [l.searchpath for l in loaders], level=INFO)
 | 
			
		||||
    return ChoiceLoader(loaders)
 | 
			
		||||
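# Illustrative note, not part of the original charm-helpers module: for
# get_loader('templates', 'kilo') the search order built above is, newest
# release first and skipping directories that do not exist,
#
#     templates/kilo, templates/juno, ..., templates/essex, templates/diablo,
#     templates, hooks/charmhelpers/contrib/openstack/templates
#
# so a release-specific copy of a template always shadows the base copy
# shipped by the charm or by charm-helpers itself.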
 | 
			
		||||
 | 
			
		||||
class OSConfigTemplate(object):
 | 
			
		||||
    """
 | 
			
		||||
    Associates a config file template with a list of context generators.
 | 
			
		||||
    Responsible for constructing a template context based on those generators.
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, config_file, contexts):
 | 
			
		||||
        self.config_file = config_file
 | 
			
		||||
 | 
			
		||||
        if hasattr(contexts, '__call__'):
 | 
			
		||||
            self.contexts = [contexts]
 | 
			
		||||
        else:
 | 
			
		||||
            self.contexts = contexts
 | 
			
		||||
 | 
			
		||||
        self._complete_contexts = []
 | 
			
		||||
 | 
			
		||||
    def context(self):
 | 
			
		||||
        ctxt = {}
 | 
			
		||||
        for context in self.contexts:
 | 
			
		||||
            _ctxt = context()
 | 
			
		||||
            if _ctxt:
 | 
			
		||||
                ctxt.update(_ctxt)
 | 
			
		||||
                # track interfaces for every complete context.
 | 
			
		||||
                [self._complete_contexts.append(interface)
 | 
			
		||||
                 for interface in context.interfaces
 | 
			
		||||
                 if interface not in self._complete_contexts]
 | 
			
		||||
        return ctxt
 | 
			
		||||
 | 
			
		||||
    def complete_contexts(self):
 | 
			
		||||
        '''
 | 
			
		||||
        Return a list of interfaces that have satisfied contexts.
 | 
			
		||||
        '''
 | 
			
		||||
        if self._complete_contexts:
 | 
			
		||||
            return self._complete_contexts
 | 
			
		||||
        self.context()
 | 
			
		||||
        return self._complete_contexts
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class OSConfigRenderer(object):
 | 
			
		||||
    """
 | 
			
		||||
    This class provides a common templating system to be used by OpenStack
 | 
			
		||||
    charms.  It is intended to help charms share common code and templates,
 | 
			
		||||
    and ease the burden of managing config templates across multiple OpenStack
 | 
			
		||||
    releases.
 | 
			
		||||
 | 
			
		||||
    Basic usage::
 | 
			
		||||
 | 
			
		||||
        # import some common context generators from charmhelpers
 | 
			
		||||
        from charmhelpers.contrib.openstack import context
 | 
			
		||||
 | 
			
		||||
        # Create a renderer object for a specific OS release.
 | 
			
		||||
        configs = OSConfigRenderer(templates_dir='/tmp/templates',
 | 
			
		||||
                                   openstack_release='folsom')
 | 
			
		||||
        # register some config files with context generators.
 | 
			
		||||
        configs.register(config_file='/etc/nova/nova.conf',
 | 
			
		||||
                         contexts=[context.SharedDBContext(),
 | 
			
		||||
                                   context.AMQPContext()])
 | 
			
		||||
        configs.register(config_file='/etc/nova/api-paste.ini',
 | 
			
		||||
                         contexts=[context.IdentityServiceContext()])
 | 
			
		||||
        configs.register(config_file='/etc/haproxy/haproxy.conf',
 | 
			
		||||
                         contexts=[context.HAProxyContext()])
 | 
			
		||||
        # write out a single config
 | 
			
		||||
        configs.write('/etc/nova/nova.conf')
 | 
			
		||||
        # write out all registered configs
 | 
			
		||||
        configs.write_all()
 | 
			
		||||
 | 
			
		||||
    **OpenStack Releases and template loading**
 | 
			
		||||
 | 
			
		||||
    When the object is instantiated, it is associated with a specific OS
 | 
			
		||||
    release.  This dictates how the template loader will be constructed.
 | 
			
		||||
 | 
			
		||||
    The constructed loader attempts to load the template from several places
 | 
			
		||||
    in the following order:
 | 
			
		||||
    - from the most recent OS release-specific template dir (if one exists)
 | 
			
		||||
    - the base templates_dir
 | 
			
		||||
    - a template directory shipped in the charm with this helper file.
 | 
			
		||||
 | 
			
		||||
    For the example above, '/tmp/templates' contains the following structure::
 | 
			
		||||
 | 
			
		||||
        /tmp/templates/nova.conf
 | 
			
		||||
        /tmp/templates/api-paste.ini
 | 
			
		||||
        /tmp/templates/grizzly/api-paste.ini
 | 
			
		||||
        /tmp/templates/havana/api-paste.ini
 | 
			
		||||
 | 
			
		||||
    Since it was registered with the grizzly release, it first searches
 | 
			
		||||
    the grizzly directory for nova.conf, then the templates dir.
 | 
			
		||||
 | 
			
		||||
    When writing api-paste.ini, it will find the template in the grizzly
 | 
			
		||||
    directory.
 | 
			
		||||
 | 
			
		||||
    If the object were created with folsom, it would fall back to the
 | 
			
		||||
    base templates dir for its api-paste.ini template.
 | 
			
		||||
 | 
			
		||||
    This system should help manage changes in config files through
 | 
			
		||||
    openstack releases, allowing charms to fall back to the most recently
 | 
			
		||||
    updated config template for a given release.
 | 
			
		||||
 | 
			
		||||
    The haproxy.conf, since it is not shipped in the templates dir, will
 | 
			
		||||
    be loaded from the module directory's template directory, eg
 | 
			
		||||
    $CHARM/hooks/charmhelpers/contrib/openstack/templates.  This allows
 | 
			
		||||
    us to ship common templates (haproxy, apache) with the helpers.
 | 
			
		||||
 | 
			
		||||
    **Context generators**
 | 
			
		||||
 | 
			
		||||
    Context generators are used to generate template contexts during hook
 | 
			
		||||
    execution.  Doing so may require inspecting service relations, charm
 | 
			
		||||
    config, etc.  When registered, a config file is associated with a list
 | 
			
		||||
    of generators.  When a template is rendered and written, all context
 | 
			
		||||
    generators are called in a chain to generate the context dictionary
 | 
			
		||||
    passed to the jinja2 template. See context.py for more info.
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, templates_dir, openstack_release):
 | 
			
		||||
        if not os.path.isdir(templates_dir):
 | 
			
		||||
            log('Could not locate templates dir %s' % templates_dir,
 | 
			
		||||
                level=ERROR)
 | 
			
		||||
            raise OSConfigException
 | 
			
		||||
 | 
			
		||||
        self.templates_dir = templates_dir
 | 
			
		||||
        self.openstack_release = openstack_release
 | 
			
		||||
        self.templates = {}
 | 
			
		||||
        self._tmpl_env = None
 | 
			
		||||
 | 
			
		||||
        if None in [Environment, ChoiceLoader, FileSystemLoader]:
 | 
			
		||||
            # if this code is running, the object is created pre-install hook.
 | 
			
		||||
            # jinja2 shouldn't get touched until the module is reloaded on next
 | 
			
		||||
            # hook execution, with proper jinja2 bits successfully imported.
 | 
			
		||||
            apt_install('python-jinja2')
 | 
			
		||||
 | 
			
		||||
    def register(self, config_file, contexts):
 | 
			
		||||
        """
 | 
			
		||||
        Register a config file with a list of context generators to be called
 | 
			
		||||
        during rendering.
 | 
			
		||||
        """
 | 
			
		||||
        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
 | 
			
		||||
                                                       contexts=contexts)
 | 
			
		||||
        log('Registered config file: %s' % config_file, level=INFO)
 | 
			
		||||
 | 
			
		||||
    def _get_tmpl_env(self):
 | 
			
		||||
        if not self._tmpl_env:
 | 
			
		||||
            loader = get_loader(self.templates_dir, self.openstack_release)
 | 
			
		||||
            self._tmpl_env = Environment(loader=loader)
 | 
			
		||||
 | 
			
		||||
    def _get_template(self, template):
 | 
			
		||||
        self._get_tmpl_env()
 | 
			
		||||
        template = self._tmpl_env.get_template(template)
 | 
			
		||||
        log('Loaded template from %s' % template.filename, level=INFO)
 | 
			
		||||
        return template
 | 
			
		||||
 | 
			
		||||
    def render(self, config_file):
 | 
			
		||||
        if config_file not in self.templates:
 | 
			
		||||
            log('Config not registered: %s' % config_file, level=ERROR)
 | 
			
		||||
            raise OSConfigException
 | 
			
		||||
        ctxt = self.templates[config_file].context()
 | 
			
		||||
 | 
			
		||||
        _tmpl = os.path.basename(config_file)
 | 
			
		||||
        try:
 | 
			
		||||
            template = self._get_template(_tmpl)
 | 
			
		||||
        except exceptions.TemplateNotFound:
 | 
			
		||||
            # if no template is found with basename, try looking for it
 | 
			
		||||
            # using a munged full path, eg:
 | 
			
		||||
            #   /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
 | 
			
		||||
            _tmpl = '_'.join(config_file.split('/')[1:])
 | 
			
		||||
            try:
 | 
			
		||||
                template = self._get_template(_tmpl)
 | 
			
		||||
            except exceptions.TemplateNotFound as e:
 | 
			
		||||
                log('Could not load template from %s by %s or %s.' %
 | 
			
		||||
                    (self.templates_dir, os.path.basename(config_file), _tmpl),
 | 
			
		||||
                    level=ERROR)
 | 
			
		||||
                raise e
 | 
			
		||||
 | 
			
		||||
        log('Rendering from template: %s' % _tmpl, level=INFO)
 | 
			
		||||
        return template.render(ctxt)
 | 
			
		||||
 | 
			
		||||
    def write(self, config_file):
 | 
			
		||||
        """
 | 
			
		||||
        Write a single config file, raises if config file is not registered.
 | 
			
		||||
        """
 | 
			
		||||
        if config_file not in self.templates:
 | 
			
		||||
            log('Config not registered: %s' % config_file, level=ERROR)
 | 
			
		||||
            raise OSConfigException
 | 
			
		||||
 | 
			
		||||
        _out = self.render(config_file)
 | 
			
		||||
 | 
			
		||||
        with open(config_file, 'wb') as out:
 | 
			
		||||
            out.write(_out)
 | 
			
		||||
 | 
			
		||||
        log('Wrote template %s.' % config_file, level=INFO)
 | 
			
		||||
 | 
			
		||||
    def write_all(self):
 | 
			
		||||
        """
 | 
			
		||||
        Write out all registered config files.
 | 
			
		||||
        """
 | 
			
		||||
        [self.write(k) for k in six.iterkeys(self.templates)]
 | 
			
		||||
 | 
			
		||||
    def set_release(self, openstack_release):
 | 
			
		||||
        """
 | 
			
		||||
        Resets the template environment and generates a new template loader
 | 
			
		||||
        based on the new OpenStack release.
 | 
			
		||||
        """
 | 
			
		||||
        self._tmpl_env = None
 | 
			
		||||
        self.openstack_release = openstack_release
 | 
			
		||||
        self._get_tmpl_env()
 | 
			
		||||
 | 
			
		||||
    def complete_contexts(self):
 | 
			
		||||
        '''
 | 
			
		||||
        Returns a list of context interfaces that yield a complete context.
 | 
			
		||||
        '''
 | 
			
		||||
        interfaces = []
 | 
			
		||||
        [interfaces.extend(i.complete_contexts())
 | 
			
		||||
         for i in six.itervalues(self.templates)]
 | 
			
		||||
        return interfaces
 | 
			
		||||
 | 
			
		||||
    def get_incomplete_context_data(self, interfaces):
 | 
			
		||||
        '''
 | 
			
		||||
        Return dictionary of relation status of interfaces and any missing
 | 
			
		||||
        required context data. Example:
 | 
			
		||||
            {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
 | 
			
		||||
             'zeromq-configuration': {'related': False}}
 | 
			
		||||
        '''
 | 
			
		||||
        incomplete_context_data = {}
 | 
			
		||||
 | 
			
		||||
        for i in six.itervalues(self.templates):
 | 
			
		||||
            for context in i.contexts:
 | 
			
		||||
                for interface in interfaces:
 | 
			
		||||
                    related = False
 | 
			
		||||
                    if interface in context.interfaces:
 | 
			
		||||
                        related = context.get_related()
 | 
			
		||||
                        missing_data = context.missing_data
 | 
			
		||||
                        if missing_data:
 | 
			
		||||
                            incomplete_context_data[interface] = {'missing_data': missing_data}
 | 
			
		||||
                        if related:
 | 
			
		||||
                            if incomplete_context_data.get(interface):
 | 
			
		||||
                                incomplete_context_data[interface].update({'related': True})
 | 
			
		||||
                            else:
 | 
			
		||||
                                incomplete_context_data[interface] = {'related': True}
 | 
			
		||||
                        else:
 | 
			
		||||
                            incomplete_context_data[interface] = {'related': False}
 | 
			
		||||
        return incomplete_context_data
 | 
			
		||||
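As a hypothetical sketch of how this renderer is typically wired up by a charm such as cinder-backup (the config path, contexts and helper name here are illustrative, not taken from this commit)::

    from charmhelpers.contrib.openstack import context, templating
    from charmhelpers.contrib.openstack.utils import os_release

    TEMPLATES = 'templates/'
    CINDER_CONF = '/etc/cinder/cinder.conf'

    def register_configs():
        release = os_release('cinder-common', base='icehouse')
        configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                              openstack_release=release)
        configs.register(CINDER_CONF, [context.CephContext()])
        return configs

    # hooks then call register_configs().write_all() when relations change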
							
								
								
									
977  hooks/charmhelpers/contrib/openstack/utils.py  Normal file
@@ -0,0 +1,977 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
# Common python helper functions used for OpenStack charms.
 | 
			
		||||
from collections import OrderedDict
 | 
			
		||||
from functools import wraps
 | 
			
		||||
 | 
			
		||||
import subprocess
 | 
			
		||||
import json
 | 
			
		||||
import os
 | 
			
		||||
import sys
 | 
			
		||||
import re
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
import traceback
 | 
			
		||||
import yaml
 | 
			
		||||
 | 
			
		||||
from charmhelpers.contrib.network import ip
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core import (
 | 
			
		||||
    unitdata,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    action_fail,
 | 
			
		||||
    action_set,
 | 
			
		||||
    config,
 | 
			
		||||
    log as juju_log,
 | 
			
		||||
    charm_dir,
 | 
			
		||||
    INFO,
 | 
			
		||||
    relation_ids,
 | 
			
		||||
    relation_set,
 | 
			
		||||
    status_set,
 | 
			
		||||
    hook_name
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
from charmhelpers.contrib.storage.linux.lvm import (
 | 
			
		||||
    deactivate_lvm_volume_group,
 | 
			
		||||
    is_lvm_physical_volume,
 | 
			
		||||
    remove_lvm_physical_volume,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
from charmhelpers.contrib.network.ip import (
 | 
			
		||||
    get_ipv6_addr,
 | 
			
		||||
    is_ipv6,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
from charmhelpers.contrib.python.packages import (
 | 
			
		||||
    pip_create_virtualenv,
 | 
			
		||||
    pip_install,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.host import lsb_release, mounts, umount
 | 
			
		||||
from charmhelpers.fetch import apt_install, apt_cache, install_remote
 | 
			
		||||
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
 | 
			
		||||
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
 | 
			
		||||
 | 
			
		||||
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 | 
			
		||||
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
 | 
			
		||||
 | 
			
		||||
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
 | 
			
		||||
                   'restricted main multiverse universe')
 | 
			
		||||
 | 
			
		||||
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
 | 
			
		||||
    ('oneiric', 'diablo'),
 | 
			
		||||
    ('precise', 'essex'),
 | 
			
		||||
    ('quantal', 'folsom'),
 | 
			
		||||
    ('raring', 'grizzly'),
 | 
			
		||||
    ('saucy', 'havana'),
 | 
			
		||||
    ('trusty', 'icehouse'),
 | 
			
		||||
    ('utopic', 'juno'),
 | 
			
		||||
    ('vivid', 'kilo'),
 | 
			
		||||
    ('wily', 'liberty'),
 | 
			
		||||
])
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
OPENSTACK_CODENAMES = OrderedDict([
 | 
			
		||||
    ('2011.2', 'diablo'),
 | 
			
		||||
    ('2012.1', 'essex'),
 | 
			
		||||
    ('2012.2', 'folsom'),
 | 
			
		||||
    ('2013.1', 'grizzly'),
 | 
			
		||||
    ('2013.2', 'havana'),
 | 
			
		||||
    ('2014.1', 'icehouse'),
 | 
			
		||||
    ('2014.2', 'juno'),
 | 
			
		||||
    ('2015.1', 'kilo'),
 | 
			
		||||
    ('2015.2', 'liberty'),
 | 
			
		||||
])
 | 
			
		||||
 | 
			
		||||
# The ugly duckling
 | 
			
		||||
SWIFT_CODENAMES = OrderedDict([
 | 
			
		||||
    ('1.4.3', 'diablo'),
 | 
			
		||||
    ('1.4.8', 'essex'),
 | 
			
		||||
    ('1.7.4', 'folsom'),
 | 
			
		||||
    ('1.8.0', 'grizzly'),
 | 
			
		||||
    ('1.7.7', 'grizzly'),
 | 
			
		||||
    ('1.7.6', 'grizzly'),
 | 
			
		||||
    ('1.10.0', 'havana'),
 | 
			
		||||
    ('1.9.1', 'havana'),
 | 
			
		||||
    ('1.9.0', 'havana'),
 | 
			
		||||
    ('1.13.1', 'icehouse'),
 | 
			
		||||
    ('1.13.0', 'icehouse'),
 | 
			
		||||
    ('1.12.0', 'icehouse'),
 | 
			
		||||
    ('1.11.0', 'icehouse'),
 | 
			
		||||
    ('2.0.0', 'juno'),
 | 
			
		||||
    ('2.1.0', 'juno'),
 | 
			
		||||
    ('2.2.0', 'juno'),
 | 
			
		||||
    ('2.2.1', 'kilo'),
 | 
			
		||||
    ('2.2.2', 'kilo'),
 | 
			
		||||
    ('2.3.0', 'liberty'),
 | 
			
		||||
    ('2.4.0', 'liberty'),
 | 
			
		||||
])
 | 
			
		||||
 | 
			
		||||
# >= Liberty version->codename mapping
 | 
			
		||||
PACKAGE_CODENAMES = {
 | 
			
		||||
    'nova-common': OrderedDict([
 | 
			
		||||
        ('12.0.0', 'liberty'),
 | 
			
		||||
    ]),
 | 
			
		||||
    'neutron-common': OrderedDict([
 | 
			
		||||
        ('7.0.0', 'liberty'),
 | 
			
		||||
    ]),
 | 
			
		||||
    'cinder-common': OrderedDict([
 | 
			
		||||
        ('7.0.0', 'liberty'),
 | 
			
		||||
    ]),
 | 
			
		||||
    'keystone': OrderedDict([
 | 
			
		||||
        ('8.0.0', 'liberty'),
 | 
			
		||||
    ]),
 | 
			
		||||
    'horizon-common': OrderedDict([
 | 
			
		||||
        ('8.0.0', 'liberty'),
 | 
			
		||||
    ]),
 | 
			
		||||
    'ceilometer-common': OrderedDict([
 | 
			
		||||
        ('5.0.0', 'liberty'),
 | 
			
		||||
    ]),
 | 
			
		||||
    'heat-common': OrderedDict([
 | 
			
		||||
        ('5.0.0', 'liberty'),
 | 
			
		||||
    ]),
 | 
			
		||||
    'glance-common': OrderedDict([
 | 
			
		||||
        ('11.0.0', 'liberty'),
 | 
			
		||||
    ]),
 | 
			
		||||
    'openstack-dashboard': OrderedDict([
 | 
			
		||||
        ('8.0.0', 'liberty'),
 | 
			
		||||
    ]),
 | 
			
		||||
}
 | 
			
		||||
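# Illustrative note, not part of the original module: get_os_codename_package()
# below consults PACKAGE_CODENAMES first for the per-project version numbers
# used from Liberty onwards, SWIFT_CODENAMES for swift packages, and
# OPENSTACK_CODENAMES for the older coordinated releases.  For example an
# installed cinder-common 7.0.0 maps to 'liberty', while 2014.1.x maps to
# 'icehouse'.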
 | 
			
		||||
DEFAULT_LOOPBACK_SIZE = '5G'
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def error_out(msg):
 | 
			
		||||
    juju_log("FATAL ERROR: %s" % msg, level='ERROR')
 | 
			
		||||
    sys.exit(1)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_os_codename_install_source(src):
 | 
			
		||||
    '''Derive OpenStack release codename from a given installation source.'''
 | 
			
		||||
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
 | 
			
		||||
    rel = ''
 | 
			
		||||
    if src is None:
 | 
			
		||||
        return rel
 | 
			
		||||
    if src in ['distro', 'distro-proposed']:
 | 
			
		||||
        try:
 | 
			
		||||
            rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
 | 
			
		||||
        except KeyError:
 | 
			
		||||
            e = 'Could not derive openstack release for '\
 | 
			
		||||
                'this Ubuntu release: %s' % ubuntu_rel
 | 
			
		||||
            error_out(e)
 | 
			
		||||
        return rel
 | 
			
		||||
 | 
			
		||||
    if src.startswith('cloud:'):
 | 
			
		||||
        ca_rel = src.split(':')[1]
 | 
			
		||||
        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
 | 
			
		||||
        return ca_rel
 | 
			
		||||
 | 
			
		||||
    # Best guess match based on deb string provided
 | 
			
		||||
    if src.startswith('deb') or src.startswith('ppa'):
 | 
			
		||||
        for k, v in six.iteritems(OPENSTACK_CODENAMES):
 | 
			
		||||
            if v in src:
 | 
			
		||||
                return v
 | 
			
		||||
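# Illustrative examples, not part of the original module, of what
# get_os_codename_install_source() returns on a trusty unit:
#
#     get_os_codename_install_source('distro')             -> 'icehouse'
#     get_os_codename_install_source('cloud:trusty-kilo')   -> 'kilo'
#     get_os_codename_install_source('deb ... precise-updates/havana main')
#                                                           -> 'havana'
#
# A 'deb'/'ppa' source only matches when an OPENSTACK_CODENAMES value appears
# literally in the string; otherwise the function falls through and returns None.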
 | 
			
		||||
 | 
			
		||||
def get_os_version_install_source(src):
 | 
			
		||||
    codename = get_os_codename_install_source(src)
 | 
			
		||||
    return get_os_version_codename(codename)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_os_codename_version(vers):
 | 
			
		||||
    '''Determine OpenStack codename from version number.'''
 | 
			
		||||
    try:
 | 
			
		||||
        return OPENSTACK_CODENAMES[vers]
 | 
			
		||||
    except KeyError:
 | 
			
		||||
        e = 'Could not determine OpenStack codename for version %s' % vers
 | 
			
		||||
        error_out(e)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
 | 
			
		||||
    '''Determine OpenStack version number from codename.'''
 | 
			
		||||
    for k, v in six.iteritems(version_map):
 | 
			
		||||
        if v == codename:
 | 
			
		||||
            return k
 | 
			
		||||
    e = 'Could not derive OpenStack version for '\
 | 
			
		||||
        'codename: %s' % codename
 | 
			
		||||
    error_out(e)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_os_codename_package(package, fatal=True):
 | 
			
		||||
    '''Derive OpenStack release codename from an installed package.'''
 | 
			
		||||
    import apt_pkg as apt
 | 
			
		||||
 | 
			
		||||
    cache = apt_cache()
 | 
			
		||||
 | 
			
		||||
    try:
 | 
			
		||||
        pkg = cache[package]
 | 
			
		||||
    except:
 | 
			
		||||
        if not fatal:
 | 
			
		||||
            return None
 | 
			
		||||
        # the package is unknown to the current apt cache.
 | 
			
		||||
        e = 'Could not determine version of package with no installation '\
 | 
			
		||||
            'candidate: %s' % package
 | 
			
		||||
        error_out(e)
 | 
			
		||||
 | 
			
		||||
    if not pkg.current_ver:
 | 
			
		||||
        if not fatal:
 | 
			
		||||
            return None
 | 
			
		||||
        # package is known, but no version is currently installed.
 | 
			
		||||
        e = 'Could not determine version of uninstalled package: %s' % package
 | 
			
		||||
        error_out(e)
 | 
			
		||||
 | 
			
		||||
    vers = apt.upstream_version(pkg.current_ver.ver_str)
 | 
			
		||||
    match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
 | 
			
		||||
    if match:
 | 
			
		||||
        vers = match.group(0)
 | 
			
		||||
 | 
			
		||||
    # >= Liberty independent project versions
 | 
			
		||||
    if (package in PACKAGE_CODENAMES and
 | 
			
		||||
            vers in PACKAGE_CODENAMES[package]):
 | 
			
		||||
        return PACKAGE_CODENAMES[package][vers]
 | 
			
		||||
    else:
 | 
			
		||||
        # < Liberty co-ordinated project versions
 | 
			
		||||
        try:
 | 
			
		||||
            if 'swift' in pkg.name:
 | 
			
		||||
                swift_vers = vers[:5]
 | 
			
		||||
                if swift_vers not in SWIFT_CODENAMES:
 | 
			
		||||
                    # Deal with 1.10.0 upward
 | 
			
		||||
                    swift_vers = vers[:6]
 | 
			
		||||
                return SWIFT_CODENAMES[swift_vers]
 | 
			
		||||
            else:
 | 
			
		||||
                vers = vers[:6]
 | 
			
		||||
                return OPENSTACK_CODENAMES[vers]
 | 
			
		||||
        except KeyError:
 | 
			
		||||
            if not fatal:
 | 
			
		||||
                return None
 | 
			
		||||
            e = 'Could not determine OpenStack codename for version %s' % vers
 | 
			
		||||
            error_out(e)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_os_version_package(pkg, fatal=True):
 | 
			
		||||
    '''Derive OpenStack version number from an installed package.'''
 | 
			
		||||
    codename = get_os_codename_package(pkg, fatal=fatal)
 | 
			
		||||
 | 
			
		||||
    if not codename:
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
    if 'swift' in pkg:
 | 
			
		||||
        vers_map = SWIFT_CODENAMES
 | 
			
		||||
    else:
 | 
			
		||||
        vers_map = OPENSTACK_CODENAMES
 | 
			
		||||
 | 
			
		||||
    for version, cname in six.iteritems(vers_map):
 | 
			
		||||
        if cname == codename:
 | 
			
		||||
            return version
 | 
			
		||||
    # e = "Could not determine OpenStack version for package: %s" % pkg
 | 
			
		||||
    # error_out(e)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
os_rel = None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def os_release(package, base='essex'):
 | 
			
		||||
    '''
 | 
			
		||||
    Returns OpenStack release codename from a cached global.
 | 
			
		||||
    If the codename can not be determined from either an installed package or
 | 
			
		||||
    the installation source, the earliest release supported by the charm should
 | 
			
		||||
    be returned.
 | 
			
		||||
    '''
 | 
			
		||||
    global os_rel
 | 
			
		||||
    if os_rel:
 | 
			
		||||
        return os_rel
 | 
			
		||||
    os_rel = (get_os_codename_package(package, fatal=False) or
 | 
			
		||||
              get_os_codename_install_source(config('openstack-origin')) or
 | 
			
		||||
              base)
 | 
			
		||||
    return os_rel
 | 
			
		||||
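# Illustrative usage, not part of the original module: the first call resolves
# the codename (installed package, then install source, then the 'base'
# fallback) and caches it in the module-level 'os_rel' global for the rest of
# the hook run.
#
#     os_release('cinder-common', base='icehouse')   # e.g. 'kilo'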
 | 
			
		||||
 | 
			
		||||
def import_key(keyid):
 | 
			
		||||
    cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
 | 
			
		||||
          "--recv-keys %s" % keyid
 | 
			
		||||
    try:
 | 
			
		||||
        subprocess.check_call(cmd.split(' '))
 | 
			
		||||
    except subprocess.CalledProcessError:
 | 
			
		||||
        error_out("Error importing repo key %s" % keyid)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def configure_installation_source(rel):
 | 
			
		||||
    '''Configure apt installation source.'''
 | 
			
		||||
    if rel == 'distro':
 | 
			
		||||
        return
 | 
			
		||||
    elif rel == 'distro-proposed':
 | 
			
		||||
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
 | 
			
		||||
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
 | 
			
		||||
            f.write(DISTRO_PROPOSED % ubuntu_rel)
 | 
			
		||||
    elif rel[:4] == "ppa:":
 | 
			
		||||
        src = rel
 | 
			
		||||
        subprocess.check_call(["add-apt-repository", "-y", src])
 | 
			
		||||
    elif rel[:3] == "deb":
 | 
			
		||||
        l = len(rel.split('|'))
 | 
			
		||||
        if l == 2:
 | 
			
		||||
            src, key = rel.split('|')
 | 
			
		||||
            juju_log("Importing PPA key from keyserver for %s" % src)
 | 
			
		||||
            import_key(key)
 | 
			
		||||
        elif l == 1:
 | 
			
		||||
            src = rel
 | 
			
		||||
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
 | 
			
		||||
            f.write(src)
 | 
			
		||||
    elif rel[:6] == 'cloud:':
 | 
			
		||||
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
 | 
			
		||||
        rel = rel.split(':')[1]
 | 
			
		||||
        u_rel = rel.split('-')[0]
 | 
			
		||||
        ca_rel = rel.split('-')[1]
 | 
			
		||||
 | 
			
		||||
        if u_rel != ubuntu_rel:
 | 
			
		||||
            e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
 | 
			
		||||
                'version (%s)' % (ca_rel, ubuntu_rel)
 | 
			
		||||
            error_out(e)
 | 
			
		||||
 | 
			
		||||
        if 'staging' in ca_rel:
 | 
			
		||||
            # staging is just a regular PPA.
 | 
			
		||||
            os_rel = ca_rel.split('/')[0]
 | 
			
		||||
            ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
 | 
			
		||||
            cmd = 'add-apt-repository -y %s' % ppa
 | 
			
		||||
            subprocess.check_call(cmd.split(' '))
 | 
			
		||||
            return
 | 
			
		||||
 | 
			
		||||
        # map charm config options to actual archive pockets.
 | 
			
		||||
        pockets = {
 | 
			
		||||
            'folsom': 'precise-updates/folsom',
 | 
			
		||||
            'folsom/updates': 'precise-updates/folsom',
 | 
			
		||||
            'folsom/proposed': 'precise-proposed/folsom',
 | 
			
		||||
            'grizzly': 'precise-updates/grizzly',
 | 
			
		||||
            'grizzly/updates': 'precise-updates/grizzly',
 | 
			
		||||
            'grizzly/proposed': 'precise-proposed/grizzly',
 | 
			
		||||
            'havana': 'precise-updates/havana',
 | 
			
		||||
            'havana/updates': 'precise-updates/havana',
 | 
			
		||||
            'havana/proposed': 'precise-proposed/havana',
 | 
			
		||||
            'icehouse': 'precise-updates/icehouse',
 | 
			
		||||
            'icehouse/updates': 'precise-updates/icehouse',
 | 
			
		||||
            'icehouse/proposed': 'precise-proposed/icehouse',
 | 
			
		||||
            'juno': 'trusty-updates/juno',
 | 
			
		||||
            'juno/updates': 'trusty-updates/juno',
 | 
			
		||||
            'juno/proposed': 'trusty-proposed/juno',
 | 
			
		||||
            'kilo': 'trusty-updates/kilo',
 | 
			
		||||
            'kilo/updates': 'trusty-updates/kilo',
 | 
			
		||||
            'kilo/proposed': 'trusty-proposed/kilo',
 | 
			
		||||
            'liberty': 'trusty-updates/liberty',
 | 
			
		||||
            'liberty/updates': 'trusty-updates/liberty',
 | 
			
		||||
            'liberty/proposed': 'trusty-proposed/liberty',
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        try:
 | 
			
		||||
            pocket = pockets[ca_rel]
 | 
			
		||||
        except KeyError:
 | 
			
		||||
            e = 'Invalid Cloud Archive release specified: %s' % rel
 | 
			
		||||
            error_out(e)
 | 
			
		||||
 | 
			
		||||
        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
 | 
			
		||||
        apt_install('ubuntu-cloud-keyring', fatal=True)
 | 
			
		||||
 | 
			
		||||
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
 | 
			
		||||
            f.write(src)
 | 
			
		||||
    else:
 | 
			
		||||
        error_out("Invalid openstack-release specified: %s" % rel)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def config_value_changed(option):
 | 
			
		||||
    """
 | 
			
		||||
    Determine if config value changed since last call to this function.
 | 
			
		||||
    """
 | 
			
		||||
    hook_data = unitdata.HookData()
 | 
			
		||||
    with hook_data():
 | 
			
		||||
        db = unitdata.kv()
 | 
			
		||||
        current = config(option)
 | 
			
		||||
        saved = db.get(option)
 | 
			
		||||
        db.set(option, current)
 | 
			
		||||
        if saved is None:
 | 
			
		||||
            return False
 | 
			
		||||
        return current != saved
 | 
			
		||||
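# Illustrative usage, not part of the original module ('backup-backend' is a
# hypothetical option name):
#
#     if config_value_changed('backup-backend'):
#         configs.write_all()   # re-render configs only when the value moved
#
# Note the stored copy in unitdata is updated on every call, so the function
# reports a change at most once per new value.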
 | 
			
		||||
 | 
			
		||||
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
 | 
			
		||||
    """
 | 
			
		||||
    Write an rc file in the charm-delivered directory containing
 | 
			
		||||
    exported environment variables provided by env_vars. Any charm scripts run
 | 
			
		||||
    outside the juju hook environment can source this scriptrc to obtain
 | 
			
		||||
    updated config information necessary to perform health checks or
 | 
			
		||||
    service changes.
 | 
			
		||||
    """
 | 
			
		||||
    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
 | 
			
		||||
    if not os.path.exists(os.path.dirname(juju_rc_path)):
 | 
			
		||||
        os.mkdir(os.path.dirname(juju_rc_path))
 | 
			
		||||
    with open(juju_rc_path, 'wb') as rc_script:
 | 
			
		||||
        rc_script.write(
 | 
			
		||||
            "#!/bin/bash\n")
 | 
			
		||||
        [rc_script.write('export %s=%s\n' % (u, p))
 | 
			
		||||
         for u, p in six.iteritems(env_vars) if u != "script_path"]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def openstack_upgrade_available(package):
 | 
			
		||||
    """
 | 
			
		||||
    Determines if an OpenStack upgrade is available from installation
 | 
			
		||||
    source, based on version of installed package.
 | 
			
		||||
 | 
			
		||||
    :param package: str: Name of installed package.
 | 
			
		||||
 | 
			
		||||
    :returns: bool:    : Returns True if configured installation source offers
 | 
			
		||||
                         a newer version of package.
 | 
			
		||||
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    import apt_pkg as apt
 | 
			
		||||
    src = config('openstack-origin')
 | 
			
		||||
    cur_vers = get_os_version_package(package)
 | 
			
		||||
    if "swift" in package:
 | 
			
		||||
        codename = get_os_codename_install_source(src)
 | 
			
		||||
        available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
 | 
			
		||||
    else:
 | 
			
		||||
        available_vers = get_os_version_install_source(src)
 | 
			
		||||
    apt.init()
 | 
			
		||||
    return apt.version_compare(available_vers, cur_vers) == 1
 | 
			
		||||
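# Illustrative usage, not part of the original module, as typically seen in a
# config-changed hook (do_openstack_upgrade is a hypothetical charm helper):
#
#     if openstack_upgrade_available('cinder-common'):
#         do_openstack_upgrade(configs)
#
# The check compares the installed package version with the version implied by
# the configured 'openstack-origin' using apt_pkg.version_compare().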
 | 
			
		||||
 | 
			
		||||
def ensure_block_device(block_device):
 | 
			
		||||
    '''
 | 
			
		||||
    Confirm block_device, create as loopback if necessary.
 | 
			
		||||
 | 
			
		||||
    :param block_device: str: Full path of block device to ensure.
 | 
			
		||||
 | 
			
		||||
    :returns: str: Full path of ensured block device.
 | 
			
		||||
    '''
 | 
			
		||||
    _none = ['None', 'none', None]
 | 
			
		||||
    if (block_device in _none):
 | 
			
		||||
        error_out('prepare_storage(): Missing required input: block_device=%s.'
 | 
			
		||||
                  % block_device)
 | 
			
		||||
 | 
			
		||||
    if block_device.startswith('/dev/'):
 | 
			
		||||
        bdev = block_device
 | 
			
		||||
    elif block_device.startswith('/'):
 | 
			
		||||
        _bd = block_device.split('|')
 | 
			
		||||
        if len(_bd) == 2:
 | 
			
		||||
            bdev, size = _bd
 | 
			
		||||
        else:
 | 
			
		||||
            bdev = block_device
 | 
			
		||||
            size = DEFAULT_LOOPBACK_SIZE
 | 
			
		||||
        bdev = ensure_loopback_device(bdev, size)
 | 
			
		||||
    else:
 | 
			
		||||
        bdev = '/dev/%s' % block_device
 | 
			
		||||
 | 
			
		||||
    if not is_block_device(bdev):
 | 
			
		||||
        error_out('Failed to locate valid block device at %s' % bdev)
 | 
			
		||||
 | 
			
		||||
    return bdev
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def clean_storage(block_device):
 | 
			
		||||
    '''
 | 
			
		||||
    Ensures a block device is clean.  That is:
 | 
			
		||||
        - unmounted
 | 
			
		||||
        - any lvm volume groups are deactivated
 | 
			
		||||
        - any lvm physical device signatures removed
 | 
			
		||||
        - partition table wiped
 | 
			
		||||
 | 
			
		||||
    :param block_device: str: Full path to block device to clean.
 | 
			
		||||
    '''
 | 
			
		||||
    for mp, d in mounts():
 | 
			
		||||
        if d == block_device:
 | 
			
		||||
            juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
 | 
			
		||||
                     (d, mp), level=INFO)
 | 
			
		||||
            umount(mp, persist=True)
 | 
			
		||||
 | 
			
		||||
    if is_lvm_physical_volume(block_device):
 | 
			
		||||
        deactivate_lvm_volume_group(block_device)
 | 
			
		||||
        remove_lvm_physical_volume(block_device)
 | 
			
		||||
    else:
 | 
			
		||||
        zap_disk(block_device)
 | 
			
		||||
 | 
			
		||||
is_ip = ip.is_ip
 | 
			
		||||
ns_query = ip.ns_query
 | 
			
		||||
get_host_ip = ip.get_host_ip
 | 
			
		||||
get_hostname = ip.get_hostname
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
 | 
			
		||||
    mm_map = {}
 | 
			
		||||
    if os.path.isfile(mm_file):
 | 
			
		||||
        with open(mm_file, 'r') as f:
 | 
			
		||||
            mm_map = json.load(f)
 | 
			
		||||
    return mm_map
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def sync_db_with_multi_ipv6_addresses(database, database_user,
 | 
			
		||||
                                      relation_prefix=None):
 | 
			
		||||
    hosts = get_ipv6_addr(dynamic_only=False)
 | 
			
		||||
 | 
			
		||||
    if config('vip'):
 | 
			
		||||
        vips = config('vip').split()
 | 
			
		||||
        for vip in vips:
 | 
			
		||||
            if vip and is_ipv6(vip):
 | 
			
		||||
                hosts.append(vip)
 | 
			
		||||
 | 
			
		||||
    kwargs = {'database': database,
 | 
			
		||||
              'username': database_user,
 | 
			
		||||
              'hostname': json.dumps(hosts)}
 | 
			
		||||
 | 
			
		||||
    if relation_prefix:
 | 
			
		||||
        for key in list(kwargs.keys()):
 | 
			
		||||
            kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
 | 
			
		||||
            del kwargs[key]
 | 
			
		||||
 | 
			
		||||
    for rid in relation_ids('shared-db'):
 | 
			
		||||
        relation_set(relation_id=rid, **kwargs)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def os_requires_version(ostack_release, pkg):
 | 
			
		||||
    """
 | 
			
		||||
    Decorator for hook to specify minimum supported release
 | 
			
		||||
    """
 | 
			
		||||
    def wrap(f):
 | 
			
		||||
        @wraps(f)
 | 
			
		||||
        def wrapped_f(*args):
 | 
			
		||||
            if os_release(pkg) < ostack_release:
 | 
			
		||||
                raise Exception("This hook is not supported on releases"
 | 
			
		||||
                                " before %s" % ostack_release)
 | 
			
		||||
            f(*args)
 | 
			
		||||
        return wrapped_f
 | 
			
		||||
    return wrap
 | 
			
		||||
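# Illustrative usage, not part of the original module (the hook name is
# hypothetical):
#
#     @hooks.hook('backup-backend-relation-joined')
#     @os_requires_version('icehouse', 'cinder-common')
#     def backup_backend_joined():
#         ...
#
# The guard relies on string comparison of codenames, which works because the
# release names ('diablo' .. 'liberty') happen to sort alphabetically in
# chronological order.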
 | 
			
		||||
 | 
			
		||||
def git_install_requested():
 | 
			
		||||
    """
 | 
			
		||||
    Returns true if openstack-origin-git is specified.
 | 
			
		||||
    """
 | 
			
		||||
    return config('openstack-origin-git') is not None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
requirements_dir = None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _git_yaml_load(projects_yaml):
 | 
			
		||||
    """
 | 
			
		||||
    Load the specified yaml into a dictionary.
 | 
			
		||||
    """
 | 
			
		||||
    if not projects_yaml:
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
    return yaml.load(projects_yaml)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def git_clone_and_install(projects_yaml, core_project, depth=1):
 | 
			
		||||
    """
 | 
			
		||||
    Clone/install all specified OpenStack repositories.
 | 
			
		||||
 | 
			
		||||
    The expected format of projects_yaml is:
 | 
			
		||||
 | 
			
		||||
        repositories:
 | 
			
		||||
          - {name: keystone,
 | 
			
		||||
             repository: 'git://git.openstack.org/openstack/keystone.git',
 | 
			
		||||
             branch: 'stable/icehouse'}
 | 
			
		||||
          - {name: requirements,
 | 
			
		||||
             repository: 'git://git.openstack.org/openstack/requirements.git',
 | 
			
		||||
             branch: 'stable/icehouse'}
 | 
			
		||||
 | 
			
		||||
        directory: /mnt/openstack-git
 | 
			
		||||
        http_proxy: squid-proxy-url
 | 
			
		||||
        https_proxy: squid-proxy-url
 | 
			
		||||
 | 
			
		||||
    The directory, http_proxy, and https_proxy keys are optional.
 | 
			
		||||
 | 
			
		||||
    """
 | 
			
		||||
    global requirements_dir
 | 
			
		||||
    parent_dir = '/mnt/openstack-git'
 | 
			
		||||
    http_proxy = None
 | 
			
		||||
 | 
			
		||||
    projects = _git_yaml_load(projects_yaml)
 | 
			
		||||
    _git_validate_projects_yaml(projects, core_project)
 | 
			
		||||
 | 
			
		||||
    old_environ = dict(os.environ)
 | 
			
		||||
 | 
			
		||||
    if 'http_proxy' in projects.keys():
 | 
			
		||||
        http_proxy = projects['http_proxy']
 | 
			
		||||
        os.environ['http_proxy'] = projects['http_proxy']
 | 
			
		||||
    if 'https_proxy' in projects.keys():
 | 
			
		||||
        os.environ['https_proxy'] = projects['https_proxy']
 | 
			
		||||
 | 
			
		||||
    if 'directory' in projects.keys():
 | 
			
		||||
        parent_dir = projects['directory']
 | 
			
		||||
 | 
			
		||||
    pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
 | 
			
		||||
 | 
			
		||||
    # Upgrade setuptools and pip from default virtualenv versions. The default
 | 
			
		||||
    # versions in trusty break master OpenStack branch deployments.
 | 
			
		||||
    for p in ['pip', 'setuptools']:
 | 
			
		||||
        pip_install(p, upgrade=True, proxy=http_proxy,
 | 
			
		||||
                    venv=os.path.join(parent_dir, 'venv'))
 | 
			
		||||
 | 
			
		||||
    for p in projects['repositories']:
 | 
			
		||||
        repo = p['repository']
 | 
			
		||||
        branch = p['branch']
 | 
			
		||||
        if p['name'] == 'requirements':
 | 
			
		||||
            repo_dir = _git_clone_and_install_single(repo, branch, depth,
 | 
			
		||||
                                                     parent_dir, http_proxy,
 | 
			
		||||
                                                     update_requirements=False)
 | 
			
		||||
            requirements_dir = repo_dir
 | 
			
		||||
        else:
 | 
			
		||||
            repo_dir = _git_clone_and_install_single(repo, branch, depth,
 | 
			
		||||
                                                     parent_dir, http_proxy,
 | 
			
		||||
                                                     update_requirements=True)
 | 
			
		||||
 | 
			
		||||
    os.environ = old_environ
 | 
			
		||||
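# Illustrative usage, not part of the original module: with YAML in the format
# shown in the docstring above supplied via the 'openstack-origin-git' config
# option, an install hook would typically do
#
#     projects_yaml = config('openstack-origin-git')
#     if git_install_requested():
#         git_clone_and_install(projects_yaml, core_project='cinder')
#
# where the core project ('cinder' here) must be the last entry in the
# repositories list and 'requirements' must be the first.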
 | 
			
		||||
 | 
			
		||||
def _git_validate_projects_yaml(projects, core_project):
 | 
			
		||||
    """
 | 
			
		||||
    Validate the projects yaml.
 | 
			
		||||
    """
 | 
			
		||||
    _git_ensure_key_exists('repositories', projects)
 | 
			
		||||
 | 
			
		||||
    for project in projects['repositories']:
 | 
			
		||||
        _git_ensure_key_exists('name', project.keys())
 | 
			
		||||
        _git_ensure_key_exists('repository', project.keys())
 | 
			
		||||
        _git_ensure_key_exists('branch', project.keys())
 | 
			
		||||
 | 
			
		||||
    if projects['repositories'][0]['name'] != 'requirements':
 | 
			
		||||
        error_out('{} git repo must be specified first'.format('requirements'))
 | 
			
		||||
 | 
			
		||||
    if projects['repositories'][-1]['name'] != core_project:
 | 
			
		||||
        error_out('{} git repo must be specified last'.format(core_project))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _git_ensure_key_exists(key, keys):
 | 
			
		||||
    """
 | 
			
		||||
    Ensure that key exists in keys.
 | 
			
		||||
    """
 | 
			
		||||
    if key not in keys:
 | 
			
		||||
        error_out('openstack-origin-git key \'{}\' is missing'.format(key))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
 | 
			
		||||
                                  update_requirements):
 | 
			
		||||
    """
 | 
			
		||||
    Clone and install a single git repository.
 | 
			
		||||
    """
 | 
			
		||||
    dest_dir = os.path.join(parent_dir, os.path.basename(repo))
 | 
			
		||||
 | 
			
		||||
    if not os.path.exists(parent_dir):
 | 
			
		||||
        juju_log('Directory does not exist at {}. '
                 'Creating directory.'.format(parent_dir))
 | 
			
		||||
        os.mkdir(parent_dir)
 | 
			
		||||
 | 
			
		||||
    if not os.path.exists(dest_dir):
 | 
			
		||||
        juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
 | 
			
		||||
        repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
 | 
			
		||||
                                  depth=depth)
 | 
			
		||||
    else:
 | 
			
		||||
        repo_dir = dest_dir
 | 
			
		||||
 | 
			
		||||
    venv = os.path.join(parent_dir, 'venv')
 | 
			
		||||
 | 
			
		||||
    if update_requirements:
 | 
			
		||||
        if not requirements_dir:
 | 
			
		||||
            error_out('requirements repo must be cloned before '
 | 
			
		||||
                      'updating from global requirements.')
 | 
			
		||||
        _git_update_requirements(venv, repo_dir, requirements_dir)
 | 
			
		||||
 | 
			
		||||
    juju_log('Installing git repo from dir: {}'.format(repo_dir))
 | 
			
		||||
    if http_proxy:
 | 
			
		||||
        pip_install(repo_dir, proxy=http_proxy, venv=venv)
 | 
			
		||||
    else:
 | 
			
		||||
        pip_install(repo_dir, venv=venv)
 | 
			
		||||
 | 
			
		||||
    return repo_dir
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _git_update_requirements(venv, package_dir, reqs_dir):
 | 
			
		||||
    """
 | 
			
		||||
    Update from global requirements.
 | 
			
		||||
 | 
			
		||||
    Update an OpenStack git directory's requirements.txt and
 | 
			
		||||
    test-requirements.txt from global-requirements.txt.
 | 
			
		||||
    """
 | 
			
		||||
    orig_dir = os.getcwd()
 | 
			
		||||
    os.chdir(reqs_dir)
 | 
			
		||||
    python = os.path.join(venv, 'bin/python')
 | 
			
		||||
    cmd = [python, 'update.py', package_dir]
 | 
			
		||||
    try:
 | 
			
		||||
        subprocess.check_call(cmd)
 | 
			
		||||
    except subprocess.CalledProcessError:
 | 
			
		||||
        package = os.path.basename(package_dir)
 | 
			
		||||
        error_out("Error updating {} from "
 | 
			
		||||
                  "global-requirements.txt".format(package))
 | 
			
		||||
    os.chdir(orig_dir)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def git_pip_venv_dir(projects_yaml):
 | 
			
		||||
    """
 | 
			
		||||
    Return the pip virtualenv path.
 | 
			
		||||
    """
 | 
			
		||||
    parent_dir = '/mnt/openstack-git'
 | 
			
		||||
 | 
			
		||||
    projects = _git_yaml_load(projects_yaml)
 | 
			
		||||
 | 
			
		||||
    if 'directory' in projects.keys():
 | 
			
		||||
        parent_dir = projects['directory']
 | 
			
		||||
 | 
			
		||||
    return os.path.join(parent_dir, 'venv')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def git_src_dir(projects_yaml, project):
 | 
			
		||||
    """
 | 
			
		||||
    Return the directory where the specified project's source is located.
 | 
			
		||||
    """
 | 
			
		||||
    parent_dir = '/mnt/openstack-git'
 | 
			
		||||
 | 
			
		||||
    projects = _git_yaml_load(projects_yaml)
 | 
			
		||||
 | 
			
		||||
    if 'directory' in projects.keys():
 | 
			
		||||
        parent_dir = projects['directory']
 | 
			
		||||
 | 
			
		||||
    for p in projects['repositories']:
 | 
			
		||||
        if p['name'] == project:
 | 
			
		||||
            return os.path.join(parent_dir, os.path.basename(p['repository']))
 | 
			
		||||
 | 
			
		||||
    return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def git_yaml_value(projects_yaml, key):
 | 
			
		||||
    """
 | 
			
		||||
    Return the value in projects_yaml for the specified key.
 | 
			
		||||
    """
 | 
			
		||||
    projects = _git_yaml_load(projects_yaml)
 | 
			
		||||
 | 
			
		||||
    if key in projects.keys():
 | 
			
		||||
        return projects[key]
 | 
			
		||||
 | 
			
		||||
    return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def os_workload_status(configs, required_interfaces, charm_func=None):
 | 
			
		||||
    """
 | 
			
		||||
    Decorator to set workload status based on complete contexts
 | 
			
		||||
    """
 | 
			
		||||
    def wrap(f):
 | 
			
		||||
        @wraps(f)
 | 
			
		||||
        def wrapped_f(*args, **kwargs):
 | 
			
		||||
            # Run the original function first
 | 
			
		||||
            f(*args, **kwargs)
 | 
			
		||||
            # Set workload status now that contexts have been
 | 
			
		||||
            # acted on
 | 
			
		||||
            set_os_workload_status(configs, required_interfaces, charm_func)
 | 
			
		||||
        return wrapped_f
 | 
			
		||||
    return wrap
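# NOTE: illustrative usage sketch only (not part of charm-helpers); the hook
# name, CONFIGS object and REQUIRED_INTERFACES mapping below are assumptions
# of the kind a consuming charm would define itself:
#
#   REQUIRED_INTERFACES = {'storage-backend': ['storage-backend']}
#
#   @hooks.hook('config-changed')
#   @os_workload_status(CONFIGS, REQUIRED_INTERFACES)
#   def config_changed():
#       CONFIGS.write_all()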
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def set_os_workload_status(configs, required_interfaces, charm_func=None):
 | 
			
		||||
    """
 | 
			
		||||
    Set workload status based on complete contexts.
 | 
			
		||||
    status-set is called for missing or incomplete contexts
 | 
			
		||||
    and juju-log reports details of missing required data.
 | 
			
		||||
    charm_func is a charm specific function to run checking
 | 
			
		||||
    for charm specific requirements such as a VIP setting.
 | 
			
		||||
    """
 | 
			
		||||
    incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
 | 
			
		||||
    state = 'active'
 | 
			
		||||
    missing_relations = []
 | 
			
		||||
    incomplete_relations = []
 | 
			
		||||
    message = None
 | 
			
		||||
    charm_state = None
 | 
			
		||||
    charm_message = None
 | 
			
		||||
 | 
			
		||||
    for generic_interface in incomplete_rel_data.keys():
 | 
			
		||||
        related_interface = None
 | 
			
		||||
        missing_data = {}
 | 
			
		||||
        # Related or not?
 | 
			
		||||
        for interface in incomplete_rel_data[generic_interface]:
 | 
			
		||||
            if incomplete_rel_data[generic_interface][interface].get('related'):
 | 
			
		||||
                related_interface = interface
 | 
			
		||||
                missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
 | 
			
		||||
        # No relation ID for the generic_interface
 | 
			
		||||
        if not related_interface:
 | 
			
		||||
            juju_log("{} relation is missing and must be related for "
 | 
			
		||||
                     "functionality. ".format(generic_interface), 'WARN')
 | 
			
		||||
            state = 'blocked'
 | 
			
		||||
            if generic_interface not in missing_relations:
 | 
			
		||||
                missing_relations.append(generic_interface)
 | 
			
		||||
        else:
 | 
			
		||||
            # Relation ID exists but no related unit
 | 
			
		||||
            if not missing_data:
 | 
			
		||||
                # Edge case relation ID exists but departing
 | 
			
		||||
                if ('departed' in hook_name() or 'broken' in hook_name()) \
 | 
			
		||||
                        and related_interface in hook_name():
 | 
			
		||||
                    state = 'blocked'
 | 
			
		||||
                    if generic_interface not in missing_relations:
 | 
			
		||||
                        missing_relations.append(generic_interface)
 | 
			
		||||
                    juju_log("{} relation's interface, {}, "
 | 
			
		||||
                             "is departed or broken "
 | 
			
		||||
                             "and is required for functionality."
 | 
			
		||||
                             "".format(generic_interface, related_interface), "WARN")
 | 
			
		||||
                # Normal case relation ID exists but no related unit
 | 
			
		||||
                # (joining)
 | 
			
		||||
                else:
 | 
			
		||||
                    juju_log("{} relation's interface, {}, is related but has "
 | 
			
		||||
                             "no units in the relation."
 | 
			
		||||
                             "".format(generic_interface, related_interface), "INFO")
 | 
			
		||||
            # Related unit exists and data missing on the relation
 | 
			
		||||
            else:
 | 
			
		||||
                juju_log("{} relation's interface, {}, is related awaiting "
 | 
			
		||||
                         "the following data from the relationship: {}. "
 | 
			
		||||
                         "".format(generic_interface, related_interface,
 | 
			
		||||
                                   ", ".join(missing_data)), "INFO")
 | 
			
		||||
            if state != 'blocked':
 | 
			
		||||
                state = 'waiting'
 | 
			
		||||
            if generic_interface not in incomplete_relations \
 | 
			
		||||
                    and generic_interface not in missing_relations:
 | 
			
		||||
                incomplete_relations.append(generic_interface)
 | 
			
		||||
 | 
			
		||||
    if missing_relations:
 | 
			
		||||
        message = "Missing relations: {}".format(", ".join(missing_relations))
 | 
			
		||||
        if incomplete_relations:
 | 
			
		||||
            message += "; incomplete relations: {}" \
 | 
			
		||||
                       "".format(", ".join(incomplete_relations))
 | 
			
		||||
        state = 'blocked'
 | 
			
		||||
    elif incomplete_relations:
 | 
			
		||||
        message = "Incomplete relations: {}" \
 | 
			
		||||
                  "".format(", ".join(incomplete_relations))
 | 
			
		||||
        state = 'waiting'
 | 
			
		||||
 | 
			
		||||
    # Run charm specific checks
 | 
			
		||||
    if charm_func:
 | 
			
		||||
        charm_state, charm_message = charm_func(configs)
 | 
			
		||||
        if charm_state != 'active' and charm_state != 'unknown':
 | 
			
		||||
            state = workload_state_compare(state, charm_state)
 | 
			
		||||
            if message:
 | 
			
		||||
                message = "{} {}".format(message, charm_message)
 | 
			
		||||
            else:
 | 
			
		||||
                message = charm_message
 | 
			
		||||
 | 
			
		||||
    # Set to active if all requirements have been met
 | 
			
		||||
    if state == 'active':
 | 
			
		||||
        message = "Unit is ready"
 | 
			
		||||
        juju_log(message, "INFO")
 | 
			
		||||
 | 
			
		||||
    status_set(state, message)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def workload_state_compare(current_workload_state, workload_state):
 | 
			
		||||
    """ Return highest priority of two states"""
 | 
			
		||||
    hierarchy = {'unknown': -1,
 | 
			
		||||
                 'active': 0,
 | 
			
		||||
                 'maintenance': 1,
 | 
			
		||||
                 'waiting': 2,
 | 
			
		||||
                 'blocked': 3,
 | 
			
		||||
                 }
 | 
			
		||||
 | 
			
		||||
    if hierarchy.get(workload_state) is None:
 | 
			
		||||
        workload_state = 'unknown'
 | 
			
		||||
    if hierarchy.get(current_workload_state) is None:
 | 
			
		||||
        current_workload_state = 'unknown'
 | 
			
		||||
 | 
			
		||||
    # Set workload_state based on hierarchy of statuses
 | 
			
		||||
    if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
 | 
			
		||||
        return current_workload_state
 | 
			
		||||
    else:
 | 
			
		||||
        return workload_state
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def incomplete_relation_data(configs, required_interfaces):
 | 
			
		||||
    """
 | 
			
		||||
    Check complete contexts against required_interfaces
 | 
			
		||||
    Return dictionary of incomplete relation data.
 | 
			
		||||
 | 
			
		||||
    configs is an OSConfigRenderer object with configs registered
 | 
			
		||||
 | 
			
		||||
    required_interfaces is a dictionary of required general interfaces
 | 
			
		||||
    with dictionary values of possible specific interfaces.
 | 
			
		||||
    Example:
 | 
			
		||||
    required_interfaces = {'database': ['shared-db', 'pgsql-db']}
 | 
			
		||||
 | 
			
		||||
    The interface is said to be satisfied if any one of the interfaces in the
 | 
			
		||||
    list has a complete context.
 | 
			
		||||
 | 
			
		||||
    Return dictionary of incomplete or missing required contexts with relation
 | 
			
		||||
    status of interfaces and any missing data points. Example:
 | 
			
		||||
        {'message':
 | 
			
		||||
             {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
 | 
			
		||||
              'zeromq-configuration': {'related': False}},
 | 
			
		||||
         'identity':
 | 
			
		||||
             {'identity-service': {'related': False}},
 | 
			
		||||
         'database':
 | 
			
		||||
             {'pgsql-db': {'related': False},
 | 
			
		||||
              'shared-db': {'related': True}}}
 | 
			
		||||
    """
 | 
			
		||||
    complete_ctxts = configs.complete_contexts()
 | 
			
		||||
    incomplete_relations = []
 | 
			
		||||
    for svc_type in required_interfaces.keys():
 | 
			
		||||
        # Avoid duplicates
 | 
			
		||||
        found_ctxt = False
 | 
			
		||||
        for interface in required_interfaces[svc_type]:
 | 
			
		||||
            if interface in complete_ctxts:
 | 
			
		||||
                found_ctxt = True
 | 
			
		||||
        if not found_ctxt:
 | 
			
		||||
            incomplete_relations.append(svc_type)
 | 
			
		||||
    incomplete_context_data = {}
 | 
			
		||||
    for i in incomplete_relations:
 | 
			
		||||
        incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
 | 
			
		||||
    return incomplete_context_data
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def do_action_openstack_upgrade(package, upgrade_callback, configs):
 | 
			
		||||
    """Perform action-managed OpenStack upgrade.
 | 
			
		||||
 | 
			
		||||
    Upgrades packages to the configured openstack-origin version and sets
 | 
			
		||||
    the corresponding action status as a result.
 | 
			
		||||
 | 
			
		||||
    If the charm was installed from source we cannot upgrade it.
 | 
			
		||||
    For backwards compatibility a config flag (action-managed-upgrade) must
 | 
			
		||||
    be set for this code to run, otherwise a full service level upgrade will
 | 
			
		||||
    fire on config-changed.
 | 
			
		||||
 | 
			
		||||
    @param package: package name for determining if upgrade available
 | 
			
		||||
    @param upgrade_callback: function callback to charm's upgrade function
 | 
			
		||||
    @param configs: templating object derived from OSConfigRenderer class
 | 
			
		||||
 | 
			
		||||
    @return: True if upgrade successful; False if upgrade failed or skipped
 | 
			
		||||
    """
 | 
			
		||||
    ret = False
 | 
			
		||||
 | 
			
		||||
    if git_install_requested():
 | 
			
		||||
        action_set({'outcome': 'installed from source, skipped upgrade.'})
 | 
			
		||||
    else:
 | 
			
		||||
        if openstack_upgrade_available(package):
 | 
			
		||||
            if config('action-managed-upgrade'):
 | 
			
		||||
                juju_log('Upgrading OpenStack release')
 | 
			
		||||
 | 
			
		||||
                try:
 | 
			
		||||
                    upgrade_callback(configs=configs)
 | 
			
		||||
                    action_set({'outcome': 'success, upgrade completed.'})
 | 
			
		||||
                    ret = True
 | 
			
		||||
                except:
 | 
			
		||||
                    action_set({'outcome': 'upgrade failed, see traceback.'})
 | 
			
		||||
                    action_set({'traceback': traceback.format_exc()})
 | 
			
		||||
                    action_fail('do_openstack_upgrade resulted in an '
 | 
			
		||||
                                'unexpected error')
 | 
			
		||||
            else:
 | 
			
		||||
                action_set({'outcome': 'action-managed-upgrade config is '
 | 
			
		||||
                                       'False, skipped upgrade.'})
 | 
			
		||||
        else:
 | 
			
		||||
            action_set({'outcome': 'no upgrade available.'})
 | 
			
		||||
 | 
			
		||||
    return ret
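# NOTE: illustrative sketch only (not part of charm-helpers); shows how a
# charm's openstack-upgrade action might call this helper. The package name,
# do_openstack_upgrade callback and CONFIGS object are assumptions:
#
#   if do_action_openstack_upgrade('cinder-common',
#                                  do_openstack_upgrade,
#                                  CONFIGS):
#       config_changed()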
 | 
			
		||||
							
								
								
									
15 hooks/charmhelpers/contrib/python/__init__.py Normal file
@@ -0,0 +1,15 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
							
								
								
									
121 hooks/charmhelpers/contrib/python/packages.py Normal file
@@ -0,0 +1,121 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
# coding: utf-8
 | 
			
		||||
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import subprocess
 | 
			
		||||
 | 
			
		||||
from charmhelpers.fetch import apt_install, apt_update
 | 
			
		||||
from charmhelpers.core.hookenv import charm_dir, log
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    from pip import main as pip_execute
 | 
			
		||||
except ImportError:
 | 
			
		||||
    apt_update()
 | 
			
		||||
    apt_install('python-pip')
 | 
			
		||||
    from pip import main as pip_execute
 | 
			
		||||
 | 
			
		||||
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def parse_options(given, available):
 | 
			
		||||
    """Given a set of options, check if available"""
 | 
			
		||||
    for key, value in sorted(given.items()):
 | 
			
		||||
        if not value:
 | 
			
		||||
            continue
 | 
			
		||||
        if key in available:
 | 
			
		||||
            yield "--{0}={1}".format(key, value)
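# Illustrative example (not part of charm-helpers): given
# parse_options({'proxy': 'http://squid:3128', 'log': None}, ('proxy', 'log'))
# the generator yields only '--proxy=http://squid:3128'; keys with falsy
# values, or keys not listed in 'available', are skipped.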
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def pip_install_requirements(requirements, **options):
 | 
			
		||||
    """Install a requirements file """
 | 
			
		||||
    command = ["install"]
 | 
			
		||||
 | 
			
		||||
    available_options = ('proxy', 'src', 'log', )
 | 
			
		||||
    for option in parse_options(options, available_options):
 | 
			
		||||
        command.append(option)
 | 
			
		||||
 | 
			
		||||
    command.append("-r {0}".format(requirements))
 | 
			
		||||
    log("Installing from file: {} with options: {}".format(requirements,
 | 
			
		||||
                                                           command))
 | 
			
		||||
    pip_execute(command)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
 | 
			
		||||
    """Install a python package"""
 | 
			
		||||
    if venv:
 | 
			
		||||
        venv_python = os.path.join(venv, 'bin/pip')
 | 
			
		||||
        command = [venv_python, "install"]
 | 
			
		||||
    else:
 | 
			
		||||
        command = ["install"]
 | 
			
		||||
 | 
			
		||||
    available_options = ('proxy', 'src', 'log', 'index-url', )
 | 
			
		||||
    for option in parse_options(options, available_options):
 | 
			
		||||
        command.append(option)
 | 
			
		||||
 | 
			
		||||
    if upgrade:
 | 
			
		||||
        command.append('--upgrade')
 | 
			
		||||
 | 
			
		||||
    if isinstance(package, list):
 | 
			
		||||
        command.extend(package)
 | 
			
		||||
    else:
 | 
			
		||||
        command.append(package)
 | 
			
		||||
 | 
			
		||||
    log("Installing {} package with options: {}".format(package,
 | 
			
		||||
                                                        command))
 | 
			
		||||
    if venv:
 | 
			
		||||
        subprocess.check_call(command)
 | 
			
		||||
    else:
 | 
			
		||||
        pip_execute(command)
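# Illustrative usage (not part of charm-helpers; the package names, proxy URL
# and venv path below are assumptions):
#
#   pip_install('mock', upgrade=True, proxy='http://squid:3128')
#   pip_install(['requests', 'six'], venv='/mnt/openstack-git/venv')
#
# When venv is given the command runs <venv>/bin/pip via subprocess;
# otherwise the in-process pip entry point (pip_execute) is used.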
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def pip_uninstall(package, **options):
 | 
			
		||||
    """Uninstall a python package"""
 | 
			
		||||
    command = ["uninstall", "-q", "-y"]
 | 
			
		||||
 | 
			
		||||
    available_options = ('proxy', 'log', )
 | 
			
		||||
    for option in parse_options(options, available_options):
 | 
			
		||||
        command.append(option)
 | 
			
		||||
 | 
			
		||||
    if isinstance(package, list):
 | 
			
		||||
        command.extend(package)
 | 
			
		||||
    else:
 | 
			
		||||
        command.append(package)
 | 
			
		||||
 | 
			
		||||
    log("Uninstalling {} package with options: {}".format(package,
 | 
			
		||||
                                                          command))
 | 
			
		||||
    pip_execute(command)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def pip_list():
 | 
			
		||||
    """Return the list of currently installed python packages
 | 
			
		||||
    """
 | 
			
		||||
    return pip_execute(["list"])
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def pip_create_virtualenv(path=None):
 | 
			
		||||
    """Create an isolated Python environment."""
 | 
			
		||||
    apt_install('python-virtualenv')
 | 
			
		||||
 | 
			
		||||
    if path:
 | 
			
		||||
        venv_path = path
 | 
			
		||||
    else:
 | 
			
		||||
        venv_path = os.path.join(charm_dir(), 'venv')
 | 
			
		||||
 | 
			
		||||
    if not os.path.exists(venv_path):
 | 
			
		||||
        subprocess.check_call(['virtualenv', venv_path])
 | 
			
		||||
							
								
								
									
15 hooks/charmhelpers/contrib/storage/__init__.py Normal file
@@ -0,0 +1,15 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
							
								
								
									
15 hooks/charmhelpers/contrib/storage/linux/__init__.py Normal file
@@ -0,0 +1,15 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
							
								
								
									
657 hooks/charmhelpers/contrib/storage/linux/ceph.py Normal file
@@ -0,0 +1,657 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
#
 | 
			
		||||
# Copyright 2012 Canonical Ltd.
 | 
			
		||||
#
 | 
			
		||||
# This file is sourced from lp:openstack-charm-helpers
 | 
			
		||||
#
 | 
			
		||||
# Authors:
 | 
			
		||||
#  James Page <james.page@ubuntu.com>
 | 
			
		||||
#  Adam Gandelman <adamg@ubuntu.com>
 | 
			
		||||
#
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import shutil
 | 
			
		||||
import json
 | 
			
		||||
import time
 | 
			
		||||
import uuid
 | 
			
		||||
 | 
			
		||||
from subprocess import (
 | 
			
		||||
    check_call,
 | 
			
		||||
    check_output,
 | 
			
		||||
    CalledProcessError,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    local_unit,
 | 
			
		||||
    relation_get,
 | 
			
		||||
    relation_ids,
 | 
			
		||||
    relation_set,
 | 
			
		||||
    related_units,
 | 
			
		||||
    log,
 | 
			
		||||
    DEBUG,
 | 
			
		||||
    INFO,
 | 
			
		||||
    WARNING,
 | 
			
		||||
    ERROR,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.core.host import (
 | 
			
		||||
    mount,
 | 
			
		||||
    mounts,
 | 
			
		||||
    service_start,
 | 
			
		||||
    service_stop,
 | 
			
		||||
    service_running,
 | 
			
		||||
    umount,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.fetch import (
 | 
			
		||||
    apt_install,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.kernel import modprobe
 | 
			
		||||
 | 
			
		||||
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
 | 
			
		||||
KEYFILE = '/etc/ceph/ceph.client.{}.key'
 | 
			
		||||
 | 
			
		||||
CEPH_CONF = """[global]
 | 
			
		||||
auth supported = {auth}
 | 
			
		||||
keyring = {keyring}
 | 
			
		||||
mon host = {mon_hosts}
 | 
			
		||||
log to syslog = {use_syslog}
 | 
			
		||||
err to syslog = {use_syslog}
 | 
			
		||||
clog to syslog = {use_syslog}
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def install():
 | 
			
		||||
    """Basic Ceph client installation."""
 | 
			
		||||
    ceph_dir = "/etc/ceph"
 | 
			
		||||
    if not os.path.exists(ceph_dir):
 | 
			
		||||
        os.mkdir(ceph_dir)
 | 
			
		||||
 | 
			
		||||
    apt_install('ceph-common', fatal=True)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def rbd_exists(service, pool, rbd_img):
 | 
			
		||||
    """Check to see if a RADOS block device exists."""
 | 
			
		||||
    try:
 | 
			
		||||
        out = check_output(['rbd', 'list', '--id',
 | 
			
		||||
                            service, '--pool', pool]).decode('UTF-8')
 | 
			
		||||
    except CalledProcessError:
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
    return rbd_img in out
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def create_rbd_image(service, pool, image, sizemb):
 | 
			
		||||
    """Create a new RADOS block device."""
 | 
			
		||||
    cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
 | 
			
		||||
           '--pool', pool]
 | 
			
		||||
    check_call(cmd)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def pool_exists(service, name):
 | 
			
		||||
    """Check to see if a RADOS pool already exists."""
 | 
			
		||||
    try:
 | 
			
		||||
        out = check_output(['rados', '--id', service,
 | 
			
		||||
                            'lspools']).decode('UTF-8')
 | 
			
		||||
    except CalledProcessError:
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
    return name in out
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_osds(service):
 | 
			
		||||
    """Return a list of all Ceph Object Storage Daemons currently in the
 | 
			
		||||
    cluster.
 | 
			
		||||
    """
 | 
			
		||||
    version = ceph_version()
 | 
			
		||||
    if version and version >= '0.56':
 | 
			
		||||
        return json.loads(check_output(['ceph', '--id', service,
 | 
			
		||||
                                        'osd', 'ls',
 | 
			
		||||
                                        '--format=json']).decode('UTF-8'))
 | 
			
		||||
 | 
			
		||||
    return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def create_pool(service, name, replicas=3):
 | 
			
		||||
    """Create a new RADOS pool."""
 | 
			
		||||
    if pool_exists(service, name):
 | 
			
		||||
        log("Ceph pool {} already exists, skipping creation".format(name),
 | 
			
		||||
            level=WARNING)
 | 
			
		||||
        return
 | 
			
		||||
 | 
			
		||||
    # Calculate the number of placement groups based
 | 
			
		||||
    # on upstream recommended best practices.
 | 
			
		||||
    osds = get_osds(service)
 | 
			
		||||
    if osds:
 | 
			
		||||
        pgnum = (len(osds) * 100 // replicas)
 | 
			
		||||
    else:
 | 
			
		||||
        # NOTE(james-page): Default to 200 for older ceph versions
 | 
			
		||||
        # which don't support OSD query from cli
 | 
			
		||||
        pgnum = 200
 | 
			
		||||
 | 
			
		||||
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
 | 
			
		||||
    check_call(cmd)
 | 
			
		||||
 | 
			
		||||
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
 | 
			
		||||
           str(replicas)]
 | 
			
		||||
    check_call(cmd)
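# Worked example of the placement group sizing above (illustrative only):
# with 6 OSDs reported and the default replica count of 3,
# pgnum = 6 * 100 // 3 = 200; when the OSD list cannot be queried (older
# ceph releases), the default of 200 placement groups is used instead.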
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def delete_pool(service, name):
 | 
			
		||||
    """Delete a RADOS pool from ceph."""
 | 
			
		||||
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
 | 
			
		||||
           '--yes-i-really-really-mean-it']
 | 
			
		||||
    check_call(cmd)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _keyfile_path(service):
 | 
			
		||||
    return KEYFILE.format(service)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _keyring_path(service):
 | 
			
		||||
    return KEYRING.format(service)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def create_keyring(service, key):
 | 
			
		||||
    """Create a new Ceph keyring containing key."""
 | 
			
		||||
    keyring = _keyring_path(service)
 | 
			
		||||
    if os.path.exists(keyring):
 | 
			
		||||
        log('Ceph keyring exists at %s.' % keyring, level=WARNING)
 | 
			
		||||
        return
 | 
			
		||||
 | 
			
		||||
    cmd = ['ceph-authtool', keyring, '--create-keyring',
 | 
			
		||||
           '--name=client.{}'.format(service), '--add-key={}'.format(key)]
 | 
			
		||||
    check_call(cmd)
 | 
			
		||||
    log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def delete_keyring(service):
 | 
			
		||||
    """Delete an existing Ceph keyring."""
 | 
			
		||||
    keyring = _keyring_path(service)
 | 
			
		||||
    if not os.path.exists(keyring):
 | 
			
		||||
        log('Keyring does not exist at %s' % keyring, level=WARNING)
 | 
			
		||||
        return
 | 
			
		||||
 | 
			
		||||
    os.remove(keyring)
 | 
			
		||||
    log('Deleted keyring at %s.' % keyring, level=INFO)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def create_key_file(service, key):
 | 
			
		||||
    """Create a file containing key."""
 | 
			
		||||
    keyfile = _keyfile_path(service)
 | 
			
		||||
    if os.path.exists(keyfile):
 | 
			
		||||
        log('Keyfile exists at %s.' % keyfile, level=WARNING)
 | 
			
		||||
        return
 | 
			
		||||
 | 
			
		||||
    with open(keyfile, 'w') as fd:
 | 
			
		||||
        fd.write(key)
 | 
			
		||||
 | 
			
		||||
    log('Created new keyfile at %s.' % keyfile, level=INFO)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_ceph_nodes():
 | 
			
		||||
    """Query named relation 'ceph' to determine current nodes."""
 | 
			
		||||
    hosts = []
 | 
			
		||||
    for r_id in relation_ids('ceph'):
 | 
			
		||||
        for unit in related_units(r_id):
 | 
			
		||||
            hosts.append(relation_get('private-address', unit=unit, rid=r_id))
 | 
			
		||||
 | 
			
		||||
    return hosts
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def configure(service, key, auth, use_syslog):
 | 
			
		||||
    """Perform basic configuration of Ceph."""
 | 
			
		||||
    create_keyring(service, key)
 | 
			
		||||
    create_key_file(service, key)
 | 
			
		||||
    hosts = get_ceph_nodes()
 | 
			
		||||
    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
 | 
			
		||||
        ceph_conf.write(CEPH_CONF.format(auth=auth,
 | 
			
		||||
                                         keyring=_keyring_path(service),
 | 
			
		||||
                                         mon_hosts=",".join(map(str, hosts)),
 | 
			
		||||
                                         use_syslog=use_syslog))
 | 
			
		||||
    modprobe('rbd')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def image_mapped(name):
 | 
			
		||||
    """Determine whether a RADOS block device is mapped locally."""
 | 
			
		||||
    try:
 | 
			
		||||
        out = check_output(['rbd', 'showmapped']).decode('UTF-8')
 | 
			
		||||
    except CalledProcessError:
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
    return name in out
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def map_block_storage(service, pool, image):
 | 
			
		||||
    """Map a RADOS block device for local use."""
 | 
			
		||||
    cmd = [
 | 
			
		||||
        'rbd',
 | 
			
		||||
        'map',
 | 
			
		||||
        '{}/{}'.format(pool, image),
 | 
			
		||||
        '--user',
 | 
			
		||||
        service,
 | 
			
		||||
        '--secret',
 | 
			
		||||
        _keyfile_path(service),
 | 
			
		||||
    ]
 | 
			
		||||
    check_call(cmd)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def filesystem_mounted(fs):
 | 
			
		||||
    """Determine whether a filesystem is already mounted."""
 | 
			
		||||
    return fs in [f for f, m in mounts()]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def make_filesystem(blk_device, fstype='ext4', timeout=10):
 | 
			
		||||
    """Make a new filesystem on the specified block device."""
 | 
			
		||||
    count = 0
 | 
			
		||||
    e_noent = os.errno.ENOENT
 | 
			
		||||
    while not os.path.exists(blk_device):
 | 
			
		||||
        if count >= timeout:
 | 
			
		||||
            log('Gave up waiting on block device %s' % blk_device,
 | 
			
		||||
                level=ERROR)
 | 
			
		||||
            raise IOError(e_noent, os.strerror(e_noent), blk_device)
 | 
			
		||||
 | 
			
		||||
        log('Waiting for block device %s to appear' % blk_device,
 | 
			
		||||
            level=DEBUG)
 | 
			
		||||
        count += 1
 | 
			
		||||
        time.sleep(1)
 | 
			
		||||
    else:
 | 
			
		||||
        log('Formatting block device %s as filesystem %s.' %
 | 
			
		||||
            (blk_device, fstype), level=INFO)
 | 
			
		||||
        check_call(['mkfs', '-t', fstype, blk_device])
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def place_data_on_block_device(blk_device, data_src_dst):
 | 
			
		||||
    """Migrate data in data_src_dst to blk_device and then remount."""
 | 
			
		||||
    # mount block device into /mnt
 | 
			
		||||
    mount(blk_device, '/mnt')
 | 
			
		||||
    # copy data to /mnt
 | 
			
		||||
    copy_files(data_src_dst, '/mnt')
 | 
			
		||||
    # umount block device
 | 
			
		||||
    umount('/mnt')
 | 
			
		||||
    # Grab user/group IDs from original source
 | 
			
		||||
    _dir = os.stat(data_src_dst)
 | 
			
		||||
    uid = _dir.st_uid
 | 
			
		||||
    gid = _dir.st_gid
 | 
			
		||||
    # re-mount where the data should originally be
 | 
			
		||||
    # TODO: persist is currently a NO-OP in core.host
 | 
			
		||||
    mount(blk_device, data_src_dst, persist=True)
 | 
			
		||||
    # ensure original ownership of new mount.
 | 
			
		||||
    os.chown(data_src_dst, uid, gid)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def copy_files(src, dst, symlinks=False, ignore=None):
 | 
			
		||||
    """Copy files from src to dst."""
 | 
			
		||||
    for item in os.listdir(src):
 | 
			
		||||
        s = os.path.join(src, item)
 | 
			
		||||
        d = os.path.join(dst, item)
 | 
			
		||||
        if os.path.isdir(s):
 | 
			
		||||
            shutil.copytree(s, d, symlinks, ignore)
 | 
			
		||||
        else:
 | 
			
		||||
            shutil.copy2(s, d)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
 | 
			
		||||
                        blk_device, fstype, system_services=[],
 | 
			
		||||
                        replicas=3):
 | 
			
		||||
    """NOTE: This function must only be called from a single service unit for
 | 
			
		||||
    the same rbd_img otherwise data loss will occur.
 | 
			
		||||
 | 
			
		||||
    Ensures given pool and RBD image exists, is mapped to a block device,
 | 
			
		||||
    and the device is formatted and mounted at the given mount_point.
 | 
			
		||||
 | 
			
		||||
    If formatting a device for the first time, data existing at mount_point
 | 
			
		||||
    will be migrated to the RBD device before being re-mounted.
 | 
			
		||||
 | 
			
		||||
    All services listed in system_services will be stopped prior to data
 | 
			
		||||
    migration and restarted when complete.
 | 
			
		||||
    """
 | 
			
		||||
    # Ensure pool, RBD image, RBD mappings are in place.
 | 
			
		||||
    if not pool_exists(service, pool):
 | 
			
		||||
        log('Creating new pool {}.'.format(pool), level=INFO)
 | 
			
		||||
        create_pool(service, pool, replicas=replicas)
 | 
			
		||||
 | 
			
		||||
    if not rbd_exists(service, pool, rbd_img):
 | 
			
		||||
        log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
 | 
			
		||||
        create_rbd_image(service, pool, rbd_img, sizemb)
 | 
			
		||||
 | 
			
		||||
    if not image_mapped(rbd_img):
 | 
			
		||||
        log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
 | 
			
		||||
            level=INFO)
 | 
			
		||||
        map_block_storage(service, pool, rbd_img)
 | 
			
		||||
 | 
			
		||||
    # make file system
 | 
			
		||||
    # TODO: What happens if for whatever reason this is run again and
 | 
			
		||||
    # the data is already in the rbd device and/or is mounted??
 | 
			
		||||
    # When it is mounted already, it will fail to make the fs
 | 
			
		||||
    # XXX: This is really sketchy!  Need to at least add an fstab entry
 | 
			
		||||
    #      otherwise this hook will blow away existing data if it's executed
 | 
			
		||||
    #      after a reboot.
 | 
			
		||||
    if not filesystem_mounted(mount_point):
 | 
			
		||||
        make_filesystem(blk_device, fstype)
 | 
			
		||||
 | 
			
		||||
        for svc in system_services:
 | 
			
		||||
            if service_running(svc):
 | 
			
		||||
                log('Stopping services {} prior to migrating data.'
 | 
			
		||||
                    .format(svc), level=DEBUG)
 | 
			
		||||
                service_stop(svc)
 | 
			
		||||
 | 
			
		||||
        place_data_on_block_device(blk_device, mount_point)
 | 
			
		||||
 | 
			
		||||
        for svc in system_services:
 | 
			
		||||
            log('Starting service {} after migrating data.'
 | 
			
		||||
                .format(svc), level=DEBUG)
 | 
			
		||||
            service_start(svc)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def ensure_ceph_keyring(service, user=None, group=None):
 | 
			
		||||
    """Ensures a ceph keyring is created for a named service and optionally
 | 
			
		||||
    ensures user and group ownership.
 | 
			
		||||
 | 
			
		||||
    Returns False if no ceph key is available in relation state.
 | 
			
		||||
    """
 | 
			
		||||
    key = None
 | 
			
		||||
    for rid in relation_ids('ceph'):
 | 
			
		||||
        for unit in related_units(rid):
 | 
			
		||||
            key = relation_get('key', rid=rid, unit=unit)
 | 
			
		||||
            if key:
 | 
			
		||||
                break
 | 
			
		||||
 | 
			
		||||
    if not key:
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
    create_keyring(service=service, key=key)
 | 
			
		||||
    keyring = _keyring_path(service)
 | 
			
		||||
    if user and group:
 | 
			
		||||
        check_call(['chown', '%s.%s' % (user, group), keyring])
 | 
			
		||||
 | 
			
		||||
    return True
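# Illustrative usage (not part of charm-helpers; the service, user and group
# names are assumptions typical of a cinder-related charm):
#
#   if not ensure_ceph_keyring(service='cinder-backup',
#                              user='cinder', group='cinder'):
#       log('ceph relation incomplete, deferring keyring creation',
#           level=DEBUG)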
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def ceph_version():
 | 
			
		||||
    """Retrieve the local version of ceph."""
 | 
			
		||||
    if os.path.exists('/usr/bin/ceph'):
 | 
			
		||||
        cmd = ['ceph', '-v']
 | 
			
		||||
        output = check_output(cmd).decode('US-ASCII')
 | 
			
		||||
        output = output.split()
 | 
			
		||||
        if len(output) > 3:
 | 
			
		||||
            return output[2]
 | 
			
		||||
        else:
 | 
			
		||||
            return None
 | 
			
		||||
    else:
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class CephBrokerRq(object):
 | 
			
		||||
    """Ceph broker request.
 | 
			
		||||
 | 
			
		||||
    Multiple operations can be added to a request and sent to the Ceph broker
 | 
			
		||||
    to be executed.
 | 
			
		||||
 | 
			
		||||
    Request is json-encoded for sending over the wire.
 | 
			
		||||
 | 
			
		||||
    The API is versioned and defaults to version 1.
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, api_version=1, request_id=None):
 | 
			
		||||
        self.api_version = api_version
 | 
			
		||||
        if request_id:
 | 
			
		||||
            self.request_id = request_id
 | 
			
		||||
        else:
 | 
			
		||||
            self.request_id = str(uuid.uuid1())
 | 
			
		||||
        self.ops = []
 | 
			
		||||
 | 
			
		||||
    def add_op_create_pool(self, name, replica_count=3):
 | 
			
		||||
        self.ops.append({'op': 'create-pool', 'name': name,
 | 
			
		||||
                         'replicas': replica_count})
 | 
			
		||||
 | 
			
		||||
    def set_ops(self, ops):
 | 
			
		||||
        """Set request ops to provided value.
 | 
			
		||||
 | 
			
		||||
        Useful for injecting ops that come from a previous request
 | 
			
		||||
        to allow comparisons to ensure validity.
 | 
			
		||||
        """
 | 
			
		||||
        self.ops = ops
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def request(self):
 | 
			
		||||
        return json.dumps({'api-version': self.api_version, 'ops': self.ops,
 | 
			
		||||
                           'request-id': self.request_id})
 | 
			
		||||
 | 
			
		||||
    def _ops_equal(self, other):
 | 
			
		||||
        if len(self.ops) == len(other.ops):
 | 
			
		||||
            for req_no in range(0, len(self.ops)):
 | 
			
		||||
                for key in ['replicas', 'name', 'op']:
 | 
			
		||||
                    if self.ops[req_no][key] != other.ops[req_no][key]:
 | 
			
		||||
                        return False
 | 
			
		||||
        else:
 | 
			
		||||
            return False
 | 
			
		||||
        return True
 | 
			
		||||
 | 
			
		||||
    def __eq__(self, other):
 | 
			
		||||
        if not isinstance(other, self.__class__):
 | 
			
		||||
            return False
 | 
			
		||||
        if self.api_version == other.api_version and \
 | 
			
		||||
                self._ops_equal(other):
 | 
			
		||||
            return True
 | 
			
		||||
        else:
 | 
			
		||||
            return False
 | 
			
		||||
 | 
			
		||||
    def __ne__(self, other):
 | 
			
		||||
        return not self.__eq__(other)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class CephBrokerRsp(object):
 | 
			
		||||
    """Ceph broker response.
 | 
			
		||||
 | 
			
		||||
    Response is json-decoded and contents provided as methods/properties.
 | 
			
		||||
 | 
			
		||||
    The API is versioned and defaults to version 1.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, encoded_rsp):
 | 
			
		||||
        self.api_version = None
 | 
			
		||||
        self.rsp = json.loads(encoded_rsp)
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def request_id(self):
 | 
			
		||||
        return self.rsp.get('request-id')
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def exit_code(self):
 | 
			
		||||
        return self.rsp.get('exit-code')
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def exit_msg(self):
 | 
			
		||||
        return self.rsp.get('stderr')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# Ceph Broker Conversation:
 | 
			
		||||
# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
 | 
			
		||||
# and send that request to ceph via the ceph relation. The CephBrokerRq has a
 | 
			
		||||
# unique id so that the client can identify which CephBrokerRsp is associated
 | 
			
		||||
# with the request. Ceph will also respond to each client unit individually
 | 
			
		||||
# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
 | 
			
		||||
# via key broker-rsp-glance-0
 | 
			
		||||
#
 | 
			
		||||
# To use this the charm can just do something like:
 | 
			
		||||
#
 | 
			
		||||
# from charmhelpers.contrib.storage.linux.ceph import (
 | 
			
		||||
#     send_request_if_needed,
 | 
			
		||||
#     is_request_complete,
 | 
			
		||||
#     CephBrokerRq,
 | 
			
		||||
# )
 | 
			
		||||
#
 | 
			
		||||
# @hooks.hook('ceph-relation-changed')
 | 
			
		||||
# def ceph_changed():
 | 
			
		||||
#     rq = CephBrokerRq()
 | 
			
		||||
#     rq.add_op_create_pool(name='poolname', replica_count=3)
 | 
			
		||||
#
 | 
			
		||||
#     if is_request_complete(rq):
 | 
			
		||||
#         <Request complete actions>
 | 
			
		||||
#     else:
 | 
			
		||||
#         send_request_if_needed(get_ceph_request())
 | 
			
		||||
#
 | 
			
		||||
# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
 | 
			
		||||
# of glance having sent a request to ceph which ceph has successfully processed
 | 
			
		||||
#  'ceph:8': {
 | 
			
		||||
#      'ceph/0': {
 | 
			
		||||
#          'auth': 'cephx',
 | 
			
		||||
#          'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
 | 
			
		||||
#          'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
 | 
			
		||||
#          'ceph-public-address': '10.5.44.103',
 | 
			
		||||
#          'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
 | 
			
		||||
#          'private-address': '10.5.44.103',
 | 
			
		||||
#      },
 | 
			
		||||
#      'glance/0': {
 | 
			
		||||
#          'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
 | 
			
		||||
#                         '"ops": [{"replicas": 3, "name": "glance", '
 | 
			
		||||
#                         '"op": "create-pool"}]}'),
 | 
			
		||||
#          'private-address': '10.5.44.109',
 | 
			
		||||
#      },
 | 
			
		||||
#  }
 | 
			
		||||
 | 
			
		||||
def get_previous_request(rid):
 | 
			
		||||
    """Return the last ceph broker request sent on a given relation
 | 
			
		||||
 | 
			
		||||
    @param rid: Relation id to query for request
 | 
			
		||||
    """
 | 
			
		||||
    request = None
 | 
			
		||||
    broker_req = relation_get(attribute='broker_req', rid=rid,
 | 
			
		||||
                              unit=local_unit())
 | 
			
		||||
    if broker_req:
 | 
			
		||||
        request_data = json.loads(broker_req)
 | 
			
		||||
        request = CephBrokerRq(api_version=request_data['api-version'],
 | 
			
		||||
                               request_id=request_data['request-id'])
 | 
			
		||||
        request.set_ops(request_data['ops'])
 | 
			
		||||
 | 
			
		||||
    return request
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_request_states(request):
 | 
			
		||||
    """Return a dict of requests per relation id with their corresponding
 | 
			
		||||
       completion state.
 | 
			
		||||
 | 
			
		||||
    This allows a charm, which has a request for ceph, to see whether there is
 | 
			
		||||
    an equivalent request already being processed and if so what state that
 | 
			
		||||
    request is in.
 | 
			
		||||
 | 
			
		||||
    @param request: A CephBrokerRq object
 | 
			
		||||
    """
 | 
			
		||||
    complete = []
 | 
			
		||||
    requests = {}
 | 
			
		||||
    for rid in relation_ids('ceph'):
 | 
			
		||||
        complete = False
 | 
			
		||||
        previous_request = get_previous_request(rid)
 | 
			
		||||
        if request == previous_request:
 | 
			
		||||
            sent = True
 | 
			
		||||
            complete = is_request_complete_for_rid(previous_request, rid)
 | 
			
		||||
        else:
 | 
			
		||||
            sent = False
 | 
			
		||||
            complete = False
 | 
			
		||||
 | 
			
		||||
        requests[rid] = {
 | 
			
		||||
            'sent': sent,
 | 
			
		||||
            'complete': complete,
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
    return requests
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_request_sent(request):
 | 
			
		||||
    """Check to see if a functionally equivalent request has already been sent
 | 
			
		||||
 | 
			
		||||
    Returns True if a similar request has been sent
 | 
			
		||||
 | 
			
		||||
    @param request: A CephBrokerRq object
 | 
			
		||||
    """
 | 
			
		||||
    states = get_request_states(request)
 | 
			
		||||
    for rid in states.keys():
 | 
			
		||||
        if not states[rid]['sent']:
 | 
			
		||||
            return False
 | 
			
		||||
 | 
			
		||||
    return True
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_request_complete(request):
 | 
			
		||||
    """Check to see if a functionally equivalent request has already been
 | 
			
		||||
    completed
 | 
			
		||||
 | 
			
		||||
    Returns True if a similar request has been completed
 | 
			
		||||
 | 
			
		||||
    @param request: A CephBrokerRq object
 | 
			
		||||
    """
 | 
			
		||||
    states = get_request_states(request)
 | 
			
		||||
    for rid in states.keys():
 | 
			
		||||
        if not states[rid]['complete']:
 | 
			
		||||
            return False
 | 
			
		||||
 | 
			
		||||
    return True
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_request_complete_for_rid(request, rid):
 | 
			
		||||
    """Check if a given request has been completed on the given relation
 | 
			
		||||
 | 
			
		||||
    @param request: A CephBrokerRq object
 | 
			
		||||
    @param rid: Relation ID
 | 
			
		||||
    """
 | 
			
		||||
    broker_key = get_broker_rsp_key()
 | 
			
		||||
    for unit in related_units(rid):
 | 
			
		||||
        rdata = relation_get(rid=rid, unit=unit)
 | 
			
		||||
        if rdata.get(broker_key):
 | 
			
		||||
            rsp = CephBrokerRsp(rdata.get(broker_key))
 | 
			
		||||
            if rsp.request_id == request.request_id:
 | 
			
		||||
                if not rsp.exit_code:
 | 
			
		||||
                    return True
 | 
			
		||||
        else:
 | 
			
		||||
            # The remote unit sent no reply targeted at this unit so either the
 | 
			
		||||
            # remote ceph cluster does not support unit targeted replies or it
 | 
			
		||||
            # has not processed our request yet.
 | 
			
		||||
            if rdata.get('broker_rsp'):
 | 
			
		||||
                request_data = json.loads(rdata['broker_rsp'])
 | 
			
		||||
                if request_data.get('request-id'):
 | 
			
		||||
                    log('Ignoring legacy broker_rsp without unit key as remote '
 | 
			
		||||
                        'service supports unit specific replies', level=DEBUG)
 | 
			
		||||
                else:
 | 
			
		||||
                    log('Using legacy broker_rsp as remote service does not '
 | 
			
		||||
                        'support unit specific replies', level=DEBUG)
 | 
			
		||||
                    rsp = CephBrokerRsp(rdata['broker_rsp'])
 | 
			
		||||
                    if not rsp.exit_code:
 | 
			
		||||
                        return True
 | 
			
		||||
 | 
			
		||||
    return False
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_broker_rsp_key():
 | 
			
		||||
    """Return broker response key for this unit
 | 
			
		||||
 | 
			
		||||
    This is the key that ceph is going to use to pass request status
 | 
			
		||||
    information back to this unit
 | 
			
		||||
    """
 | 
			
		||||
    return 'broker-rsp-' + local_unit().replace('/', '-')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def send_request_if_needed(request):
 | 
			
		||||
    """Send broker request if an equivalent request has not already been sent
 | 
			
		||||
 | 
			
		||||
    @param request: A CephBrokerRq object
 | 
			
		||||
    """
 | 
			
		||||
    if is_request_sent(request):
 | 
			
		||||
        log('Request already sent but not complete, not sending new request',
 | 
			
		||||
            level=DEBUG)
 | 
			
		||||
    else:
 | 
			
		||||
        for rid in relation_ids('ceph'):
 | 
			
		||||
            log('Sending request {}'.format(request.request_id), level=DEBUG)
 | 
			
		||||
            relation_set(relation_id=rid, broker_req=request.request)
 | 
			
		||||
							
								
								
									
78 hooks/charmhelpers/contrib/storage/linux/loopback.py Normal file
@@ -0,0 +1,78 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import re
 | 
			
		||||
from subprocess import (
 | 
			
		||||
    check_call,
 | 
			
		||||
    check_output,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##################################################
 | 
			
		||||
# loopback device helpers.
 | 
			
		||||
##################################################
 | 
			
		||||
def loopback_devices():
 | 
			
		||||
    '''
 | 
			
		||||
    Parse through 'losetup -a' output to determine currently mapped
 | 
			
		||||
    loopback devices. Output is expected to look like:
 | 
			
		||||
 | 
			
		||||
        /dev/loop0: [0807]:961814 (/tmp/my.img)
 | 
			
		||||
 | 
			
		||||
    :returns: dict: a dict mapping {loopback_dev: backing_file}
 | 
			
		||||
    '''
 | 
			
		||||
    loopbacks = {}
 | 
			
		||||
    cmd = ['losetup', '-a']
 | 
			
		||||
    devs = [d.strip().split(' ') for d in
 | 
			
		||||
            check_output(cmd).splitlines() if d != '']
 | 
			
		||||
    for dev, _, f in devs:
 | 
			
		||||
        loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
 | 
			
		||||
    return loopbacks
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def create_loopback(file_path):
 | 
			
		||||
    '''
 | 
			
		||||
    Create a loopback device for a given backing file.
 | 
			
		||||
 | 
			
		||||
    :returns: str: Full path to new loopback device (eg, /dev/loop0)
 | 
			
		||||
    '''
 | 
			
		||||
    file_path = os.path.abspath(file_path)
 | 
			
		||||
    check_call(['losetup', '--find', file_path])
 | 
			
		||||
    for d, f in six.iteritems(loopback_devices()):
 | 
			
		||||
        if f == file_path:
 | 
			
		||||
            return d
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def ensure_loopback_device(path, size):
 | 
			
		||||
    '''
 | 
			
		||||
    Ensure a loopback device exists for a given backing file path and size.
 | 
			
		||||
    If a loopback device is not mapped to the file, a new one will be created.
 | 
			
		||||
 | 
			
		||||
    TODO: Confirm size of found loopback device.
 | 
			
		||||
 | 
			
		||||
    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
 | 
			
		||||
    '''
 | 
			
		||||
    for d, f in six.iteritems(loopback_devices()):
 | 
			
		||||
        if f == path:
 | 
			
		||||
            return d
 | 
			
		||||
 | 
			
		||||
    if not os.path.exists(path):
 | 
			
		||||
        cmd = ['truncate', '--size', size, path]
 | 
			
		||||
        check_call(cmd)
 | 
			
		||||
 | 
			
		||||
    return create_loopback(path)
 | 
			
		||||
							
								
								
									
105 hooks/charmhelpers/contrib/storage/linux/lvm.py Normal file
@@ -0,0 +1,105 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
from subprocess import (
 | 
			
		||||
    CalledProcessError,
 | 
			
		||||
    check_call,
 | 
			
		||||
    check_output,
 | 
			
		||||
    Popen,
 | 
			
		||||
    PIPE,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##################################################
 | 
			
		||||
# LVM helpers.
 | 
			
		||||
##################################################
 | 
			
		||||
def deactivate_lvm_volume_group(block_device):
 | 
			
		||||
    '''
 | 
			
		||||
    Deactivate any volume group associated with an LVM physical volume.
 | 
			
		||||
 | 
			
		||||
    :param block_device: str: Full path to LVM physical volume
 | 
			
		||||
    '''
 | 
			
		||||
    vg = list_lvm_volume_group(block_device)
 | 
			
		||||
    if vg:
 | 
			
		||||
        cmd = ['vgchange', '-an', vg]
 | 
			
		||||
        check_call(cmd)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_lvm_physical_volume(block_device):
 | 
			
		||||
    '''
 | 
			
		||||
    Determine whether a block device is initialized as an LVM PV.
 | 
			
		||||
 | 
			
		||||
    :param block_device: str: Full path of block device to inspect.
 | 
			
		||||
 | 
			
		||||
    :returns: boolean: True if block device is a PV, False if not.
 | 
			
		||||
    '''
 | 
			
		||||
    try:
 | 
			
		||||
        check_output(['pvdisplay', block_device])
 | 
			
		||||
        return True
 | 
			
		||||
    except CalledProcessError:
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def remove_lvm_physical_volume(block_device):
 | 
			
		||||
    '''
 | 
			
		||||
    Remove LVM PV signatures from a given block device.
 | 
			
		||||
 | 
			
		||||
    :param block_device: str: Full path of block device to scrub.
 | 
			
		||||
    '''
 | 
			
		||||
    p = Popen(['pvremove', '-ff', block_device],
 | 
			
		||||
              stdin=PIPE)
 | 
			
		||||
    p.communicate(input='y\n')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def list_lvm_volume_group(block_device):
 | 
			
		||||
    '''
 | 
			
		||||
    List LVM volume group associated with a given block device.
 | 
			
		||||
 | 
			
		||||
    Assumes block device is a valid LVM PV.
 | 
			
		||||
 | 
			
		||||
    :param block_device: str: Full path of block device to inspect.
 | 
			
		||||
 | 
			
		||||
    :returns: str: Name of volume group associated with block device or None
 | 
			
		||||
    '''
 | 
			
		||||
    vg = None
 | 
			
		||||
    pvd = check_output(['pvdisplay', block_device]).splitlines()
 | 
			
		||||
    for l in pvd:
 | 
			
		||||
        l = l.decode('UTF-8')
 | 
			
		||||
        if l.strip().startswith('VG Name'):
 | 
			
		||||
            vg = ' '.join(l.strip().split()[2:])
 | 
			
		||||
    return vg
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def create_lvm_physical_volume(block_device):
 | 
			
		||||
    '''
 | 
			
		||||
    Initialize a block device as an LVM physical volume.
 | 
			
		||||
 | 
			
		||||
    :param block_device: str: Full path of block device to initialize.
 | 
			
		||||
 | 
			
		||||
    '''
 | 
			
		||||
    check_call(['pvcreate', block_device])
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def create_lvm_volume_group(volume_group, block_device):
 | 
			
		||||
    '''
 | 
			
		||||
    Create an LVM volume group backed by a given block device.
 | 
			
		||||
 | 
			
		||||
    Assumes block device has already been initialized as an LVM PV.
 | 
			
		||||
 | 
			
		||||
    :param volume_group: str: Name of volume group to create.
 | 
			
		||||
    :block_device: str: Full path of PV-initialized block device.
 | 
			
		||||
    '''
 | 
			
		||||
    check_call(['vgcreate', volume_group, block_device])
 | 
			
		||||
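A short sketch of how a charm might combine these LVM helpers (device and volume-group names are illustrative, not part of this charm):

    # Usage sketch only: prepare a block device as a fresh volume group,
    # scrubbing any previous LVM state first. Names are illustrative.
    from charmhelpers.contrib.storage.linux.lvm import (
        create_lvm_physical_volume,
        create_lvm_volume_group,
        deactivate_lvm_volume_group,
        is_lvm_physical_volume,
        remove_lvm_physical_volume,
    )

    device = '/dev/vdb'
    if is_lvm_physical_volume(device):
        # Deactivate and wipe whatever volume group was left behind.
        deactivate_lvm_volume_group(device)
        remove_lvm_physical_volume(device)
    create_lvm_physical_volume(device)
    create_lvm_volume_group('cinder-volumes', device)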
							
								
								
									
										71
								hooks/charmhelpers/contrib/storage/linux/utils.py
										Normal file
							@@ -0,0 +1,71 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import re
 | 
			
		||||
from stat import S_ISBLK
 | 
			
		||||
 | 
			
		||||
from subprocess import (
 | 
			
		||||
    check_call,
 | 
			
		||||
    check_output,
 | 
			
		||||
    call
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_block_device(path):
 | 
			
		||||
    '''
 | 
			
		||||
    Confirm device at path is a valid block device node.
 | 
			
		||||
 | 
			
		||||
    :returns: boolean: True if path is a block device, False if not.
 | 
			
		||||
    '''
 | 
			
		||||
    if not os.path.exists(path):
 | 
			
		||||
        return False
 | 
			
		||||
    return S_ISBLK(os.stat(path).st_mode)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def zap_disk(block_device):
 | 
			
		||||
    '''
 | 
			
		||||
    Clear a block device of its partition table. Relies on sgdisk, which is
 | 
			
		||||
    installed as part of the 'gdisk' package in Ubuntu.
 | 
			
		||||
 | 
			
		||||
    :param block_device: str: Full path of block device to clean.
 | 
			
		||||
    '''
 | 
			
		||||
    # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
 | 
			
		||||
    # sometimes sgdisk exits non-zero; this is OK, dd will clean up
 | 
			
		||||
    call(['sgdisk', '--zap-all', '--', block_device])
 | 
			
		||||
    call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
 | 
			
		||||
    dev_end = check_output(['blockdev', '--getsz',
 | 
			
		||||
                            block_device]).decode('UTF-8')
 | 
			
		||||
    gpt_end = int(dev_end.split()[0]) - 100
 | 
			
		||||
    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
 | 
			
		||||
                'bs=1M', 'count=1'])
 | 
			
		||||
    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
 | 
			
		||||
                'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_device_mounted(device):
 | 
			
		||||
    '''Given a device path, return True if that device is mounted, and False
 | 
			
		||||
    if it isn't.
 | 
			
		||||
 | 
			
		||||
    :param device: str: Full path of the device to check.
 | 
			
		||||
    :returns: boolean: True if the path represents a mounted device, False if
 | 
			
		||||
        it doesn't.
 | 
			
		||||
    '''
 | 
			
		||||
    is_partition = bool(re.search(r".*[0-9]+\b", device))
 | 
			
		||||
    out = check_output(['mount']).decode('UTF-8')
 | 
			
		||||
    if is_partition:
 | 
			
		||||
        return bool(re.search(device + r"\b", out))
 | 
			
		||||
    return bool(re.search(device + r"[0-9]*\b", out))
 | 
			
		||||
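A sketch of the guard a hook would typically put around zap_disk (the device path is illustrative):

    # Usage sketch only: only zap a disk that is a real, unmounted block device.
    from charmhelpers.contrib.storage.linux.utils import (
        is_block_device,
        is_device_mounted,
        zap_disk,
    )

    device = '/dev/vdb'
    if is_block_device(device) and not is_device_mounted(device):
        zap_disk(device)  # destroys the partition table; irreversible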
							
								
								
									
										15
								hooks/charmhelpers/core/__init__.py
										Normal file
							@@ -0,0 +1,15 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
							
								
								
									
										57
								hooks/charmhelpers/core/decorators.py
										Normal file
							@@ -0,0 +1,57 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
#
 | 
			
		||||
# Copyright 2014 Canonical Ltd.
 | 
			
		||||
#
 | 
			
		||||
# Authors:
 | 
			
		||||
#  Edward Hope-Morley <opentastic@gmail.com>
 | 
			
		||||
#
 | 
			
		||||
 | 
			
		||||
import time
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    log,
 | 
			
		||||
    INFO,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
 | 
			
		||||
    """If the decorated function raises exception exc_type, allow num_retries
 | 
			
		||||
    retry attempts before raising the exception.
 | 
			
		||||
    """
 | 
			
		||||
    def _retry_on_exception_inner_1(f):
 | 
			
		||||
        def _retry_on_exception_inner_2(*args, **kwargs):
 | 
			
		||||
            retries = num_retries
 | 
			
		||||
            multiplier = 1
 | 
			
		||||
            while True:
 | 
			
		||||
                try:
 | 
			
		||||
                    return f(*args, **kwargs)
 | 
			
		||||
                except exc_type:
 | 
			
		||||
                    if not retries:
 | 
			
		||||
                        raise
 | 
			
		||||
 | 
			
		||||
                delay = base_delay * multiplier
 | 
			
		||||
                multiplier += 1
 | 
			
		||||
                log("Retrying '%s' %d more times (delay=%s)" %
 | 
			
		||||
                    (f.__name__, retries, delay), level=INFO)
 | 
			
		||||
                retries -= 1
 | 
			
		||||
                if delay:
 | 
			
		||||
                    time.sleep(delay)
 | 
			
		||||
 | 
			
		||||
        return _retry_on_exception_inner_2
 | 
			
		||||
 | 
			
		||||
    return _retry_on_exception_inner_1
 | 
			
		||||
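A usage sketch for the decorator above (the service name is illustrative); with num_retries=3 and base_delay=1 the delays grow as 1s, 2s, 3s before the exception finally propagates:

    # Usage sketch only: retry a flaky call a few times with a growing delay.
    from subprocess import CalledProcessError, check_call

    from charmhelpers.core.decorators import retry_on_exception

    @retry_on_exception(3, base_delay=1, exc_type=CalledProcessError)
    def restart_backup_service():
        check_call(['service', 'cinder-backup', 'restart'])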
							
								
								
									
										45
								hooks/charmhelpers/core/files.py
										Normal file
							@@ -0,0 +1,45 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
# -*- coding: utf-8 -*-
 | 
			
		||||
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import subprocess
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def sed(filename, before, after, flags='g'):
 | 
			
		||||
    """
 | 
			
		||||
    Search and replace the given pattern in filename.
 | 
			
		||||
 | 
			
		||||
    :param filename: relative or absolute file path.
 | 
			
		||||
    :param before: expression to be replaced (see 'man sed')
 | 
			
		||||
    :param after: expression to replace with (see 'man sed')
 | 
			
		||||
    :param flags: sed-compatible regex flags; for example, to make
 | 
			
		||||
    the search and replace case insensitive, specify ``flags="i"``.
 | 
			
		||||
    The ``g`` flag is always specified regardless, so you do not
 | 
			
		||||
    need to remember to include it when overriding this parameter.
 | 
			
		||||
    :returns: If the sed command exit code was zero then return,
 | 
			
		||||
    otherwise raise CalledProcessError.
 | 
			
		||||
    """
 | 
			
		||||
    expression = r's/{0}/{1}/{2}'.format(before,
 | 
			
		||||
                                         after, flags)
 | 
			
		||||
 | 
			
		||||
    return subprocess.check_call(["sed", "-i", "-r", "-e",
 | 
			
		||||
                                  expression,
 | 
			
		||||
                                  os.path.expanduser(filename)])
 | 
			
		||||
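A minimal usage sketch for sed(); the file path and option shown are illustrative:

    # Usage sketch only: rewrite the file in place via 'sed -i -r'; a non-zero
    # exit code raises CalledProcessError.
    from charmhelpers.core.files import sed

    sed('/etc/cinder/cinder.conf', 'debug = True', 'debug = False', flags='i')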
							
								
								
									
										134
								hooks/charmhelpers/core/fstab.py
										Normal file
							@@ -0,0 +1,134 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
# -*- coding: utf-8 -*-
 | 
			
		||||
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import io
 | 
			
		||||
import os
 | 
			
		||||
 | 
			
		||||
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Fstab(io.FileIO):
 | 
			
		||||
    """This class extends file in order to implement a file reader/writer
 | 
			
		||||
    for the `/etc/fstab` file
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    class Entry(object):
 | 
			
		||||
        """Entry class represents a non-comment line on the `/etc/fstab` file
 | 
			
		||||
        """
 | 
			
		||||
        def __init__(self, device, mountpoint, filesystem,
 | 
			
		||||
                     options, d=0, p=0):
 | 
			
		||||
            self.device = device
 | 
			
		||||
            self.mountpoint = mountpoint
 | 
			
		||||
            self.filesystem = filesystem
 | 
			
		||||
 | 
			
		||||
            if not options:
 | 
			
		||||
                options = "defaults"
 | 
			
		||||
 | 
			
		||||
            self.options = options
 | 
			
		||||
            self.d = int(d)
 | 
			
		||||
            self.p = int(p)
 | 
			
		||||
 | 
			
		||||
        def __eq__(self, o):
 | 
			
		||||
            return str(self) == str(o)
 | 
			
		||||
 | 
			
		||||
        def __str__(self):
 | 
			
		||||
            return "{} {} {} {} {} {}".format(self.device,
 | 
			
		||||
                                              self.mountpoint,
 | 
			
		||||
                                              self.filesystem,
 | 
			
		||||
                                              self.options,
 | 
			
		||||
                                              self.d,
 | 
			
		||||
                                              self.p)
 | 
			
		||||
 | 
			
		||||
    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
 | 
			
		||||
 | 
			
		||||
    def __init__(self, path=None):
 | 
			
		||||
        if path:
 | 
			
		||||
            self._path = path
 | 
			
		||||
        else:
 | 
			
		||||
            self._path = self.DEFAULT_PATH
 | 
			
		||||
        super(Fstab, self).__init__(self._path, 'rb+')
 | 
			
		||||
 | 
			
		||||
    def _hydrate_entry(self, line):
 | 
			
		||||
        # NOTE: use split with no arguments to split on any
 | 
			
		||||
        #       whitespace including tabs
 | 
			
		||||
        return Fstab.Entry(*filter(
 | 
			
		||||
            lambda x: x not in ('', None),
 | 
			
		||||
            line.strip("\n").split()))
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def entries(self):
 | 
			
		||||
        self.seek(0)
 | 
			
		||||
        for line in self.readlines():
 | 
			
		||||
            line = line.decode('us-ascii')
 | 
			
		||||
            try:
 | 
			
		||||
                if line.strip() and not line.strip().startswith("#"):
 | 
			
		||||
                    yield self._hydrate_entry(line)
 | 
			
		||||
            except ValueError:
 | 
			
		||||
                pass
 | 
			
		||||
 | 
			
		||||
    def get_entry_by_attr(self, attr, value):
 | 
			
		||||
        for entry in self.entries:
 | 
			
		||||
            e_attr = getattr(entry, attr)
 | 
			
		||||
            if e_attr == value:
 | 
			
		||||
                return entry
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
    def add_entry(self, entry):
 | 
			
		||||
        if self.get_entry_by_attr('device', entry.device):
 | 
			
		||||
            return False
 | 
			
		||||
 | 
			
		||||
        self.write((str(entry) + '\n').encode('us-ascii'))
 | 
			
		||||
        self.truncate()
 | 
			
		||||
        return entry
 | 
			
		||||
 | 
			
		||||
    def remove_entry(self, entry):
 | 
			
		||||
        self.seek(0)
 | 
			
		||||
 | 
			
		||||
        lines = [l.decode('us-ascii') for l in self.readlines()]
 | 
			
		||||
 | 
			
		||||
        found = False
 | 
			
		||||
        for index, line in enumerate(lines):
 | 
			
		||||
            if line.strip() and not line.strip().startswith("#"):
 | 
			
		||||
                if self._hydrate_entry(line) == entry:
 | 
			
		||||
                    found = True
 | 
			
		||||
                    break
 | 
			
		||||
 | 
			
		||||
        if not found:
 | 
			
		||||
            return False
 | 
			
		||||
 | 
			
		||||
        lines.remove(line)
 | 
			
		||||
 | 
			
		||||
        self.seek(0)
 | 
			
		||||
        self.write(''.join(lines).encode('us-ascii'))
 | 
			
		||||
        self.truncate()
 | 
			
		||||
        return True
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def remove_by_mountpoint(cls, mountpoint, path=None):
 | 
			
		||||
        fstab = cls(path=path)
 | 
			
		||||
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
 | 
			
		||||
        if entry:
 | 
			
		||||
            return fstab.remove_entry(entry)
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
 | 
			
		||||
        return cls(path=path).add_entry(Fstab.Entry(device,
 | 
			
		||||
                                                    mountpoint, filesystem,
 | 
			
		||||
                                                    options=options))
 | 
			
		||||
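A sketch of the two classmethod entry points above (device, mountpoint and filesystem are illustrative):

    # Usage sketch only: persist a mount in /etc/fstab and remove it later.
    from charmhelpers.core.fstab import Fstab

    # Appends an entry unless one already exists for the same device.
    Fstab.add('/dev/vdb1', '/srv/backups', 'ext4', options='defaults,noatime')

    # Drop the entry again by its mountpoint.
    Fstab.remove_by_mountpoint('/srv/backups')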
							
								
								
									
										930
								hooks/charmhelpers/core/hookenv.py
										Normal file
							@@ -0,0 +1,930 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
"Interactions with the Juju environment"
 | 
			
		||||
# Copyright 2013 Canonical Ltd.
 | 
			
		||||
#
 | 
			
		||||
# Authors:
 | 
			
		||||
#  Charm Helpers Developers <juju@lists.ubuntu.com>
 | 
			
		||||
 | 
			
		||||
from __future__ import print_function
 | 
			
		||||
import copy
 | 
			
		||||
from distutils.version import LooseVersion
 | 
			
		||||
from functools import wraps
 | 
			
		||||
import glob
 | 
			
		||||
import os
 | 
			
		||||
import json
 | 
			
		||||
import yaml
 | 
			
		||||
import subprocess
 | 
			
		||||
import sys
 | 
			
		||||
import errno
 | 
			
		||||
import tempfile
 | 
			
		||||
from subprocess import CalledProcessError
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
if not six.PY3:
 | 
			
		||||
    from UserDict import UserDict
 | 
			
		||||
else:
 | 
			
		||||
    from collections import UserDict
 | 
			
		||||
 | 
			
		||||
CRITICAL = "CRITICAL"
 | 
			
		||||
ERROR = "ERROR"
 | 
			
		||||
WARNING = "WARNING"
 | 
			
		||||
INFO = "INFO"
 | 
			
		||||
DEBUG = "DEBUG"
 | 
			
		||||
MARKER = object()
 | 
			
		||||
 | 
			
		||||
cache = {}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def cached(func):
 | 
			
		||||
    """Cache return values for multiple executions of func + args
 | 
			
		||||
 | 
			
		||||
    For example::
 | 
			
		||||
 | 
			
		||||
        @cached
 | 
			
		||||
        def unit_get(attribute):
 | 
			
		||||
            pass
 | 
			
		||||
 | 
			
		||||
        unit_get('test')
 | 
			
		||||
 | 
			
		||||
    will cache the result of unit_get + 'test' for future calls.
 | 
			
		||||
    """
 | 
			
		||||
    @wraps(func)
 | 
			
		||||
    def wrapper(*args, **kwargs):
 | 
			
		||||
        global cache
 | 
			
		||||
        key = str((func, args, kwargs))
 | 
			
		||||
        try:
 | 
			
		||||
            return cache[key]
 | 
			
		||||
        except KeyError:
 | 
			
		||||
            pass  # Drop out of the exception handler scope.
 | 
			
		||||
        res = func(*args, **kwargs)
 | 
			
		||||
        cache[key] = res
 | 
			
		||||
        return res
 | 
			
		||||
    wrapper._wrapped = func
 | 
			
		||||
    return wrapper
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def flush(key):
 | 
			
		||||
    """Flushes any entries from function cache where the
 | 
			
		||||
    key is found in the function+args """
 | 
			
		||||
    flush_list = []
 | 
			
		||||
    for item in cache:
 | 
			
		||||
        if key in item:
 | 
			
		||||
            flush_list.append(item)
 | 
			
		||||
    for item in flush_list:
 | 
			
		||||
        del cache[item]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def log(message, level=None):
 | 
			
		||||
    """Write a message to the juju log"""
 | 
			
		||||
    command = ['juju-log']
 | 
			
		||||
    if level:
 | 
			
		||||
        command += ['-l', level]
 | 
			
		||||
    if not isinstance(message, six.string_types):
 | 
			
		||||
        message = repr(message)
 | 
			
		||||
    command += [message]
 | 
			
		||||
    # Missing juju-log should not cause failures in unit tests
 | 
			
		||||
    # Send log output to stderr
 | 
			
		||||
    try:
 | 
			
		||||
        subprocess.call(command)
 | 
			
		||||
    except OSError as e:
 | 
			
		||||
        if e.errno == errno.ENOENT:
 | 
			
		||||
            if level:
 | 
			
		||||
                message = "{}: {}".format(level, message)
 | 
			
		||||
            message = "juju-log: {}".format(message)
 | 
			
		||||
            print(message, file=sys.stderr)
 | 
			
		||||
        else:
 | 
			
		||||
            raise
 | 
			
		||||
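A brief usage sketch for log(); the messages are illustrative:

    # Usage sketch only: write to the juju log at an explicit level.
    from charmhelpers.core.hookenv import DEBUG, WARNING, log

    log('created rbd pool for backups', level=DEBUG)
    log('ceph relation not ready yet', level=WARNING)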
 | 
			
		||||
 | 
			
		||||
class Serializable(UserDict):
 | 
			
		||||
    """Wrapper, an object that can be serialized to yaml or json"""
 | 
			
		||||
 | 
			
		||||
    def __init__(self, obj):
 | 
			
		||||
        # wrap the object
 | 
			
		||||
        UserDict.__init__(self)
 | 
			
		||||
        self.data = obj
 | 
			
		||||
 | 
			
		||||
    def __getattr__(self, attr):
 | 
			
		||||
        # See if this object has attribute.
 | 
			
		||||
        if attr in ("json", "yaml", "data"):
 | 
			
		||||
            return self.__dict__[attr]
 | 
			
		||||
        # Check for attribute in wrapped object.
 | 
			
		||||
        got = getattr(self.data, attr, MARKER)
 | 
			
		||||
        if got is not MARKER:
 | 
			
		||||
            return got
 | 
			
		||||
        # Proxy to the wrapped object via dict interface.
 | 
			
		||||
        try:
 | 
			
		||||
            return self.data[attr]
 | 
			
		||||
        except KeyError:
 | 
			
		||||
            raise AttributeError(attr)
 | 
			
		||||
 | 
			
		||||
    def __getstate__(self):
 | 
			
		||||
        # Pickle as a standard dictionary.
 | 
			
		||||
        return self.data
 | 
			
		||||
 | 
			
		||||
    def __setstate__(self, state):
 | 
			
		||||
        # Unpickle into our wrapper.
 | 
			
		||||
        self.data = state
 | 
			
		||||
 | 
			
		||||
    def json(self):
 | 
			
		||||
        """Serialize the object to json"""
 | 
			
		||||
        return json.dumps(self.data)
 | 
			
		||||
 | 
			
		||||
    def yaml(self):
 | 
			
		||||
        """Serialize the object to yaml"""
 | 
			
		||||
        return yaml.dump(self.data)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def execution_environment():
 | 
			
		||||
    """A convenient bundling of the current execution context"""
 | 
			
		||||
    context = {}
 | 
			
		||||
    context['conf'] = config()
 | 
			
		||||
    if relation_id():
 | 
			
		||||
        context['reltype'] = relation_type()
 | 
			
		||||
        context['relid'] = relation_id()
 | 
			
		||||
        context['rel'] = relation_get()
 | 
			
		||||
    context['unit'] = local_unit()
 | 
			
		||||
    context['rels'] = relations()
 | 
			
		||||
    context['env'] = os.environ
 | 
			
		||||
    return context
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def in_relation_hook():
 | 
			
		||||
    """Determine whether we're running in a relation hook"""
 | 
			
		||||
    return 'JUJU_RELATION' in os.environ
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def relation_type():
 | 
			
		||||
    """The scope for the current relation hook"""
 | 
			
		||||
    return os.environ.get('JUJU_RELATION', None)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def relation_id(relation_name=None, service_or_unit=None):
 | 
			
		||||
    """The relation ID for the current or a specified relation"""
 | 
			
		||||
    if not relation_name and not service_or_unit:
 | 
			
		||||
        return os.environ.get('JUJU_RELATION_ID', None)
 | 
			
		||||
    elif relation_name and service_or_unit:
 | 
			
		||||
        service_name = service_or_unit.split('/')[0]
 | 
			
		||||
        for relid in relation_ids(relation_name):
 | 
			
		||||
            remote_service = remote_service_name(relid)
 | 
			
		||||
            if remote_service == service_name:
 | 
			
		||||
                return relid
 | 
			
		||||
    else:
 | 
			
		||||
        raise ValueError('Must specify neither or both of relation_name and service_or_unit')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def local_unit():
 | 
			
		||||
    """Local unit ID"""
 | 
			
		||||
    return os.environ['JUJU_UNIT_NAME']
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def remote_unit():
 | 
			
		||||
    """The remote unit for the current relation hook"""
 | 
			
		||||
    return os.environ.get('JUJU_REMOTE_UNIT', None)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def service_name():
 | 
			
		||||
    """The name service group this unit belongs to"""
 | 
			
		||||
    return local_unit().split('/')[0]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def remote_service_name(relid=None):
 | 
			
		||||
    """The remote service name for a given relation-id (or the current relation)"""
 | 
			
		||||
    if relid is None:
 | 
			
		||||
        unit = remote_unit()
 | 
			
		||||
    else:
 | 
			
		||||
        units = related_units(relid)
 | 
			
		||||
        unit = units[0] if units else None
 | 
			
		||||
    return unit.split('/')[0] if unit else None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def hook_name():
 | 
			
		||||
    """The name of the currently executing hook"""
 | 
			
		||||
    return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Config(dict):
 | 
			
		||||
    """A dictionary representation of the charm's config.yaml, with some
 | 
			
		||||
    extra features:
 | 
			
		||||
 | 
			
		||||
    - See which values in the dictionary have changed since the previous hook.
 | 
			
		||||
    - For values that have changed, see what the previous value was.
 | 
			
		||||
    - Store arbitrary data for use in a later hook.
 | 
			
		||||
 | 
			
		||||
    NOTE: Do not instantiate this object directly - instead call
 | 
			
		||||
    ``hookenv.config()``, which will return an instance of :class:`Config`.
 | 
			
		||||
 | 
			
		||||
    Example usage::
 | 
			
		||||
 | 
			
		||||
        >>> # inside a hook
 | 
			
		||||
        >>> from charmhelpers.core import hookenv
 | 
			
		||||
        >>> config = hookenv.config()
 | 
			
		||||
        >>> config['foo']
 | 
			
		||||
        'bar'
 | 
			
		||||
        >>> # store a new key/value for later use
 | 
			
		||||
        >>> config['mykey'] = 'myval'
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
        >>> # user runs `juju set mycharm foo=baz`
 | 
			
		||||
        >>> # now we're inside subsequent config-changed hook
 | 
			
		||||
        >>> config = hookenv.config()
 | 
			
		||||
        >>> config['foo']
 | 
			
		||||
        'baz'
 | 
			
		||||
        >>> # test to see if this val has changed since last hook
 | 
			
		||||
        >>> config.changed('foo')
 | 
			
		||||
        True
 | 
			
		||||
        >>> # what was the previous value?
 | 
			
		||||
        >>> config.previous('foo')
 | 
			
		||||
        'bar'
 | 
			
		||||
        >>> # keys/values that we add are preserved across hooks
 | 
			
		||||
        >>> config['mykey']
 | 
			
		||||
        'myval'
 | 
			
		||||
 | 
			
		||||
    """
 | 
			
		||||
    CONFIG_FILE_NAME = '.juju-persistent-config'
 | 
			
		||||
 | 
			
		||||
    def __init__(self, *args, **kw):
 | 
			
		||||
        super(Config, self).__init__(*args, **kw)
 | 
			
		||||
        self.implicit_save = True
 | 
			
		||||
        self._prev_dict = None
 | 
			
		||||
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
 | 
			
		||||
        if os.path.exists(self.path):
 | 
			
		||||
            self.load_previous()
 | 
			
		||||
        atexit(self._implicit_save)
 | 
			
		||||
 | 
			
		||||
    def load_previous(self, path=None):
 | 
			
		||||
        """Load previous copy of config from disk.
 | 
			
		||||
 | 
			
		||||
        In normal usage you don't need to call this method directly - it
 | 
			
		||||
        is called automatically at object initialization.
 | 
			
		||||
 | 
			
		||||
        :param path:
 | 
			
		||||
 | 
			
		||||
            File path from which to load the previous config. If `None`,
 | 
			
		||||
            config is loaded from the default location. If `path` is
 | 
			
		||||
            specified, subsequent `save()` calls will write to the same
 | 
			
		||||
            path.
 | 
			
		||||
 | 
			
		||||
        """
 | 
			
		||||
        self.path = path or self.path
 | 
			
		||||
        with open(self.path) as f:
 | 
			
		||||
            self._prev_dict = json.load(f)
 | 
			
		||||
        for k, v in copy.deepcopy(self._prev_dict).items():
 | 
			
		||||
            if k not in self:
 | 
			
		||||
                self[k] = v
 | 
			
		||||
 | 
			
		||||
    def changed(self, key):
 | 
			
		||||
        """Return True if the current value for this key is different from
 | 
			
		||||
        the previous value.
 | 
			
		||||
 | 
			
		||||
        """
 | 
			
		||||
        if self._prev_dict is None:
 | 
			
		||||
            return True
 | 
			
		||||
        return self.previous(key) != self.get(key)
 | 
			
		||||
 | 
			
		||||
    def previous(self, key):
 | 
			
		||||
        """Return previous value for this key, or None if there
 | 
			
		||||
        is no previous value.
 | 
			
		||||
 | 
			
		||||
        """
 | 
			
		||||
        if self._prev_dict:
 | 
			
		||||
            return self._prev_dict.get(key)
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
    def save(self):
 | 
			
		||||
        """Save this config to disk.
 | 
			
		||||
 | 
			
		||||
        If the charm is using the :mod:`Services Framework <services.base>`
 | 
			
		||||
        or :meth:'@hook <Hooks.hook>' decorator, this
 | 
			
		||||
        is called automatically at the end of successful hook execution.
 | 
			
		||||
        Otherwise, it should be called directly by user code.
 | 
			
		||||
 | 
			
		||||
        To disable automatic saves, set ``implicit_save=False`` on this
 | 
			
		||||
        instance.
 | 
			
		||||
 | 
			
		||||
        """
 | 
			
		||||
        with open(self.path, 'w') as f:
 | 
			
		||||
            json.dump(self, f)
 | 
			
		||||
 | 
			
		||||
    def _implicit_save(self):
 | 
			
		||||
        if self.implicit_save:
 | 
			
		||||
            self.save()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def config(scope=None):
 | 
			
		||||
    """Juju charm configuration"""
 | 
			
		||||
    config_cmd_line = ['config-get']
 | 
			
		||||
    if scope is not None:
 | 
			
		||||
        config_cmd_line.append(scope)
 | 
			
		||||
    config_cmd_line.append('--format=json')
 | 
			
		||||
    try:
 | 
			
		||||
        config_data = json.loads(
 | 
			
		||||
            subprocess.check_output(config_cmd_line).decode('UTF-8'))
 | 
			
		||||
        if scope is not None:
 | 
			
		||||
            return config_data
 | 
			
		||||
        return Config(config_data)
 | 
			
		||||
    except ValueError:
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def relation_get(attribute=None, unit=None, rid=None):
 | 
			
		||||
    """Get relation information"""
 | 
			
		||||
    _args = ['relation-get', '--format=json']
 | 
			
		||||
    if rid:
 | 
			
		||||
        _args.append('-r')
 | 
			
		||||
        _args.append(rid)
 | 
			
		||||
    _args.append(attribute or '-')
 | 
			
		||||
    if unit:
 | 
			
		||||
        _args.append(unit)
 | 
			
		||||
    try:
 | 
			
		||||
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
 | 
			
		||||
    except ValueError:
 | 
			
		||||
        return None
 | 
			
		||||
    except CalledProcessError as e:
 | 
			
		||||
        if e.returncode == 2:
 | 
			
		||||
            return None
 | 
			
		||||
        raise
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def relation_set(relation_id=None, relation_settings=None, **kwargs):
 | 
			
		||||
    """Set relation information for the current unit"""
 | 
			
		||||
    relation_settings = relation_settings if relation_settings else {}
 | 
			
		||||
    relation_cmd_line = ['relation-set']
 | 
			
		||||
    accepts_file = "--file" in subprocess.check_output(
 | 
			
		||||
        relation_cmd_line + ["--help"], universal_newlines=True)
 | 
			
		||||
    if relation_id is not None:
 | 
			
		||||
        relation_cmd_line.extend(('-r', relation_id))
 | 
			
		||||
    settings = relation_settings.copy()
 | 
			
		||||
    settings.update(kwargs)
 | 
			
		||||
    for key, value in settings.items():
 | 
			
		||||
        # Force value to be a string: it always should, but some call
 | 
			
		||||
        # sites pass in things like dicts or numbers.
 | 
			
		||||
        if value is not None:
 | 
			
		||||
            settings[key] = "{}".format(value)
 | 
			
		||||
    if accepts_file:
 | 
			
		||||
        # --file was introduced in Juju 1.23.2. Use it by default if
 | 
			
		||||
        # available, since otherwise we'll break if the relation data is
 | 
			
		||||
        # too big. Ideally we should tell relation-set to read the data from
 | 
			
		||||
        # stdin, but that feature is broken in 1.23.2: Bug #1454678.
 | 
			
		||||
        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
 | 
			
		||||
            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
 | 
			
		||||
        subprocess.check_call(
 | 
			
		||||
            relation_cmd_line + ["--file", settings_file.name])
 | 
			
		||||
        os.remove(settings_file.name)
 | 
			
		||||
    else:
 | 
			
		||||
        for key, value in settings.items():
 | 
			
		||||
            if value is None:
 | 
			
		||||
                relation_cmd_line.append('{}='.format(key))
 | 
			
		||||
            else:
 | 
			
		||||
                relation_cmd_line.append('{}={}'.format(key, value))
 | 
			
		||||
        subprocess.check_call(relation_cmd_line)
 | 
			
		||||
    # Flush cache of any relation-gets for local unit
 | 
			
		||||
    flush(local_unit())
 | 
			
		||||
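A sketch of a relation-changed hook body using relation_get() and relation_set(); the key names are illustrative, not an interface defined by this charm:

    # Usage sketch only: read what the remote unit published and answer back.
    from charmhelpers.core.hookenv import relation_get, relation_set, remote_unit

    ceph_key = relation_get('key', unit=remote_unit())
    if ceph_key:
        relation_set(relation_settings={'backup_enabled': 'true'})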
 | 
			
		||||
 | 
			
		||||
def relation_clear(r_id=None):
 | 
			
		||||
    ''' Clears any relation data already set on relation r_id '''
 | 
			
		||||
    settings = relation_get(rid=r_id,
 | 
			
		||||
                            unit=local_unit())
 | 
			
		||||
    for setting in settings:
 | 
			
		||||
        if setting not in ['public-address', 'private-address']:
 | 
			
		||||
            settings[setting] = None
 | 
			
		||||
    relation_set(relation_id=r_id,
 | 
			
		||||
                 **settings)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def relation_ids(reltype=None):
 | 
			
		||||
    """A list of relation_ids"""
 | 
			
		||||
    reltype = reltype or relation_type()
 | 
			
		||||
    relid_cmd_line = ['relation-ids', '--format=json']
 | 
			
		||||
    if reltype is not None:
 | 
			
		||||
        relid_cmd_line.append(reltype)
 | 
			
		||||
        return json.loads(
 | 
			
		||||
            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
 | 
			
		||||
    return []
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def related_units(relid=None):
 | 
			
		||||
    """A list of related units"""
 | 
			
		||||
    relid = relid or relation_id()
 | 
			
		||||
    units_cmd_line = ['relation-list', '--format=json']
 | 
			
		||||
    if relid is not None:
 | 
			
		||||
        units_cmd_line.extend(('-r', relid))
 | 
			
		||||
    return json.loads(
 | 
			
		||||
        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def relation_for_unit(unit=None, rid=None):
 | 
			
		||||
    """Get the json represenation of a unit's relation"""
 | 
			
		||||
    unit = unit or remote_unit()
 | 
			
		||||
    relation = relation_get(unit=unit, rid=rid)
 | 
			
		||||
    for key in relation:
 | 
			
		||||
        if key.endswith('-list'):
 | 
			
		||||
            relation[key] = relation[key].split()
 | 
			
		||||
    relation['__unit__'] = unit
 | 
			
		||||
    return relation
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def relations_for_id(relid=None):
 | 
			
		||||
    """Get relations of a specific relation ID"""
 | 
			
		||||
    relation_data = []
 | 
			
		||||
    relid = relid or relation_ids()
 | 
			
		||||
    for unit in related_units(relid):
 | 
			
		||||
        unit_data = relation_for_unit(unit, relid)
 | 
			
		||||
        unit_data['__relid__'] = relid
 | 
			
		||||
        relation_data.append(unit_data)
 | 
			
		||||
    return relation_data
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def relations_of_type(reltype=None):
 | 
			
		||||
    """Get relations of a specific type"""
 | 
			
		||||
    relation_data = []
 | 
			
		||||
    reltype = reltype or relation_type()
 | 
			
		||||
    for relid in relation_ids(reltype):
 | 
			
		||||
        for relation in relations_for_id(relid):
 | 
			
		||||
            relation['__relid__'] = relid
 | 
			
		||||
            relation_data.append(relation)
 | 
			
		||||
    return relation_data
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def metadata():
 | 
			
		||||
    """Get the current charm metadata.yaml contents as a python object"""
 | 
			
		||||
    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
 | 
			
		||||
        return yaml.safe_load(md)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def relation_types():
 | 
			
		||||
    """Get a list of relation types supported by this charm"""
 | 
			
		||||
    rel_types = []
 | 
			
		||||
    md = metadata()
 | 
			
		||||
    for key in ('provides', 'requires', 'peers'):
 | 
			
		||||
        section = md.get(key)
 | 
			
		||||
        if section:
 | 
			
		||||
            rel_types.extend(section.keys())
 | 
			
		||||
    return rel_types
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def relation_to_interface(relation_name):
 | 
			
		||||
    """
 | 
			
		||||
    Given the name of a relation, return the interface that relation uses.
 | 
			
		||||
 | 
			
		||||
    :returns: The interface name, or ``None``.
 | 
			
		||||
    """
 | 
			
		||||
    return relation_to_role_and_interface(relation_name)[1]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def relation_to_role_and_interface(relation_name):
 | 
			
		||||
    """
 | 
			
		||||
    Given the name of a relation, return the role and the name of the interface
 | 
			
		||||
    that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
 | 
			
		||||
 | 
			
		||||
    :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
 | 
			
		||||
    """
 | 
			
		||||
    _metadata = metadata()
 | 
			
		||||
    for role in ('provides', 'requires', 'peer'):
 | 
			
		||||
        interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
 | 
			
		||||
        if interface:
 | 
			
		||||
            return role, interface
 | 
			
		||||
    return None, None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def role_and_interface_to_relations(role, interface_name):
 | 
			
		||||
    """
 | 
			
		||||
    Given a role and interface name, return a list of relation names for the
 | 
			
		||||
    current charm that use that interface under that role (where role is one
 | 
			
		||||
    of ``provides``, ``requires``, or ``peer``).
 | 
			
		||||
 | 
			
		||||
    :returns: A list of relation names.
 | 
			
		||||
    """
 | 
			
		||||
    _metadata = metadata()
 | 
			
		||||
    results = []
 | 
			
		||||
    for relation_name, relation in _metadata.get(role, {}).items():
 | 
			
		||||
        if relation['interface'] == interface_name:
 | 
			
		||||
            results.append(relation_name)
 | 
			
		||||
    return results
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def interface_to_relations(interface_name):
 | 
			
		||||
    """
 | 
			
		||||
    Given an interface, return a list of relation names for the current
 | 
			
		||||
    charm that use that interface.
 | 
			
		||||
 | 
			
		||||
    :returns: A list of relation names.
 | 
			
		||||
    """
 | 
			
		||||
    results = []
 | 
			
		||||
    for role in ('provides', 'requires', 'peer'):
 | 
			
		||||
        results.extend(role_and_interface_to_relations(role, interface_name))
 | 
			
		||||
    return results
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def charm_name():
 | 
			
		||||
    """Get the name of the current charm as is specified on metadata.yaml"""
 | 
			
		||||
    return metadata().get('name')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def relations():
 | 
			
		||||
    """Get a nested dictionary of relation data for all related units"""
 | 
			
		||||
    rels = {}
 | 
			
		||||
    for reltype in relation_types():
 | 
			
		||||
        relids = {}
 | 
			
		||||
        for relid in relation_ids(reltype):
 | 
			
		||||
            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
 | 
			
		||||
            for unit in related_units(relid):
 | 
			
		||||
                reldata = relation_get(unit=unit, rid=relid)
 | 
			
		||||
                units[unit] = reldata
 | 
			
		||||
            relids[relid] = units
 | 
			
		||||
        rels[reltype] = relids
 | 
			
		||||
    return rels
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def is_relation_made(relation, keys='private-address'):
 | 
			
		||||
    '''
 | 
			
		||||
    Determine whether a relation is established by checking for
 | 
			
		||||
    presence of key(s).  If a list of keys is provided, they
 | 
			
		||||
    must all be present for the relation to be identified as made.
 | 
			
		||||
    '''
 | 
			
		||||
    if isinstance(keys, str):
 | 
			
		||||
        keys = [keys]
 | 
			
		||||
    for r_id in relation_ids(relation):
 | 
			
		||||
        for unit in related_units(r_id):
 | 
			
		||||
            context = {}
 | 
			
		||||
            for k in keys:
 | 
			
		||||
                context[k] = relation_get(k, rid=r_id,
 | 
			
		||||
                                          unit=unit)
 | 
			
		||||
            if None not in context.values():
 | 
			
		||||
                return True
 | 
			
		||||
    return False
 | 
			
		||||
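A usage sketch for is_relation_made(); the relation and key names are illustrative for a cinder-backup style charm:

    # Usage sketch only: block until the ceph relation has provided credentials.
    from charmhelpers.core.hookenv import is_relation_made, status_set

    if not is_relation_made('ceph', keys=['key', 'auth']):
        status_set('blocked', 'ceph relation incomplete')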
 | 
			
		||||
 | 
			
		||||
def open_port(port, protocol="TCP"):
 | 
			
		||||
    """Open a service network port"""
 | 
			
		||||
    _args = ['open-port']
 | 
			
		||||
    _args.append('{}/{}'.format(port, protocol))
 | 
			
		||||
    subprocess.check_call(_args)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def close_port(port, protocol="TCP"):
 | 
			
		||||
    """Close a service network port"""
 | 
			
		||||
    _args = ['close-port']
 | 
			
		||||
    _args.append('{}/{}'.format(port, protocol))
 | 
			
		||||
    subprocess.check_call(_args)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def unit_get(attribute):
 | 
			
		||||
    """Get the unit ID for the remote unit"""
 | 
			
		||||
    _args = ['unit-get', '--format=json', attribute]
 | 
			
		||||
    try:
 | 
			
		||||
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
 | 
			
		||||
    except ValueError:
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def unit_public_ip():
 | 
			
		||||
    """Get this unit's public IP address"""
 | 
			
		||||
    return unit_get('public-address')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def unit_private_ip():
 | 
			
		||||
    """Get this unit's private IP address"""
 | 
			
		||||
    return unit_get('private-address')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def storage_get(attribute="", storage_id=""):
 | 
			
		||||
    """Get storage attributes"""
 | 
			
		||||
    _args = ['storage-get', '--format=json']
 | 
			
		||||
    if storage_id:
 | 
			
		||||
        _args.extend(('-s', storage_id))
 | 
			
		||||
    if attribute:
 | 
			
		||||
        _args.append(attribute)
 | 
			
		||||
    try:
 | 
			
		||||
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
 | 
			
		||||
    except ValueError:
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def storage_list(storage_name=""):
 | 
			
		||||
    """List the storage IDs for the unit"""
 | 
			
		||||
    _args = ['storage-list', '--format=json']
 | 
			
		||||
    if storage_name:
 | 
			
		||||
        _args.append(storage_name)
 | 
			
		||||
    try:
 | 
			
		||||
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
 | 
			
		||||
    except ValueError:
 | 
			
		||||
        return None
 | 
			
		||||
    except OSError as e:
 | 
			
		||||
        import errno
 | 
			
		||||
        if e.errno == errno.ENOENT:
 | 
			
		||||
            # storage-list does not exist
 | 
			
		||||
            return []
 | 
			
		||||
        raise
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class UnregisteredHookError(Exception):
 | 
			
		||||
    """Raised when an undefined hook is called"""
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Hooks(object):
 | 
			
		||||
    """A convenient handler for hook functions.
 | 
			
		||||
 | 
			
		||||
    Example::
 | 
			
		||||
 | 
			
		||||
        hooks = Hooks()
 | 
			
		||||
 | 
			
		||||
        # register a hook, taking its name from the function name
 | 
			
		||||
        @hooks.hook()
 | 
			
		||||
        def install():
 | 
			
		||||
            pass  # your code here
 | 
			
		||||
 | 
			
		||||
        # register a hook, providing a custom hook name
 | 
			
		||||
        @hooks.hook("config-changed")
 | 
			
		||||
        def config_changed():
 | 
			
		||||
            pass  # your code here
 | 
			
		||||
 | 
			
		||||
        if __name__ == "__main__":
 | 
			
		||||
            # execute a hook based on the name the program is called by
 | 
			
		||||
            hooks.execute(sys.argv)
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config_save=None):
 | 
			
		||||
        super(Hooks, self).__init__()
 | 
			
		||||
        self._hooks = {}
 | 
			
		||||
 | 
			
		||||
        # For unknown reasons, we allow the Hooks constructor to override
 | 
			
		||||
        # config().implicit_save.
 | 
			
		||||
        if config_save is not None:
 | 
			
		||||
            config().implicit_save = config_save
 | 
			
		||||
 | 
			
		||||
    def register(self, name, function):
 | 
			
		||||
        """Register a hook"""
 | 
			
		||||
        self._hooks[name] = function
 | 
			
		||||
 | 
			
		||||
    def execute(self, args):
 | 
			
		||||
        """Execute a registered hook based on args[0]"""
 | 
			
		||||
        _run_atstart()
 | 
			
		||||
        hook_name = os.path.basename(args[0])
 | 
			
		||||
        if hook_name in self._hooks:
 | 
			
		||||
            try:
 | 
			
		||||
                self._hooks[hook_name]()
 | 
			
		||||
            except SystemExit as x:
 | 
			
		||||
                if x.code is None or x.code == 0:
 | 
			
		||||
                    _run_atexit()
 | 
			
		||||
                raise
 | 
			
		||||
            _run_atexit()
 | 
			
		||||
        else:
 | 
			
		||||
            raise UnregisteredHookError(hook_name)
 | 
			
		||||
 | 
			
		||||
    def hook(self, *hook_names):
 | 
			
		||||
        """Decorator, registering them as hooks"""
 | 
			
		||||
        def wrapper(decorated):
 | 
			
		||||
            for hook_name in hook_names:
 | 
			
		||||
                self.register(hook_name, decorated)
 | 
			
		||||
            else:
 | 
			
		||||
                self.register(decorated.__name__, decorated)
 | 
			
		||||
                if '_' in decorated.__name__:
 | 
			
		||||
                    self.register(
 | 
			
		||||
                        decorated.__name__.replace('_', '-'), decorated)
 | 
			
		||||
            return decorated
 | 
			
		||||
        return wrapper
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def charm_dir():
 | 
			
		||||
    """Return the root directory of the current charm"""
 | 
			
		||||
    return os.environ.get('CHARM_DIR')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def action_get(key=None):
 | 
			
		||||
    """Gets the value of an action parameter, or all key/value param pairs"""
 | 
			
		||||
    cmd = ['action-get']
 | 
			
		||||
    if key is not None:
 | 
			
		||||
        cmd.append(key)
 | 
			
		||||
    cmd.append('--format=json')
 | 
			
		||||
    action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
 | 
			
		||||
    return action_data
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def action_set(values):
 | 
			
		||||
    """Sets the values to be returned after the action finishes"""
 | 
			
		||||
    cmd = ['action-set']
 | 
			
		||||
    for k, v in list(values.items()):
 | 
			
		||||
        cmd.append('{}={}'.format(k, v))
 | 
			
		||||
    subprocess.check_call(cmd)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def action_fail(message):
 | 
			
		||||
    """Sets the action status to failed and sets the error message.
 | 
			
		||||
 | 
			
		||||
    The results set by action_set are preserved."""
 | 
			
		||||
    subprocess.check_call(['action-fail', message])
 | 
			
		||||
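A sketch of how an action script would combine action_get(), action_set() and action_fail(); the parameter name and the run_backup() helper are hypothetical:

    # Usage sketch only: read a parameter, do the work, report the outcome so
    # it is visible via 'juju action fetch'.
    from charmhelpers.core.hookenv import action_fail, action_get, action_set

    target = action_get('backup-target')
    try:
        detail = run_backup(target)  # hypothetical helper, not in this charm
    except Exception as exc:
        action_fail('backup failed: {}'.format(exc))
    else:
        action_set({'outcome': 'success', 'detail': detail})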
 | 
			
		||||
 | 
			
		||||
def action_name():
 | 
			
		||||
    """Get the name of the currently executing action."""
 | 
			
		||||
    return os.environ.get('JUJU_ACTION_NAME')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def action_uuid():
 | 
			
		||||
    """Get the UUID of the currently executing action."""
 | 
			
		||||
    return os.environ.get('JUJU_ACTION_UUID')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def action_tag():
 | 
			
		||||
    """Get the tag for the currently executing action."""
 | 
			
		||||
    return os.environ.get('JUJU_ACTION_TAG')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def status_set(workload_state, message):
 | 
			
		||||
    """Set the workload state with a message
 | 
			
		||||
 | 
			
		||||
    Use status-set to set the workload state with a message which is visible
 | 
			
		||||
    to the user via juju status. If the status-set command is not found then
 | 
			
		||||
    assume this is juju < 1.23 and juju-log the message instead.
 | 
			
		||||
 | 
			
		||||
    workload_state -- valid juju workload state.
 | 
			
		||||
    message        -- status update message
 | 
			
		||||
    """
 | 
			
		||||
    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
 | 
			
		||||
    if workload_state not in valid_states:
 | 
			
		||||
        raise ValueError(
 | 
			
		||||
            '{!r} is not a valid workload state'.format(workload_state)
 | 
			
		||||
        )
 | 
			
		||||
    cmd = ['status-set', workload_state, message]
 | 
			
		||||
    try:
 | 
			
		||||
        ret = subprocess.call(cmd)
 | 
			
		||||
        if ret == 0:
 | 
			
		||||
            return
 | 
			
		||||
    except OSError as e:
 | 
			
		||||
        if e.errno != errno.ENOENT:
 | 
			
		||||
            raise
 | 
			
		||||
    log_message = 'status-set failed: {} {}'.format(workload_state,
 | 
			
		||||
                                                    message)
 | 
			
		||||
    log(log_message, level='INFO')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def status_get():
 | 
			
		||||
    """Retrieve the previously set juju workload state and message
 | 
			
		||||
 | 
			
		||||
    If the status-get command is not found then assume this is juju < 1.23 and
 | 
			
		||||
    return 'unknown', ""
 | 
			
		||||
 | 
			
		||||
    """
 | 
			
		||||
    cmd = ['status-get', "--format=json", "--include-data"]
 | 
			
		||||
    try:
 | 
			
		||||
        raw_status = subprocess.check_output(cmd)
 | 
			
		||||
    except OSError as e:
 | 
			
		||||
        if e.errno == errno.ENOENT:
 | 
			
		||||
            return ('unknown', "")
 | 
			
		||||
        else:
 | 
			
		||||
            raise
 | 
			
		||||
    else:
 | 
			
		||||
        status = json.loads(raw_status.decode("UTF-8"))
 | 
			
		||||
        return (status["status"], status["message"])
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
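# --- Illustrative sketch (not part of charm-helpers): reporting workload
# status from a hook with status_set()/status_get() above. Shown
# commented-out; the config option name is a made-up example, not an API of
# this module. 'blocked' and 'active' are two of the valid states listed in
# status_set().
#
#     from charmhelpers.core.hookenv import config, status_set, status_get
#
#     if not config('backup-backend'):        # hypothetical option name
#         status_set('blocked', 'backup-backend is not configured')
#     else:
#         status_set('active', 'Unit is ready')
#     state, message = status_get()   # e.g. ('active', 'Unit is ready')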
def translate_exc(from_exc, to_exc):
 | 
			
		||||
    def inner_translate_exc1(f):
 | 
			
		||||
        def inner_translate_exc2(*args, **kwargs):
 | 
			
		||||
            try:
 | 
			
		||||
                return f(*args, **kwargs)
 | 
			
		||||
            except from_exc:
 | 
			
		||||
                raise to_exc
 | 
			
		||||
 | 
			
		||||
        return inner_translate_exc2
 | 
			
		||||
 | 
			
		||||
    return inner_translate_exc1
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 | 
			
		||||
def is_leader():
 | 
			
		||||
    """Does the current unit hold the juju leadership
 | 
			
		||||
 | 
			
		||||
    Uses juju to determine whether the current unit is the leader of its peers
 | 
			
		||||
    """
 | 
			
		||||
    cmd = ['is-leader', '--format=json']
 | 
			
		||||
    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 | 
			
		||||
def leader_get(attribute=None):
 | 
			
		||||
    """Juju leader get value(s)"""
 | 
			
		||||
    cmd = ['leader-get', '--format=json'] + [attribute or '-']
 | 
			
		||||
    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 | 
			
		||||
def leader_set(settings=None, **kwargs):
 | 
			
		||||
    """Juju leader set value(s)"""
 | 
			
		||||
    # Don't log secrets.
 | 
			
		||||
    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
 | 
			
		||||
    cmd = ['leader-set']
 | 
			
		||||
    settings = settings or {}
 | 
			
		||||
    settings.update(kwargs)
 | 
			
		||||
    for k, v in settings.items():
 | 
			
		||||
        if v is None:
 | 
			
		||||
            cmd.append('{}='.format(k))
 | 
			
		||||
        else:
 | 
			
		||||
            cmd.append('{}={}'.format(k, v))
 | 
			
		||||
    subprocess.check_call(cmd)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
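# --- Illustrative sketch (not part of charm-helpers): sharing a generated
# secret between peer units via the leadership helpers above. Shown
# commented-out; 'shared-secret' is just an example key. pwgen() comes from
# charmhelpers.core.host later in this commit.
#
#     from charmhelpers.core.hookenv import is_leader, leader_get, leader_set
#     from charmhelpers.core.host import pwgen
#
#     if is_leader():
#         if not leader_get('shared-secret'):
#             leader_set({'shared-secret': pwgen(32)})
#     secret = leader_get('shared-secret')   # same value on every peer unit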
@cached
 | 
			
		||||
def juju_version():
 | 
			
		||||
    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
 | 
			
		||||
    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
 | 
			
		||||
    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
 | 
			
		||||
    return subprocess.check_output([jujud, 'version'],
 | 
			
		||||
                                   universal_newlines=True).strip()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@cached
 | 
			
		||||
def has_juju_version(minimum_version):
 | 
			
		||||
    """Return True if the Juju version is at least the provided version"""
 | 
			
		||||
    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
_atexit = []
 | 
			
		||||
_atstart = []
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def atstart(callback, *args, **kwargs):
 | 
			
		||||
    '''Schedule a callback to run before the main hook.
 | 
			
		||||
 | 
			
		||||
    Callbacks are run in the order they were added.
 | 
			
		||||
 | 
			
		||||
    This is useful for modules and classes to perform initialization
 | 
			
		||||
    and inject behavior. In particular:
 | 
			
		||||
 | 
			
		||||
        - Run common code before all of your hooks, such as logging
 | 
			
		||||
          the hook name or interesting relation data.
 | 
			
		||||
        - Defer object or module initialization that requires a hook
 | 
			
		||||
          context until we know there actually is a hook context,
 | 
			
		||||
          making testing easier.
 | 
			
		||||
        - Rather than requiring charm authors to include boilerplate to
 | 
			
		||||
          invoke your helper's behavior, have it run automatically if
 | 
			
		||||
          your object is instantiated or module imported.
 | 
			
		||||
 | 
			
		||||
    This is not at all useful after your hook framework has been launched.
 | 
			
		||||
    '''
 | 
			
		||||
    global _atstart
 | 
			
		||||
    _atstart.append((callback, args, kwargs))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def atexit(callback, *args, **kwargs):
 | 
			
		||||
    '''Schedule a callback to run on successful hook completion.
 | 
			
		||||
 | 
			
		||||
    Callbacks are run in the reverse order that they were added.'''
 | 
			
		||||
    _atexit.append((callback, args, kwargs))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _run_atstart():
 | 
			
		||||
    '''Hook frameworks must invoke this before running the main hook body.'''
 | 
			
		||||
    global _atstart
 | 
			
		||||
    for callback, args, kwargs in _atstart:
 | 
			
		||||
        callback(*args, **kwargs)
 | 
			
		||||
    del _atstart[:]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _run_atexit():
 | 
			
		||||
    '''Hook frameworks must invoke this after the main hook body has
 | 
			
		||||
    successfully completed. Do not invoke it if the hook fails.'''
 | 
			
		||||
    global _atexit
 | 
			
		||||
    for callback, args, kwargs in reversed(_atexit):
 | 
			
		||||
        callback(*args, **kwargs)
 | 
			
		||||
    del _atexit[:]
 | 
			
		||||
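# --- Illustrative sketch (not part of charm-helpers): how a helper module can
# use atstart()/atexit() so its setup and flush run around whichever hook
# fires. Hooks.execute() above calls _run_atstart() before the hook body and
# _run_atexit() only after a successful run. Shown commented-out.
#
#     from charmhelpers.core.hookenv import atstart, atexit, log
#
#     def _log_hook_start():
#         log('hook starting')
#
#     def _flush_pending_writes():
#         log('hook finished cleanly, flushing state')
#
#     atstart(_log_hook_start)
#     atexit(_flush_pending_writes)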
							
								
								
									
586	hooks/charmhelpers/core/host.py	Normal file
@@ -0,0 +1,586 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
"""Tools for working with the host system"""
 | 
			
		||||
# Copyright 2012 Canonical Ltd.
 | 
			
		||||
#
 | 
			
		||||
# Authors:
 | 
			
		||||
#  Nick Moffitt <nick.moffitt@canonical.com>
 | 
			
		||||
#  Matthew Wedgwood <matthew.wedgwood@canonical.com>
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import re
 | 
			
		||||
import pwd
 | 
			
		||||
import glob
 | 
			
		||||
import grp
 | 
			
		||||
import random
 | 
			
		||||
import string
 | 
			
		||||
import subprocess
 | 
			
		||||
import hashlib
 | 
			
		||||
from contextlib import contextmanager
 | 
			
		||||
from collections import OrderedDict
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
 | 
			
		||||
from .hookenv import log
 | 
			
		||||
from .fstab import Fstab
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def service_start(service_name):
 | 
			
		||||
    """Start a system service"""
 | 
			
		||||
    return service('start', service_name)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def service_stop(service_name):
 | 
			
		||||
    """Stop a system service"""
 | 
			
		||||
    return service('stop', service_name)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def service_restart(service_name):
 | 
			
		||||
    """Restart a system service"""
 | 
			
		||||
    return service('restart', service_name)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def service_reload(service_name, restart_on_failure=False):
 | 
			
		||||
    """Reload a system service, optionally falling back to restart if
 | 
			
		||||
    reload fails"""
 | 
			
		||||
    service_result = service('reload', service_name)
 | 
			
		||||
    if not service_result and restart_on_failure:
 | 
			
		||||
        service_result = service('restart', service_name)
 | 
			
		||||
    return service_result
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
 | 
			
		||||
    """Pause a system service.
 | 
			
		||||
 | 
			
		||||
    Stop it, and prevent it from starting again at boot."""
 | 
			
		||||
    stopped = service_stop(service_name)
 | 
			
		||||
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
 | 
			
		||||
    sysv_file = os.path.join(initd_dir, service_name)
 | 
			
		||||
    if os.path.exists(upstart_file):
 | 
			
		||||
        override_path = os.path.join(
 | 
			
		||||
            init_dir, '{}.override'.format(service_name))
 | 
			
		||||
        with open(override_path, 'w') as fh:
 | 
			
		||||
            fh.write("manual\n")
 | 
			
		||||
    elif os.path.exists(sysv_file):
 | 
			
		||||
        subprocess.check_call(["update-rc.d", service_name, "disable"])
 | 
			
		||||
    else:
 | 
			
		||||
        # XXX: Support SystemD too
 | 
			
		||||
        raise ValueError(
 | 
			
		||||
            "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
 | 
			
		||||
                service_name, upstart_file, sysv_file))
 | 
			
		||||
    return stopped
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def service_resume(service_name, init_dir="/etc/init",
 | 
			
		||||
                   initd_dir="/etc/init.d"):
 | 
			
		||||
    """Resume a system service.
 | 
			
		||||
 | 
			
		||||
    Re-enable starting at boot and start the service."""
 | 
			
		||||
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
 | 
			
		||||
    sysv_file = os.path.join(initd_dir, service_name)
 | 
			
		||||
    if os.path.exists(upstart_file):
 | 
			
		||||
        override_path = os.path.join(
 | 
			
		||||
            init_dir, '{}.override'.format(service_name))
 | 
			
		||||
        if os.path.exists(override_path):
 | 
			
		||||
            os.unlink(override_path)
 | 
			
		||||
    elif os.path.exists(sysv_file):
 | 
			
		||||
        subprocess.check_call(["update-rc.d", service_name, "enable"])
 | 
			
		||||
    else:
 | 
			
		||||
        # XXX: Support SystemD too
 | 
			
		||||
        raise ValueError(
 | 
			
		||||
            "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
 | 
			
		||||
                service_name, upstart_file, sysv_file))
 | 
			
		||||
 | 
			
		||||
    started = service_start(service_name)
 | 
			
		||||
    return started
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def service(action, service_name):
 | 
			
		||||
    """Control a system service"""
 | 
			
		||||
    cmd = ['service', service_name, action]
 | 
			
		||||
    return subprocess.call(cmd) == 0
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def service_running(service):
 | 
			
		||||
    """Determine whether a system service is running"""
 | 
			
		||||
    try:
 | 
			
		||||
        output = subprocess.check_output(
 | 
			
		||||
            ['service', service, 'status'],
 | 
			
		||||
            stderr=subprocess.STDOUT).decode('UTF-8')
 | 
			
		||||
    except subprocess.CalledProcessError:
 | 
			
		||||
        return False
 | 
			
		||||
    else:
 | 
			
		||||
        if ("start/running" in output or "is running" in output):
 | 
			
		||||
            return True
 | 
			
		||||
        else:
 | 
			
		||||
            return False
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def service_available(service_name):
 | 
			
		||||
    """Determine whether a system service is available"""
 | 
			
		||||
    try:
 | 
			
		||||
        subprocess.check_output(
 | 
			
		||||
            ['service', service_name, 'status'],
 | 
			
		||||
            stderr=subprocess.STDOUT).decode('UTF-8')
 | 
			
		||||
    except subprocess.CalledProcessError as e:
 | 
			
		||||
        return b'unrecognized service' not in e.output
 | 
			
		||||
    else:
 | 
			
		||||
        return True
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def adduser(username, password=None, shell='/bin/bash', system_user=False):
 | 
			
		||||
    """Add a user to the system"""
 | 
			
		||||
    try:
 | 
			
		||||
        user_info = pwd.getpwnam(username)
 | 
			
		||||
        log('user {0} already exists!'.format(username))
 | 
			
		||||
    except KeyError:
 | 
			
		||||
        log('creating user {0}'.format(username))
 | 
			
		||||
        cmd = ['useradd']
 | 
			
		||||
        if system_user or password is None:
 | 
			
		||||
            cmd.append('--system')
 | 
			
		||||
        else:
 | 
			
		||||
            cmd.extend([
 | 
			
		||||
                '--create-home',
 | 
			
		||||
                '--shell', shell,
 | 
			
		||||
                '--password', password,
 | 
			
		||||
            ])
 | 
			
		||||
        cmd.append(username)
 | 
			
		||||
        subprocess.check_call(cmd)
 | 
			
		||||
        user_info = pwd.getpwnam(username)
 | 
			
		||||
    return user_info
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def user_exists(username):
 | 
			
		||||
    """Check if a user exists"""
 | 
			
		||||
    try:
 | 
			
		||||
        pwd.getpwnam(username)
 | 
			
		||||
        user_exists = True
 | 
			
		||||
    except KeyError:
 | 
			
		||||
        user_exists = False
 | 
			
		||||
    return user_exists
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def add_group(group_name, system_group=False):
 | 
			
		||||
    """Add a group to the system"""
 | 
			
		||||
    try:
 | 
			
		||||
        group_info = grp.getgrnam(group_name)
 | 
			
		||||
        log('group {0} already exists!'.format(group_name))
 | 
			
		||||
    except KeyError:
 | 
			
		||||
        log('creating group {0}'.format(group_name))
 | 
			
		||||
        cmd = ['addgroup']
 | 
			
		||||
        if system_group:
 | 
			
		||||
            cmd.append('--system')
 | 
			
		||||
        else:
 | 
			
		||||
            cmd.extend([
 | 
			
		||||
                '--group',
 | 
			
		||||
            ])
 | 
			
		||||
        cmd.append(group_name)
 | 
			
		||||
        subprocess.check_call(cmd)
 | 
			
		||||
        group_info = grp.getgrnam(group_name)
 | 
			
		||||
    return group_info
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def add_user_to_group(username, group):
 | 
			
		||||
    """Add a user to a group"""
 | 
			
		||||
    cmd = ['gpasswd', '-a', username, group]
 | 
			
		||||
    log("Adding user {} to group {}".format(username, group))
 | 
			
		||||
    subprocess.check_call(cmd)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def rsync(from_path, to_path, flags='-r', options=None):
 | 
			
		||||
    """Replicate the contents of a path"""
 | 
			
		||||
    options = options or ['--delete', '--executability']
 | 
			
		||||
    cmd = ['/usr/bin/rsync', flags]
 | 
			
		||||
    cmd.extend(options)
 | 
			
		||||
    cmd.append(from_path)
 | 
			
		||||
    cmd.append(to_path)
 | 
			
		||||
    log(" ".join(cmd))
 | 
			
		||||
    return subprocess.check_output(cmd).decode('UTF-8').strip()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def symlink(source, destination):
 | 
			
		||||
    """Create a symbolic link"""
 | 
			
		||||
    log("Symlinking {} as {}".format(source, destination))
 | 
			
		||||
    cmd = [
 | 
			
		||||
        'ln',
 | 
			
		||||
        '-sf',
 | 
			
		||||
        source,
 | 
			
		||||
        destination,
 | 
			
		||||
    ]
 | 
			
		||||
    subprocess.check_call(cmd)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
 | 
			
		||||
    """Create a directory"""
 | 
			
		||||
    log("Making dir {} {}:{} {:o}".format(path, owner, group,
 | 
			
		||||
                                          perms))
 | 
			
		||||
    uid = pwd.getpwnam(owner).pw_uid
 | 
			
		||||
    gid = grp.getgrnam(group).gr_gid
 | 
			
		||||
    realpath = os.path.abspath(path)
 | 
			
		||||
    path_exists = os.path.exists(realpath)
 | 
			
		||||
    if path_exists and force:
 | 
			
		||||
        if not os.path.isdir(realpath):
 | 
			
		||||
            log("Removing non-directory file {} prior to mkdir()".format(path))
 | 
			
		||||
            os.unlink(realpath)
 | 
			
		||||
            os.makedirs(realpath, perms)
 | 
			
		||||
    elif not path_exists:
 | 
			
		||||
        os.makedirs(realpath, perms)
 | 
			
		||||
    os.chown(realpath, uid, gid)
 | 
			
		||||
    os.chmod(realpath, perms)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def write_file(path, content, owner='root', group='root', perms=0o444):
 | 
			
		||||
    """Create or overwrite a file with the contents of a byte string."""
 | 
			
		||||
    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
 | 
			
		||||
    uid = pwd.getpwnam(owner).pw_uid
 | 
			
		||||
    gid = grp.getgrnam(group).gr_gid
 | 
			
		||||
    with open(path, 'wb') as target:
 | 
			
		||||
        os.fchown(target.fileno(), uid, gid)
 | 
			
		||||
        os.fchmod(target.fileno(), perms)
 | 
			
		||||
        target.write(content)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def fstab_remove(mp):
 | 
			
		||||
    """Remove the given mountpoint entry from /etc/fstab
 | 
			
		||||
    """
 | 
			
		||||
    return Fstab.remove_by_mountpoint(mp)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def fstab_add(dev, mp, fs, options=None):
 | 
			
		||||
    """Adds the given device entry to the /etc/fstab file
 | 
			
		||||
    """
 | 
			
		||||
    return Fstab.add(dev, mp, fs, options=options)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
 | 
			
		||||
    """Mount a filesystem at a particular mountpoint"""
 | 
			
		||||
    cmd_args = ['mount']
 | 
			
		||||
    if options is not None:
 | 
			
		||||
        cmd_args.extend(['-o', options])
 | 
			
		||||
    cmd_args.extend([device, mountpoint])
 | 
			
		||||
    try:
 | 
			
		||||
        subprocess.check_output(cmd_args)
 | 
			
		||||
    except subprocess.CalledProcessError as e:
 | 
			
		||||
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
    if persist:
 | 
			
		||||
        return fstab_add(device, mountpoint, filesystem, options=options)
 | 
			
		||||
    return True
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def umount(mountpoint, persist=False):
 | 
			
		||||
    """Unmount a filesystem"""
 | 
			
		||||
    cmd_args = ['umount', mountpoint]
 | 
			
		||||
    try:
 | 
			
		||||
        subprocess.check_output(cmd_args)
 | 
			
		||||
    except subprocess.CalledProcessError as e:
 | 
			
		||||
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
    if persist:
 | 
			
		||||
        return fstab_remove(mountpoint)
 | 
			
		||||
    return True
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def mounts():
 | 
			
		||||
    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
 | 
			
		||||
    with open('/proc/mounts') as f:
 | 
			
		||||
        # [['/mount/point','/dev/path'],[...]]
 | 
			
		||||
        system_mounts = [m[1::-1] for m in [l.strip().split()
 | 
			
		||||
                                            for l in f.readlines()]]
 | 
			
		||||
    return system_mounts
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def fstab_mount(mountpoint):
 | 
			
		||||
    """Mount filesystem using fstab"""
 | 
			
		||||
    cmd_args = ['mount', mountpoint]
 | 
			
		||||
    try:
 | 
			
		||||
        subprocess.check_output(cmd_args)
 | 
			
		||||
    except subprocess.CalledProcessError as e:
 | 
			
		||||
        log('Error mounting {}\n{}'.format(mountpoint, e.output))
 | 
			
		||||
        return False
 | 
			
		||||
    return True
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
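# --- Illustrative sketch (not part of charm-helpers): mounting a device and
# persisting it to /etc/fstab with the helpers above. The device path and
# mountpoint are example values. Shown commented-out.
#
#     if mount('/dev/vdb', '/srv/backup', options='noatime',
#              persist=True, filesystem='ext4'):
#         log('mounted /dev/vdb at /srv/backup')
#     if ['/srv/backup', '/dev/vdb'] in mounts():
#         log('mount is visible in /proc/mounts')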
def file_hash(path, hash_type='md5'):
 | 
			
		||||
    """
 | 
			
		||||
    Generate a hash checksum of the contents of 'path' or None if not found.
 | 
			
		||||
 | 
			
		||||
    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
 | 
			
		||||
                          such as md5, sha1, sha256, sha512, etc.
 | 
			
		||||
    """
 | 
			
		||||
    if os.path.exists(path):
 | 
			
		||||
        h = getattr(hashlib, hash_type)()
 | 
			
		||||
        with open(path, 'rb') as source:
 | 
			
		||||
            h.update(source.read())
 | 
			
		||||
        return h.hexdigest()
 | 
			
		||||
    else:
 | 
			
		||||
        return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def path_hash(path):
 | 
			
		||||
    """
 | 
			
		||||
    Generate a hash checksum of all files matching 'path'. Standard wildcards
 | 
			
		||||
    like '*' and '?' are supported, see documentation for the 'glob' module for
 | 
			
		||||
    more information.
 | 
			
		||||
 | 
			
		||||
    :return: dict: A { filename: hash } dictionary for all matched files.
 | 
			
		||||
                   Empty if none found.
 | 
			
		||||
    """
 | 
			
		||||
    return {
 | 
			
		||||
        filename: file_hash(filename)
 | 
			
		||||
        for filename in glob.iglob(path)
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def check_hash(path, checksum, hash_type='md5'):
 | 
			
		||||
    """
 | 
			
		||||
    Validate a file using a cryptographic checksum.
 | 
			
		||||
 | 
			
		||||
    :param str checksum: Value of the checksum used to validate the file.
 | 
			
		||||
    :param str hash_type: Hash algorithm used to generate `checksum`.
 | 
			
		||||
        Can be any hash algorithm supported by :mod:`hashlib`,
 | 
			
		||||
        such as md5, sha1, sha256, sha512, etc.
 | 
			
		||||
    :raises ChecksumError: If the file fails the checksum
 | 
			
		||||
 | 
			
		||||
    """
 | 
			
		||||
    actual_checksum = file_hash(path, hash_type)
 | 
			
		||||
    if checksum != actual_checksum:
 | 
			
		||||
        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class ChecksumError(ValueError):
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
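# --- Illustrative sketch (not part of charm-helpers): validating a fetched
# file with check_hash()/ChecksumError above. The path and checksum are
# made-up values for the example. Shown commented-out.
#
#     try:
#         check_hash('/var/tmp/plugin.tar.gz',
#                    'd41d8cd98f00b204e9800998ecf8427e', hash_type='md5')
#     except ChecksumError as e:
#         log('corrupt download: {}'.format(e), level='ERROR')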
def restart_on_change(restart_map, stopstart=False):
 | 
			
		||||
    """Restart services based on configuration files changing
 | 
			
		||||
 | 
			
		||||
    This function is used as a decorator, for example::
 | 
			
		||||
 | 
			
		||||
        @restart_on_change({
 | 
			
		||||
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ],
 | 
			
		||||
            '/etc/apache/sites-enabled/*': [ 'apache2' ]
 | 
			
		||||
            })
 | 
			
		||||
        def config_changed():
 | 
			
		||||
            pass  # your code here
 | 
			
		||||
 | 
			
		||||
    In this example, the cinder-api and cinder-volume services
 | 
			
		||||
    would be restarted if /etc/ceph/ceph.conf is changed by the
 | 
			
		||||
    config_changed function. The apache2 service would be
 | 
			
		||||
    restarted if any file matching the pattern got changed, created
 | 
			
		||||
    or removed. Standard wildcards are supported, see documentation
 | 
			
		||||
    for the 'glob' module for more information.
 | 
			
		||||
    """
 | 
			
		||||
    def wrap(f):
 | 
			
		||||
        def wrapped_f(*args, **kwargs):
 | 
			
		||||
            checksums = {path: path_hash(path) for path in restart_map}
 | 
			
		||||
            f(*args, **kwargs)
 | 
			
		||||
            restarts = []
 | 
			
		||||
            for path in restart_map:
 | 
			
		||||
                if path_hash(path) != checksums[path]:
 | 
			
		||||
                    restarts += restart_map[path]
 | 
			
		||||
            services_list = list(OrderedDict.fromkeys(restarts))
 | 
			
		||||
            if not stopstart:
 | 
			
		||||
                for service_name in services_list:
 | 
			
		||||
                    service('restart', service_name)
 | 
			
		||||
            else:
 | 
			
		||||
                for action in ['stop', 'start']:
 | 
			
		||||
                    for service_name in services_list:
 | 
			
		||||
                        service(action, service_name)
 | 
			
		||||
        return wrapped_f
 | 
			
		||||
    return wrap
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def lsb_release():
 | 
			
		||||
    """Return /etc/lsb-release in a dict"""
 | 
			
		||||
    d = {}
 | 
			
		||||
    with open('/etc/lsb-release', 'r') as lsb:
 | 
			
		||||
        for l in lsb:
 | 
			
		||||
            k, v = l.split('=')
 | 
			
		||||
            d[k.strip()] = v.strip()
 | 
			
		||||
    return d
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def pwgen(length=None):
 | 
			
		||||
    """Generate a random pasword."""
 | 
			
		||||
    if length is None:
 | 
			
		||||
        # A weak PRNG is fine when only picking a random length
 | 
			
		||||
        length = random.choice(range(35, 45))
 | 
			
		||||
    alphanumeric_chars = [
 | 
			
		||||
        l for l in (string.ascii_letters + string.digits)
 | 
			
		||||
        if l not in 'l0QD1vAEIOUaeiou']
 | 
			
		||||
    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
 | 
			
		||||
    # actual password
 | 
			
		||||
    random_generator = random.SystemRandom()
 | 
			
		||||
    random_chars = [
 | 
			
		||||
        random_generator.choice(alphanumeric_chars) for _ in range(length)]
 | 
			
		||||
    return(''.join(random_chars))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_phy_iface(interface):
 | 
			
		||||
    """Returns True if interface is not virtual, otherwise False."""
 | 
			
		||||
    if interface:
 | 
			
		||||
        sys_net = '/sys/class/net'
 | 
			
		||||
        if os.path.isdir(sys_net):
 | 
			
		||||
            for iface in glob.glob(os.path.join(sys_net, '*')):
 | 
			
		||||
                if '/virtual/' in os.path.realpath(iface):
 | 
			
		||||
                    continue
 | 
			
		||||
 | 
			
		||||
                if interface == os.path.basename(iface):
 | 
			
		||||
                    return True
 | 
			
		||||
 | 
			
		||||
    return False
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_bond_master(interface):
 | 
			
		||||
    """Returns bond master if interface is bond slave otherwise None.
 | 
			
		||||
 | 
			
		||||
    NOTE: the provided interface is expected to be physical
 | 
			
		||||
    """
 | 
			
		||||
    if interface:
 | 
			
		||||
        iface_path = '/sys/class/net/%s' % (interface)
 | 
			
		||||
        if os.path.exists(iface_path):
 | 
			
		||||
            if '/virtual/' in os.path.realpath(iface_path):
 | 
			
		||||
                return None
 | 
			
		||||
 | 
			
		||||
            master = os.path.join(iface_path, 'master')
 | 
			
		||||
            if os.path.exists(master):
 | 
			
		||||
                master = os.path.realpath(master)
 | 
			
		||||
                # make sure it is a bond master
 | 
			
		||||
                if os.path.exists(os.path.join(master, 'bonding')):
 | 
			
		||||
                    return os.path.basename(master)
 | 
			
		||||
 | 
			
		||||
    return None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def list_nics(nic_type=None):
 | 
			
		||||
    '''Return a list of nics of given type(s)'''
 | 
			
		||||
    if isinstance(nic_type, six.string_types):
 | 
			
		||||
        int_types = [nic_type]
 | 
			
		||||
    else:
 | 
			
		||||
        int_types = nic_type
 | 
			
		||||
 | 
			
		||||
    interfaces = []
 | 
			
		||||
    if nic_type:
 | 
			
		||||
        for int_type in int_types:
 | 
			
		||||
            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
 | 
			
		||||
            ip_output = subprocess.check_output(cmd).decode('UTF-8')
 | 
			
		||||
            ip_output = ip_output.split('\n')
 | 
			
		||||
            ip_output = (line for line in ip_output if line)
 | 
			
		||||
            for line in ip_output:
 | 
			
		||||
                if line.split()[1].startswith(int_type):
 | 
			
		||||
                    matched = re.search('.*: (' + int_type +
 | 
			
		||||
                                        r'[0-9]+\.[0-9]+)@.*', line)
 | 
			
		||||
                    if matched:
 | 
			
		||||
                        iface = matched.groups()[0]
 | 
			
		||||
                    else:
 | 
			
		||||
                        iface = line.split()[1].replace(":", "")
 | 
			
		||||
 | 
			
		||||
                    if iface not in interfaces:
 | 
			
		||||
                        interfaces.append(iface)
 | 
			
		||||
    else:
 | 
			
		||||
        cmd = ['ip', 'a']
 | 
			
		||||
        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
 | 
			
		||||
        ip_output = (line.strip() for line in ip_output if line)
 | 
			
		||||
 | 
			
		||||
        key = re.compile(r'^[0-9]+:\s+(.+):')
 | 
			
		||||
        for line in ip_output:
 | 
			
		||||
            matched = re.search(key, line)
 | 
			
		||||
            if matched:
 | 
			
		||||
                iface = matched.group(1)
 | 
			
		||||
                iface = iface.partition("@")[0]
 | 
			
		||||
                if iface not in interfaces:
 | 
			
		||||
                    interfaces.append(iface)
 | 
			
		||||
 | 
			
		||||
    return interfaces
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def set_nic_mtu(nic, mtu):
 | 
			
		||||
    '''Set MTU on a network interface'''
 | 
			
		||||
    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
 | 
			
		||||
    subprocess.check_call(cmd)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_nic_mtu(nic):
 | 
			
		||||
    cmd = ['ip', 'addr', 'show', nic]
 | 
			
		||||
    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
 | 
			
		||||
    mtu = ""
 | 
			
		||||
    for line in ip_output:
 | 
			
		||||
        words = line.split()
 | 
			
		||||
        if 'mtu' in words:
 | 
			
		||||
            mtu = words[words.index("mtu") + 1]
 | 
			
		||||
    return mtu
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_nic_hwaddr(nic):
 | 
			
		||||
    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
 | 
			
		||||
    ip_output = subprocess.check_output(cmd).decode('UTF-8')
 | 
			
		||||
    hwaddr = ""
 | 
			
		||||
    words = ip_output.split()
 | 
			
		||||
    if 'link/ether' in words:
 | 
			
		||||
        hwaddr = words[words.index('link/ether') + 1]
 | 
			
		||||
    return hwaddr
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def cmp_pkgrevno(package, revno, pkgcache=None):
 | 
			
		||||
    '''Compare supplied revno with the revno of the installed package
 | 
			
		||||
 | 
			
		||||
    *  1 => Installed revno is greater than supplied arg
 | 
			
		||||
    *  0 => Installed revno is the same as supplied arg
 | 
			
		||||
    * -1 => Installed revno is less than supplied arg
 | 
			
		||||
 | 
			
		||||
    This function imports apt_cache function from charmhelpers.fetch if
 | 
			
		||||
    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
 | 
			
		||||
    you call this function, or pass an apt_pkg.Cache() instance.
 | 
			
		||||
    '''
 | 
			
		||||
    import apt_pkg
 | 
			
		||||
    if not pkgcache:
 | 
			
		||||
        from charmhelpers.fetch import apt_cache
 | 
			
		||||
        pkgcache = apt_cache()
 | 
			
		||||
    pkg = pkgcache[package]
 | 
			
		||||
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
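# --- Illustrative sketch (not part of charm-helpers): branching on the
# installed package version with cmp_pkgrevno() above. '0.94.0' is an example
# revision string. Shown commented-out.
#
#     if cmp_pkgrevno('ceph-common', '0.94.0') >= 0:
#         log('ceph-common is at least 0.94.0')
#     else:
#         log('older ceph-common detected')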
@contextmanager
 | 
			
		||||
def chdir(d):
 | 
			
		||||
    cur = os.getcwd()
 | 
			
		||||
    try:
 | 
			
		||||
        yield os.chdir(d)
 | 
			
		||||
    finally:
 | 
			
		||||
        os.chdir(cur)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def chownr(path, owner, group, follow_links=True):
 | 
			
		||||
    uid = pwd.getpwnam(owner).pw_uid
 | 
			
		||||
    gid = grp.getgrnam(group).gr_gid
 | 
			
		||||
    if follow_links:
 | 
			
		||||
        chown = os.chown
 | 
			
		||||
    else:
 | 
			
		||||
        chown = os.lchown
 | 
			
		||||
 | 
			
		||||
    for root, dirs, files in os.walk(path):
 | 
			
		||||
        for name in dirs + files:
 | 
			
		||||
            full = os.path.join(root, name)
 | 
			
		||||
            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
 | 
			
		||||
            if not broken_symlink:
 | 
			
		||||
                chown(full, uid, gid)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def lchownr(path, owner, group):
 | 
			
		||||
    chownr(path, owner, group, follow_links=False)
 | 
			
		||||
							
								
								
									
69	hooks/charmhelpers/core/hugepage.py	Normal file
@@ -0,0 +1,69 @@
 | 
			
		||||
# -*- coding: utf-8 -*-
 | 
			
		||||
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import yaml
 | 
			
		||||
from charmhelpers.core import fstab
 | 
			
		||||
from charmhelpers.core import sysctl
 | 
			
		||||
from charmhelpers.core.host import (
 | 
			
		||||
    add_group,
 | 
			
		||||
    add_user_to_group,
 | 
			
		||||
    fstab_mount,
 | 
			
		||||
    mkdir,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.core.strutils import bytes_from_string
 | 
			
		||||
from subprocess import check_output
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
 | 
			
		||||
                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
 | 
			
		||||
                     pagesize='2MB', mount=True, set_shmmax=False):
 | 
			
		||||
    """Enable hugepages on system.
 | 
			
		||||
 | 
			
		||||
    Args:
 | 
			
		||||
    user (str)  -- Username to grant access to hugepages
 | 
			
		||||
    group (str) -- Group name to own hugepages
 | 
			
		||||
    nr_hugepages (int) -- Number of pages to reserve
 | 
			
		||||
    max_map_count (int) -- Number of Virtual Memory Areas a process can own
 | 
			
		||||
    mnt_point (str) -- Directory to mount hugepages on
 | 
			
		||||
    pagesize (str) -- Size of hugepages
 | 
			
		||||
    mount (bool) -- Whether to mount hugepages
 | 
			
		||||
    """
 | 
			
		||||
    group_info = add_group(group)
 | 
			
		||||
    gid = group_info.gr_gid
 | 
			
		||||
    add_user_to_group(user, group)
 | 
			
		||||
    sysctl_settings = {
 | 
			
		||||
        'vm.nr_hugepages': nr_hugepages,
 | 
			
		||||
        'vm.max_map_count': max_map_count,
 | 
			
		||||
        'vm.hugetlb_shm_group': gid,
 | 
			
		||||
    }
 | 
			
		||||
    if set_shmmax:
 | 
			
		||||
        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
 | 
			
		||||
        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
 | 
			
		||||
        if shmmax_minsize > shmmax_current:
 | 
			
		||||
            sysctl_settings['kernel.shmmax'] = shmmax_minsize
 | 
			
		||||
    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
 | 
			
		||||
    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
 | 
			
		||||
    lfstab = fstab.Fstab()
 | 
			
		||||
    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
 | 
			
		||||
    if fstab_entry:
 | 
			
		||||
        lfstab.remove_entry(fstab_entry)
 | 
			
		||||
    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
 | 
			
		||||
                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
 | 
			
		||||
    lfstab.add_entry(entry)
 | 
			
		||||
    if mount:
 | 
			
		||||
        fstab_mount(mnt_point)
 | 
			
		||||
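# --- Illustrative sketch (not part of charm-helpers): a typical call to
# hugepage_support() for a KVM host, relying on the defaults above for the
# mount point and page size. The user name is an example value. Shown
# commented-out.
#
#     hugepage_support('libvirt-qemu', group='hugetlb',
#                      nr_hugepages=512, mount=True)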
							
								
								
									
68	hooks/charmhelpers/core/kernel.py	Normal file
@@ -0,0 +1,68 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
# -*- coding: utf-8 -*-
 | 
			
		||||
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    log,
 | 
			
		||||
    INFO
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
from subprocess import check_call, check_output
 | 
			
		||||
import re
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def modprobe(module, persist=True):
 | 
			
		||||
    """Load a kernel module and configure for auto-load on reboot."""
 | 
			
		||||
    cmd = ['modprobe', module]
 | 
			
		||||
 | 
			
		||||
    log('Loading kernel module %s' % module, level=INFO)
 | 
			
		||||
 | 
			
		||||
    check_call(cmd)
 | 
			
		||||
    if persist:
 | 
			
		||||
        with open('/etc/modules', 'r+') as modules:
 | 
			
		||||
            if module not in modules.read():
 | 
			
		||||
                modules.write(module)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def rmmod(module, force=False):
 | 
			
		||||
    """Remove a module from the linux kernel"""
 | 
			
		||||
    cmd = ['rmmod']
 | 
			
		||||
    if force:
 | 
			
		||||
        cmd.append('-f')
 | 
			
		||||
    cmd.append(module)
 | 
			
		||||
    log('Removing kernel module %s' % module, level=INFO)
 | 
			
		||||
    return check_call(cmd)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def lsmod():
 | 
			
		||||
    """Shows what kernel modules are currently loaded"""
 | 
			
		||||
    return check_output(['lsmod'],
 | 
			
		||||
                        universal_newlines=True)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def is_module_loaded(module):
 | 
			
		||||
    """Checks if a kernel module is already loaded"""
 | 
			
		||||
    matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
 | 
			
		||||
    return len(matches) > 0
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def update_initramfs(version='all'):
 | 
			
		||||
    """Updates an initramfs image"""
 | 
			
		||||
    return check_call(["update-initramfs", "-k", version, "-u"])
 | 
			
		||||
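# --- Illustrative sketch (not part of charm-helpers): loading a kernel module
# only when it is missing, using the helpers above. 'rbd' is an example module
# name. Shown commented-out.
#
#     if not is_module_loaded('rbd'):
#         modprobe('rbd', persist=True)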
							
								
								
									
18	hooks/charmhelpers/core/services/__init__.py	Normal file
@@ -0,0 +1,18 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
from .base import *  # NOQA
 | 
			
		||||
from .helpers import *  # NOQA
 | 
			
		||||
							
								
								
									
353	hooks/charmhelpers/core/services/base.py	Normal file
@@ -0,0 +1,353 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import json
 | 
			
		||||
from inspect import getargspec
 | 
			
		||||
from collections import Iterable, OrderedDict
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core import host
 | 
			
		||||
from charmhelpers.core import hookenv
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
__all__ = ['ServiceManager', 'ManagerCallback',
 | 
			
		||||
           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
 | 
			
		||||
           'service_restart', 'service_stop']
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class ServiceManager(object):
 | 
			
		||||
    def __init__(self, services=None):
 | 
			
		||||
        """
 | 
			
		||||
        Register a list of services, given their definitions.
 | 
			
		||||
 | 
			
		||||
        Service definitions are dicts in the following formats (all keys except
 | 
			
		||||
        'service' are optional)::
 | 
			
		||||
 | 
			
		||||
            {
 | 
			
		||||
                "service": <service name>,
 | 
			
		||||
                "required_data": <list of required data contexts>,
 | 
			
		||||
                "provided_data": <list of provided data contexts>,
 | 
			
		||||
                "data_ready": <one or more callbacks>,
 | 
			
		||||
                "data_lost": <one or more callbacks>,
 | 
			
		||||
                "start": <one or more callbacks>,
 | 
			
		||||
                "stop": <one or more callbacks>,
 | 
			
		||||
                "ports": <list of ports to manage>,
 | 
			
		||||
            }
 | 
			
		||||
 | 
			
		||||
        The 'required_data' list should contain dicts of required data (or
 | 
			
		||||
        dependency managers that act like dicts and know how to collect the data).
 | 
			
		||||
        Only when all items in the 'required_data' list are populated are the list
 | 
			
		||||
        of 'data_ready' and 'start' callbacks executed.  See `is_ready()` for more
 | 
			
		||||
        information.
 | 
			
		||||
 | 
			
		||||
        The 'provided_data' list should contain relation data providers, most likely
 | 
			
		||||
        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
 | 
			
		||||
        that will indicate a set of data to set on a given relation.
 | 
			
		||||
 | 
			
		||||
        The 'data_ready' value should be either a single callback, or a list of
 | 
			
		||||
        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
 | 
			
		||||
        Each callback will be called with the service name as the only parameter.
 | 
			
		||||
        After all of the 'data_ready' callbacks are called, the 'start' callbacks
 | 
			
		||||
        are fired.
 | 
			
		||||
 | 
			
		||||
        The 'data_lost' value should be either a single callback, or a list of
 | 
			
		||||
        callbacks, to be called when a 'required_data' item no longer passes
 | 
			
		||||
        `is_ready()`.  Each callback will be called with the service name as the
 | 
			
		||||
        only parameter.  After all of the 'data_lost' callbacks are called,
 | 
			
		||||
        the 'stop' callbacks are fired.
 | 
			
		||||
 | 
			
		||||
        The 'start' value should be either a single callback, or a list of
 | 
			
		||||
        callbacks, to be called when starting the service, after the 'data_ready'
 | 
			
		||||
        callbacks are complete.  Each callback will be called with the service
 | 
			
		||||
        name as the only parameter.  This defaults to
 | 
			
		||||
        `[host.service_start, services.open_ports]`.
 | 
			
		||||
 | 
			
		||||
        The 'stop' value should be either a single callback, or a list of
 | 
			
		||||
        callbacks, to be called when stopping the service.  If the service is
 | 
			
		||||
        being stopped because it no longer has all of its 'required_data', this
 | 
			
		||||
        will be called after all of the 'data_lost' callbacks are complete.
 | 
			
		||||
        Each callback will be called with the service name as the only parameter.
 | 
			
		||||
        This defaults to `[services.close_ports, host.service_stop]`.
 | 
			
		||||
 | 
			
		||||
        The 'ports' value should be a list of ports to manage.  The default
 | 
			
		||||
        'start' handler will open the ports after the service is started,
 | 
			
		||||
        and the default 'stop' handler will close the ports prior to stopping
 | 
			
		||||
        the service.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
        Examples:
 | 
			
		||||
 | 
			
		||||
        The following registers an Upstart service called bingod that depends on
 | 
			
		||||
        a mongodb relation and which runs a custom `db_migrate` function prior to
 | 
			
		||||
        restarting the service, and a Runit service called spadesd::
 | 
			
		||||
 | 
			
		||||
            manager = services.ServiceManager([
 | 
			
		||||
                {
 | 
			
		||||
                    'service': 'bingod',
 | 
			
		||||
                    'ports': [80, 443],
 | 
			
		||||
                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
 | 
			
		||||
                    'data_ready': [
 | 
			
		||||
                        services.template(source='bingod.conf'),
 | 
			
		||||
                        services.template(source='bingod.ini',
 | 
			
		||||
                                          target='/etc/bingod.ini',
 | 
			
		||||
                                          owner='bingo', perms=0400),
 | 
			
		||||
                    ],
 | 
			
		||||
                },
 | 
			
		||||
                {
 | 
			
		||||
                    'service': 'spadesd',
 | 
			
		||||
                    'data_ready': services.template(source='spadesd_run.j2',
 | 
			
		||||
                                                    target='/etc/sv/spadesd/run',
 | 
			
		||||
                                                    perms=0555),
 | 
			
		||||
                    'start': runit_start,
 | 
			
		||||
                    'stop': runit_stop,
 | 
			
		||||
                },
 | 
			
		||||
            ])
 | 
			
		||||
            manager.manage()
 | 
			
		||||
        """
 | 
			
		||||
        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
 | 
			
		||||
        self._ready = None
 | 
			
		||||
        self.services = OrderedDict()
 | 
			
		||||
        for service in services or []:
 | 
			
		||||
            service_name = service['service']
 | 
			
		||||
            self.services[service_name] = service
 | 
			
		||||
 | 
			
		||||
    def manage(self):
 | 
			
		||||
        """
 | 
			
		||||
        Handle the current hook by doing The Right Thing with the registered services.
 | 
			
		||||
        """
 | 
			
		||||
        hookenv._run_atstart()
 | 
			
		||||
        try:
 | 
			
		||||
            hook_name = hookenv.hook_name()
 | 
			
		||||
            if hook_name == 'stop':
 | 
			
		||||
                self.stop_services()
 | 
			
		||||
            else:
 | 
			
		||||
                self.reconfigure_services()
 | 
			
		||||
                self.provide_data()
 | 
			
		||||
        except SystemExit as x:
 | 
			
		||||
            if x.code is None or x.code == 0:
 | 
			
		||||
                hookenv._run_atexit()
 | 
			
		||||
        hookenv._run_atexit()
 | 
			
		||||
 | 
			
		||||
    def provide_data(self):
 | 
			
		||||
        """
 | 
			
		||||
        Set the relation data for each provider in the ``provided_data`` list.
 | 
			
		||||
 | 
			
		||||
        A provider must have a `name` attribute, which indicates which relation
 | 
			
		||||
        to set data on, and a `provide_data()` method, which returns a dict of
 | 
			
		||||
        data to set.
 | 
			
		||||
 | 
			
		||||
        The `provide_data()` method can optionally accept two parameters:
 | 
			
		||||
 | 
			
		||||
          * ``remote_service`` The name of the remote service that the data will
 | 
			
		||||
            be provided to.  The `provide_data()` method will be called once
 | 
			
		||||
            for each connected service (not unit).  This allows the method to
 | 
			
		||||
            tailor its data to the given service.
 | 
			
		||||
          * ``service_ready`` Whether or not the service definition had all of
 | 
			
		||||
            its requirements met, and thus the ``data_ready`` callbacks run.
 | 
			
		||||
 | 
			
		||||
        Note that the ``provided_data`` methods are now called **after** the
 | 
			
		||||
        ``data_ready`` callbacks are run.  This gives the ``data_ready`` callbacks
 | 
			
		||||
        a chance to generate any data necessary for the providing to the remote
 | 
			
		||||
        services.
 | 
			
		||||
        """
 | 
			
		||||
        for service_name, service in self.services.items():
 | 
			
		||||
            service_ready = self.is_ready(service_name)
 | 
			
		||||
            for provider in service.get('provided_data', []):
 | 
			
		||||
                for relid in hookenv.relation_ids(provider.name):
 | 
			
		||||
                    units = hookenv.related_units(relid)
 | 
			
		||||
                    if not units:
 | 
			
		||||
                        continue
 | 
			
		||||
                    remote_service = units[0].split('/')[0]
 | 
			
		||||
                    argspec = getargspec(provider.provide_data)
 | 
			
		||||
                    if len(argspec.args) > 1:
 | 
			
		||||
                        data = provider.provide_data(remote_service, service_ready)
 | 
			
		||||
                    else:
 | 
			
		||||
                        data = provider.provide_data()
 | 
			
		||||
                    if data:
 | 
			
		||||
                        hookenv.relation_set(relid, data)
 | 
			
		||||
 | 
			
		||||
    def reconfigure_services(self, *service_names):
 | 
			
		||||
        """
 | 
			
		||||
        Update all files for one or more registered services, and,
 | 
			
		||||
        if ready, optionally restart them.
 | 
			
		||||
 | 
			
		||||
        If no service names are given, reconfigures all registered services.
 | 
			
		||||
        """
 | 
			
		||||
        for service_name in service_names or self.services.keys():
 | 
			
		||||
            if self.is_ready(service_name):
 | 
			
		||||
                self.fire_event('data_ready', service_name)
 | 
			
		||||
                self.fire_event('start', service_name, default=[
 | 
			
		||||
                    service_restart,
 | 
			
		||||
                    manage_ports])
 | 
			
		||||
                self.save_ready(service_name)
 | 
			
		||||
            else:
 | 
			
		||||
                if self.was_ready(service_name):
 | 
			
		||||
                    self.fire_event('data_lost', service_name)
 | 
			
		||||
                self.fire_event('stop', service_name, default=[
 | 
			
		||||
                    manage_ports,
 | 
			
		||||
                    service_stop])
 | 
			
		||||
                self.save_lost(service_name)
 | 
			
		||||
 | 
			
		||||
    def stop_services(self, *service_names):
 | 
			
		||||
        """
 | 
			
		||||
        Stop one or more registered services, by name.
 | 
			
		||||
 | 
			
		||||
        If no service names are given, stops all registered services.
 | 
			
		||||
        """
 | 
			
		||||
        for service_name in service_names or self.services.keys():
 | 
			
		||||
            self.fire_event('stop', service_name, default=[
 | 
			
		||||
                manage_ports,
 | 
			
		||||
                service_stop])
 | 
			
		||||
 | 
			
		||||
    def get_service(self, service_name):
 | 
			
		||||
        """
 | 
			
		||||
        Given the name of a registered service, return its service definition.
 | 
			
		||||
        """
 | 
			
		||||
        service = self.services.get(service_name)
 | 
			
		||||
        if not service:
 | 
			
		||||
            raise KeyError('Service not registered: %s' % service_name)
 | 
			
		||||
        return service
 | 
			
		||||
 | 
			
		||||
    def fire_event(self, event_name, service_name, default=None):
 | 
			
		||||
        """
 | 
			
		||||
        Fire a data_ready, data_lost, start, or stop event on a given service.
 | 
			
		||||
        """
 | 
			
		||||
        service = self.get_service(service_name)
 | 
			
		||||
        callbacks = service.get(event_name, default)
 | 
			
		||||
        if not callbacks:
 | 
			
		||||
            return
 | 
			
		||||
        if not isinstance(callbacks, Iterable):
 | 
			
		||||
            callbacks = [callbacks]
 | 
			
		||||
        for callback in callbacks:
 | 
			
		||||
            if isinstance(callback, ManagerCallback):
 | 
			
		||||
                callback(self, service_name, event_name)
 | 
			
		||||
            else:
 | 
			
		||||
                callback(service_name)
 | 
			
		||||
 | 
			
		||||
    def is_ready(self, service_name):
 | 
			
		||||
        """
 | 
			
		||||
        Determine if a registered service is ready, by checking its 'required_data'.
 | 
			
		||||
 | 
			
		||||
        A 'required_data' item can be any mapping type, and is considered ready
 | 
			
		||||
        if `bool(item)` evaluates as True.
 | 
			
		||||
        """
 | 
			
		||||
        service = self.get_service(service_name)
 | 
			
		||||
        reqs = service.get('required_data', [])
 | 
			
		||||
        return all(bool(req) for req in reqs)
 | 
			
		||||
 | 
			
		||||
    def _load_ready_file(self):
 | 
			
		||||
        if self._ready is not None:
 | 
			
		||||
            return
 | 
			
		||||
        if os.path.exists(self._ready_file):
 | 
			
		||||
            with open(self._ready_file) as fp:
 | 
			
		||||
                self._ready = set(json.load(fp))
 | 
			
		||||
        else:
 | 
			
		||||
            self._ready = set()
 | 
			
		||||
 | 
			
		||||
    def _save_ready_file(self):
 | 
			
		||||
        if self._ready is None:
 | 
			
		||||
            return
 | 
			
		||||
        with open(self._ready_file, 'w') as fp:
 | 
			
		||||
            json.dump(list(self._ready), fp)
 | 
			
		||||
 | 
			
		||||
    def save_ready(self, service_name):
 | 
			
		||||
        """
 | 
			
		||||
        Save an indicator that the given service is now data_ready.
 | 
			
		||||
        """
 | 
			
		||||
        self._load_ready_file()
 | 
			
		||||
        self._ready.add(service_name)
 | 
			
		||||
        self._save_ready_file()
 | 
			
		||||
 | 
			
		||||
    def save_lost(self, service_name):
 | 
			
		||||
        """
 | 
			
		||||
        Save an indicator that the given service is no longer data_ready.
 | 
			
		||||
        """
 | 
			
		||||
        self._load_ready_file()
 | 
			
		||||
        self._ready.discard(service_name)
 | 
			
		||||
        self._save_ready_file()
 | 
			
		||||
 | 
			
		||||
    def was_ready(self, service_name):
 | 
			
		||||
        """
 | 
			
		||||
        Determine if the given service was previously data_ready.
 | 
			
		||||
        """
 | 
			
		||||
        self._load_ready_file()
 | 
			
		||||
        return service_name in self._ready
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class ManagerCallback(object):
 | 
			
		||||
    """
 | 
			
		||||
    Special case of a callback that takes the `ServiceManager` instance
 | 
			
		||||
    in addition to the service name.
 | 
			
		||||
 | 
			
		||||
    Subclasses should implement `__call__` which should accept three parameters:
 | 
			
		||||
 | 
			
		||||
        * `manager`       The `ServiceManager` instance
 | 
			
		||||
        * `service_name`  The name of the service it's being triggered for
 | 
			
		||||
        * `event_name`    The name of the event that this callback is handling
 | 
			
		||||
    """
 | 
			
		||||
    def __call__(self, manager, service_name, event_name):
 | 
			
		||||
        raise NotImplementedError()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class PortManagerCallback(ManagerCallback):
 | 
			
		||||
    """
 | 
			
		||||
    Callback class that will open or close ports, for use as either
 | 
			
		||||
    a start or stop action.
 | 
			
		||||
    """
 | 
			
		||||
    def __call__(self, manager, service_name, event_name):
 | 
			
		||||
        service = manager.get_service(service_name)
 | 
			
		||||
        new_ports = service.get('ports', [])
 | 
			
		||||
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
 | 
			
		||||
        if os.path.exists(port_file):
 | 
			
		||||
            with open(port_file) as fp:
 | 
			
		||||
                old_ports = fp.read().split(',')
 | 
			
		||||
            for old_port in old_ports:
 | 
			
		||||
                if bool(old_port):
 | 
			
		||||
                    old_port = int(old_port)
 | 
			
		||||
                    if old_port not in new_ports:
 | 
			
		||||
                        hookenv.close_port(old_port)
 | 
			
		||||
        with open(port_file, 'w') as fp:
 | 
			
		||||
            fp.write(','.join(str(port) for port in new_ports))
 | 
			
		||||
        for port in new_ports:
 | 
			
		||||
            if event_name == 'start':
 | 
			
		||||
                hookenv.open_port(port)
 | 
			
		||||
            elif event_name == 'stop':
 | 
			
		||||
                hookenv.close_port(port)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def service_stop(service_name):
 | 
			
		||||
    """
 | 
			
		||||
    Wrapper around host.service_stop to prevent spurious "unknown service"
 | 
			
		||||
    messages in the logs.
 | 
			
		||||
    """
 | 
			
		||||
    if host.service_running(service_name):
 | 
			
		||||
        host.service_stop(service_name)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def service_restart(service_name):
 | 
			
		||||
    """
 | 
			
		||||
    Wrapper around host.service_restart to prevent spurious "unknown service"
 | 
			
		||||
    messages in the logs.
 | 
			
		||||
    """
 | 
			
		||||
    if host.service_available(service_name):
 | 
			
		||||
        if host.service_running(service_name):
 | 
			
		||||
            host.service_restart(service_name)
 | 
			
		||||
        else:
 | 
			
		||||
            host.service_start(service_name)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# Convenience aliases
 | 
			
		||||
open_ports = close_ports = manage_ports = PortManagerCallback()
 | 
			
		||||
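For orientation, here is a minimal sketch of how a charm's hooks would drive the ServiceManager defined above. The service name, port, config option and template file are assumptions for the example; only ServiceManager, the helpers module and its callbacks come from the files in this commit, and manage() dispatches based on the current hook name.

# --- illustrative sketch, not part of the commit ---
# Hypothetical hooks/services.py wiring one daemon to the framework above.
from charmhelpers.core.services.base import ServiceManager
from charmhelpers.core.services import helpers


def manage():
    manager = ServiceManager([
        {
            'service': 'my-daemon',                     # init service restarted on 'start'
            'ports': [8080],                            # opened/closed by manage_ports
            'required_data': [helpers.RequiredConfig('vip'),
                              helpers.HttpRelation()],  # all must be truthy before data_ready
            'provided_data': [helpers.HttpRelation()],  # provide_data() result is relation_set
            'data_ready': [
                helpers.render_template(source='my-daemon.conf',
                                        target='/etc/my-daemon.conf'),
            ],
        },
    ])
    manager.manage()


if __name__ == '__main__':
    manage()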
							
								
								
									
										283
									
								
								hooks/charmhelpers/core/services/helpers.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										283
									
								
								hooks/charmhelpers/core/services/helpers.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,283 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import yaml
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core import hookenv
 | 
			
		||||
from charmhelpers.core import host
 | 
			
		||||
from charmhelpers.core import templating
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.services.base import ManagerCallback
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
__all__ = ['RelationContext', 'TemplateCallback',
 | 
			
		||||
           'render_template', 'template']
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class RelationContext(dict):
 | 
			
		||||
    """
 | 
			
		||||
    Base class for a context generator that gets relation data from juju.
 | 
			
		||||
 | 
			
		||||
    Subclasses must provide the attributes `name`, which is the name of the
 | 
			
		||||
    relation of interest, `interface`, which is the type of the interface of
 | 
			
		||||
    interest, and `required_keys`, which is the set of keys required for the
 | 
			
		||||
    relation to be considered complete.  The data for all interfaces matching
 | 
			
		||||
    the `name` attribute that are complete will be used to populate the dictionary
 | 
			
		||||
    values (see `get_data`, below).
 | 
			
		||||
 | 
			
		||||
    The generated context will be namespaced under the relation :attr:`name`,
 | 
			
		||||
    to prevent potential naming conflicts.
 | 
			
		||||
 | 
			
		||||
    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
 | 
			
		||||
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
 | 
			
		||||
    """
 | 
			
		||||
    name = None
 | 
			
		||||
    interface = None
 | 
			
		||||
 | 
			
		||||
    def __init__(self, name=None, additional_required_keys=None):
 | 
			
		||||
        if not hasattr(self, 'required_keys'):
 | 
			
		||||
            self.required_keys = []
 | 
			
		||||
 | 
			
		||||
        if name is not None:
 | 
			
		||||
            self.name = name
 | 
			
		||||
        if additional_required_keys:
 | 
			
		||||
            self.required_keys.extend(additional_required_keys)
 | 
			
		||||
        self.get_data()
 | 
			
		||||
 | 
			
		||||
    def __bool__(self):
 | 
			
		||||
        """
 | 
			
		||||
        Returns True if all of the required_keys are available.
 | 
			
		||||
        """
 | 
			
		||||
        return self.is_ready()
 | 
			
		||||
 | 
			
		||||
    __nonzero__ = __bool__
 | 
			
		||||
 | 
			
		||||
    def __repr__(self):
 | 
			
		||||
        return super(RelationContext, self).__repr__()
 | 
			
		||||
 | 
			
		||||
    def is_ready(self):
 | 
			
		||||
        """
 | 
			
		||||
        Returns True if all of the `required_keys` are available from any units.
 | 
			
		||||
        """
 | 
			
		||||
        ready = len(self.get(self.name, [])) > 0
 | 
			
		||||
        if not ready:
 | 
			
		||||
            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
 | 
			
		||||
        return ready
 | 
			
		||||
 | 
			
		||||
    def _is_ready(self, unit_data):
 | 
			
		||||
        """
 | 
			
		||||
        Helper method that tests a set of relation data and returns True if
 | 
			
		||||
        all of the `required_keys` are present.
 | 
			
		||||
        """
 | 
			
		||||
        return set(unit_data.keys()).issuperset(set(self.required_keys))
 | 
			
		||||
 | 
			
		||||
    def get_data(self):
 | 
			
		||||
        """
 | 
			
		||||
        Retrieve the relation data for each unit involved in a relation and,
 | 
			
		||||
        if complete, store it in a list under `self[self.name]`.  This
 | 
			
		||||
        is automatically called when the RelationContext is instantiated.
 | 
			
		||||
 | 
			
		||||
        The units are sorted lexicographically first by the service ID, then by
 | 
			
		||||
        the unit ID.  Thus, if an interface has two other services, 'db:1'
 | 
			
		||||
        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
 | 
			
		||||
        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
 | 
			
		||||
        set of data, the relation data for the units will be stored in the
 | 
			
		||||
        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
 | 
			
		||||
 | 
			
		||||
        If you only care about a single unit on the relation, you can just
 | 
			
		||||
        access it as `{{ interface[0]['key'] }}`.  However, if you can at all
 | 
			
		||||
        support multiple units on a relation, you should iterate over the list,
 | 
			
		||||
        like::
 | 
			
		||||
 | 
			
		||||
            {% for unit in interface -%}
 | 
			
		||||
                {{ unit['key'] }}{% if not loop.last %},{% endif %}
 | 
			
		||||
            {%- endfor %}
 | 
			
		||||
 | 
			
		||||
        Note that since all sets of relation data from all related services and
 | 
			
		||||
        units are in a single list, if you need to know which service or unit a
 | 
			
		||||
        set of data came from, you'll need to extend this class to preserve
 | 
			
		||||
        that information.
 | 
			
		||||
        """
 | 
			
		||||
        if not hookenv.relation_ids(self.name):
 | 
			
		||||
            return
 | 
			
		||||
 | 
			
		||||
        ns = self.setdefault(self.name, [])
 | 
			
		||||
        for rid in sorted(hookenv.relation_ids(self.name)):
 | 
			
		||||
            for unit in sorted(hookenv.related_units(rid)):
 | 
			
		||||
                reldata = hookenv.relation_get(rid=rid, unit=unit)
 | 
			
		||||
                if self._is_ready(reldata):
 | 
			
		||||
                    ns.append(reldata)
 | 
			
		||||
 | 
			
		||||
    def provide_data(self):
 | 
			
		||||
        """
 | 
			
		||||
        Return data to be relation_set for this interface.
 | 
			
		||||
        """
 | 
			
		||||
        return {}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class MysqlRelation(RelationContext):
 | 
			
		||||
    """
 | 
			
		||||
    Relation context for the `mysql` interface.
 | 
			
		||||
 | 
			
		||||
    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
 | 
			
		||||
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
 | 
			
		||||
    """
 | 
			
		||||
    name = 'db'
 | 
			
		||||
    interface = 'mysql'
 | 
			
		||||
 | 
			
		||||
    def __init__(self, *args, **kwargs):
 | 
			
		||||
        self.required_keys = ['host', 'user', 'password', 'database']
 | 
			
		||||
        RelationContext.__init__(self, *args, **kwargs)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class HttpRelation(RelationContext):
 | 
			
		||||
    """
 | 
			
		||||
    Relation context for the `http` interface.
 | 
			
		||||
 | 
			
		||||
    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
 | 
			
		||||
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
 | 
			
		||||
    """
 | 
			
		||||
    name = 'website'
 | 
			
		||||
    interface = 'http'
 | 
			
		||||
 | 
			
		||||
    def __init__(self, *args, **kwargs):
 | 
			
		||||
        self.required_keys = ['host', 'port']
 | 
			
		||||
        RelationContext.__init__(self, *args, **kwargs)
 | 
			
		||||
 | 
			
		||||
    def provide_data(self):
 | 
			
		||||
        return {
 | 
			
		||||
            'host': hookenv.unit_get('private-address'),
 | 
			
		||||
            'port': 80,
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class RequiredConfig(dict):
 | 
			
		||||
    """
 | 
			
		||||
    Data context that loads config options with one or more mandatory options.
 | 
			
		||||
 | 
			
		||||
    Once the required options have been changed from their default values, all
 | 
			
		||||
    config options will be available, namespaced under `config` to prevent
 | 
			
		||||
    potential naming conflicts (for example, between a config option and a
 | 
			
		||||
    relation property).
 | 
			
		||||
 | 
			
		||||
    :param list *args: List of options that must be changed from their default values.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, *args):
 | 
			
		||||
        self.required_options = args
 | 
			
		||||
        self['config'] = hookenv.config()
 | 
			
		||||
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
 | 
			
		||||
            self.config = yaml.load(fp).get('options', {})
 | 
			
		||||
 | 
			
		||||
    def __bool__(self):
 | 
			
		||||
        for option in self.required_options:
 | 
			
		||||
            if option not in self['config']:
 | 
			
		||||
                return False
 | 
			
		||||
            current_value = self['config'][option]
 | 
			
		||||
            default_value = self.config[option].get('default')
 | 
			
		||||
            if current_value == default_value:
 | 
			
		||||
                return False
 | 
			
		||||
            if current_value in (None, '') and default_value in (None, ''):
 | 
			
		||||
                return False
 | 
			
		||||
        return True
 | 
			
		||||
 | 
			
		||||
    def __nonzero__(self):
 | 
			
		||||
        return self.__bool__()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class StoredContext(dict):
 | 
			
		||||
    """
 | 
			
		||||
    A data context that always returns the data that it was first created with.
 | 
			
		||||
 | 
			
		||||
    This is useful for one-time generation of things like passwords, which
 | 
			
		||||
    will thereafter use the same value that was originally generated, instead
 | 
			
		||||
    of generating a new value each time it is run.
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, file_name, config_data):
 | 
			
		||||
        """
 | 
			
		||||
        If the file exists, populate `self` with the data from the file.
 | 
			
		||||
        Otherwise, populate with the given data and persist it to the file.
 | 
			
		||||
        """
 | 
			
		||||
        if os.path.exists(file_name):
 | 
			
		||||
            self.update(self.read_context(file_name))
 | 
			
		||||
        else:
 | 
			
		||||
            self.store_context(file_name, config_data)
 | 
			
		||||
            self.update(config_data)
 | 
			
		||||
 | 
			
		||||
    def store_context(self, file_name, config_data):
 | 
			
		||||
        if not os.path.isabs(file_name):
 | 
			
		||||
            file_name = os.path.join(hookenv.charm_dir(), file_name)
 | 
			
		||||
        with open(file_name, 'w') as file_stream:
 | 
			
		||||
            os.fchmod(file_stream.fileno(), 0o600)
 | 
			
		||||
            yaml.dump(config_data, file_stream)
 | 
			
		||||
 | 
			
		||||
    def read_context(self, file_name):
 | 
			
		||||
        if not os.path.isabs(file_name):
 | 
			
		||||
            file_name = os.path.join(hookenv.charm_dir(), file_name)
 | 
			
		||||
        with open(file_name, 'r') as file_stream:
 | 
			
		||||
            data = yaml.load(file_stream)
 | 
			
		||||
            if not data:
 | 
			
		||||
                raise OSError("%s is empty" % file_name)
 | 
			
		||||
            return data
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class TemplateCallback(ManagerCallback):
 | 
			
		||||
    """
 | 
			
		||||
    Callback class that will render a Jinja2 template, for use as a ready
 | 
			
		||||
    action.
 | 
			
		||||
 | 
			
		||||
    :param str source: The template source file, relative to
 | 
			
		||||
        `$CHARM_DIR/templates`
 | 
			
		||||
 | 
			
		||||
    :param str target: The target to write the rendered template to
 | 
			
		||||
    :param str owner: The owner of the rendered file
 | 
			
		||||
    :param str group: The group of the rendered file
 | 
			
		||||
    :param int perms: The permissions of the rendered file
 | 
			
		||||
    :param partial on_change_action: functools partial to be executed when
 | 
			
		||||
                                     the rendered file changes
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, source, target,
 | 
			
		||||
                 owner='root', group='root', perms=0o444,
 | 
			
		||||
                 on_change_action=None):
 | 
			
		||||
        self.source = source
 | 
			
		||||
        self.target = target
 | 
			
		||||
        self.owner = owner
 | 
			
		||||
        self.group = group
 | 
			
		||||
        self.perms = perms
 | 
			
		||||
        self.on_change_action = on_change_action
 | 
			
		||||
 | 
			
		||||
    def __call__(self, manager, service_name, event_name):
 | 
			
		||||
        pre_checksum = ''
 | 
			
		||||
        if self.on_change_action and os.path.isfile(self.target):
 | 
			
		||||
            pre_checksum = host.file_hash(self.target)
 | 
			
		||||
        service = manager.get_service(service_name)
 | 
			
		||||
        context = {}
 | 
			
		||||
        for ctx in service.get('required_data', []):
 | 
			
		||||
            context.update(ctx)
 | 
			
		||||
        templating.render(self.source, self.target, context,
 | 
			
		||||
                          self.owner, self.group, self.perms)
 | 
			
		||||
        if self.on_change_action:
 | 
			
		||||
            if pre_checksum == host.file_hash(self.target):
 | 
			
		||||
                hookenv.log(
 | 
			
		||||
                    'No change detected: {}'.format(self.target),
 | 
			
		||||
                    hookenv.DEBUG)
 | 
			
		||||
            else:
 | 
			
		||||
                self.on_change_action()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# Convenience aliases for templates
 | 
			
		||||
render_template = template = TemplateCallback
 | 
			
		||||
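As a rough illustration of how the RelationContext machinery above is consumed, the sketch below defines a hypothetical subclass; the 'amqp' relation, 'rabbitmq' interface and required keys are assumptions, not interfaces used by this charm.

# --- illustrative sketch, not part of the commit ---
from charmhelpers.core.services.helpers import RelationContext


class AMQPRelation(RelationContext):
    name = 'amqp'                     # relation name in metadata.yaml (assumed)
    interface = 'rabbitmq'            # interface type (assumed)
    required_keys = ['hostname', 'password']


# Inside a hook: the context is ready once a related unit has set every key.
amqp = AMQPRelation()
if amqp:
    # Data is namespaced under the relation name, one dict per complete unit.
    hostname = amqp['amqp'][0]['hostname']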
							
								
								
									
										72
									
								
								hooks/charmhelpers/core/strutils.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										72
									
								
								hooks/charmhelpers/core/strutils.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,72 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
# -*- coding: utf-8 -*-
 | 
			
		||||
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
import re
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def bool_from_string(value):
 | 
			
		||||
    """Interpret string value as boolean.
 | 
			
		||||
 | 
			
		||||
    Returns True if the value translates to True, otherwise False.
 | 
			
		||||
    """
 | 
			
		||||
    if isinstance(value, six.string_types):
 | 
			
		||||
        value = six.text_type(value)
 | 
			
		||||
    else:
 | 
			
		||||
        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
 | 
			
		||||
        raise ValueError(msg)
 | 
			
		||||
 | 
			
		||||
    value = value.strip().lower()
 | 
			
		||||
 | 
			
		||||
    if value in ['y', 'yes', 'true', 't', 'on']:
 | 
			
		||||
        return True
 | 
			
		||||
    elif value in ['n', 'no', 'false', 'f', 'off']:
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
    msg = "Unable to interpret string value '%s' as boolean" % (value)
 | 
			
		||||
    raise ValueError(msg)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def bytes_from_string(value):
 | 
			
		||||
    """Interpret human readable string value as bytes.
 | 
			
		||||
 | 
			
		||||
    Returns int
 | 
			
		||||
    """
 | 
			
		||||
    BYTE_POWER = {
 | 
			
		||||
        'K': 1,
 | 
			
		||||
        'KB': 1,
 | 
			
		||||
        'M': 2,
 | 
			
		||||
        'MB': 2,
 | 
			
		||||
        'G': 3,
 | 
			
		||||
        'GB': 3,
 | 
			
		||||
        'T': 4,
 | 
			
		||||
        'TB': 4,
 | 
			
		||||
        'P': 5,
 | 
			
		||||
        'PB': 5,
 | 
			
		||||
    }
 | 
			
		||||
    if isinstance(value, six.string_types):
 | 
			
		||||
        value = six.text_type(value)
 | 
			
		||||
    else:
 | 
			
		||||
        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
 | 
			
		||||
        raise ValueError(msg)
 | 
			
		||||
    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
 | 
			
		||||
    if not matches:
 | 
			
		||||
        msg = "Unable to interpret string value '%s' as bytes" % (value)
 | 
			
		||||
        raise ValueError(msg)
 | 
			
		||||
    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
 | 
			
		||||
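A quick sketch of how the two helpers above behave; the values in the comments are what the code computes.

# --- illustrative sketch, not part of the commit ---
from charmhelpers.core.strutils import bool_from_string, bytes_from_string

bool_from_string('Yes')     # True  (accepts y/yes/true/t/on, case-insensitive)
bool_from_string('off')     # False (accepts n/no/false/f/off)
bytes_from_string('512K')   # 524288      (512 * 1024 ** 1)
bytes_from_string('2GB')    # 2147483648  (2 * 1024 ** 3)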
							
								
								
									
										56
									
								
								hooks/charmhelpers/core/sysctl.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										56
									
								
								hooks/charmhelpers/core/sysctl.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,56 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
# -*- coding: utf-8 -*-
 | 
			
		||||
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import yaml
 | 
			
		||||
 | 
			
		||||
from subprocess import check_call
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    log,
 | 
			
		||||
    DEBUG,
 | 
			
		||||
    ERROR,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def create(sysctl_dict, sysctl_file):
 | 
			
		||||
    """Creates a sysctl.conf file from a YAML associative array
 | 
			
		||||
 | 
			
		||||
    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
 | 
			
		||||
    :type sysctl_dict: str
 | 
			
		||||
    :param sysctl_file: path to the sysctl file to be saved
 | 
			
		||||
    :type sysctl_file: str or unicode
 | 
			
		||||
    :returns: None
 | 
			
		||||
    """
 | 
			
		||||
    try:
 | 
			
		||||
        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
 | 
			
		||||
    except yaml.YAMLError:
 | 
			
		||||
        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
 | 
			
		||||
            level=ERROR)
 | 
			
		||||
        return
 | 
			
		||||
 | 
			
		||||
    with open(sysctl_file, "w") as fd:
 | 
			
		||||
        for key, value in sysctl_dict_parsed.items():
 | 
			
		||||
            fd.write("{}={}\n".format(key, value))
 | 
			
		||||
 | 
			
		||||
    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
 | 
			
		||||
        level=DEBUG)
 | 
			
		||||
 | 
			
		||||
    check_call(["sysctl", "-p", sysctl_file])
 | 
			
		||||
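For reference, a minimal call of create() as described above; the option names and target path are assumptions for the example, and since the function writes the file and then runs `sysctl -p` on it, it needs root.

# --- illustrative sketch, not part of the commit ---
from charmhelpers.core.sysctl import create

create("{'net.ipv4.ip_forward': 1, 'vm.swappiness': 10}",
       '/etc/sysctl.d/50-example.conf')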
							
								
								
									
										68
									
								
								hooks/charmhelpers/core/templating.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										68
									
								
								hooks/charmhelpers/core/templating.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,68 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core import host
 | 
			
		||||
from charmhelpers.core import hookenv
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def render(source, target, context, owner='root', group='root',
 | 
			
		||||
           perms=0o444, templates_dir=None, encoding='UTF-8'):
 | 
			
		||||
    """
 | 
			
		||||
    Render a template.
 | 
			
		||||
 | 
			
		||||
    The `source` path, if not absolute, is relative to the `templates_dir`.
 | 
			
		||||
 | 
			
		||||
    The `target` path should be absolute.
 | 
			
		||||
 | 
			
		||||
    The context should be a dict containing the values to be replaced in the
 | 
			
		||||
    template.
 | 
			
		||||
 | 
			
		||||
    The `owner`, `group`, and `perms` options will be passed to `write_file`.
 | 
			
		||||
 | 
			
		||||
    If omitted, `templates_dir` defaults to the `templates` folder in the charm.
 | 
			
		||||
 | 
			
		||||
    Note: Using this requires python-jinja2; if it is not installed, calling
 | 
			
		||||
    this will attempt to use charmhelpers.fetch.apt_install to install it.
 | 
			
		||||
    """
 | 
			
		||||
    try:
 | 
			
		||||
        from jinja2 import FileSystemLoader, Environment, exceptions
 | 
			
		||||
    except ImportError:
 | 
			
		||||
        try:
 | 
			
		||||
            from charmhelpers.fetch import apt_install
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            hookenv.log('Could not import jinja2, and could not import '
 | 
			
		||||
                        'charmhelpers.fetch to install it',
 | 
			
		||||
                        level=hookenv.ERROR)
 | 
			
		||||
            raise
 | 
			
		||||
        apt_install('python-jinja2', fatal=True)
 | 
			
		||||
        from jinja2 import FileSystemLoader, Environment, exceptions
 | 
			
		||||
 | 
			
		||||
    if templates_dir is None:
 | 
			
		||||
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
 | 
			
		||||
    loader = Environment(loader=FileSystemLoader(templates_dir))
 | 
			
		||||
    try:
 | 
			
		||||
 | 
			
		||||
        template = loader.get_template(source)
 | 
			
		||||
    except exceptions.TemplateNotFound as e:
 | 
			
		||||
        hookenv.log('Could not load template %s from %s.' %
 | 
			
		||||
                    (source, templates_dir),
 | 
			
		||||
                    level=hookenv.ERROR)
 | 
			
		||||
        raise e
 | 
			
		||||
    content = template.render(context)
 | 
			
		||||
    host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
 | 
			
		||||
    host.write_file(target, content.encode(encoding), owner, group, perms)
 | 
			
		||||
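A minimal use of render() above from inside a hook; the template name, target path and context keys are assumptions for the example.

# --- illustrative sketch, not part of the commit ---
from charmhelpers.core import templating

# Renders $CHARM_DIR/templates/example.conf to /etc/example.conf with 0644 perms.
templating.render(
    source='example.conf',
    target='/etc/example.conf',
    context={'listen_port': 8080, 'workers': 4},
    perms=0o644,
)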
							
								
								
									
										521
									
								
								hooks/charmhelpers/core/unitdata.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										521
									
								
								hooks/charmhelpers/core/unitdata.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,521 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
# -*- coding: utf-8 -*-
 | 
			
		||||
#
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
#
 | 
			
		||||
#
 | 
			
		||||
# Authors:
 | 
			
		||||
#  Kapil Thangavelu <kapil.foss@gmail.com>
 | 
			
		||||
#
 | 
			
		||||
"""
 | 
			
		||||
Intro
 | 
			
		||||
-----
 | 
			
		||||
 | 
			
		||||
A simple way to store state in units. This provides a key value
 | 
			
		||||
storage with support for versioned, transactional operation,
 | 
			
		||||
and can calculate deltas from previous values to simplify unit logic
 | 
			
		||||
when processing changes.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
Hook Integration
 | 
			
		||||
----------------
 | 
			
		||||
 | 
			
		||||
There are several extant frameworks for hook execution, including
 | 
			
		||||
 | 
			
		||||
 - charmhelpers.core.hookenv.Hooks
 | 
			
		||||
 - charmhelpers.core.services.ServiceManager
 | 
			
		||||
 | 
			
		||||
The storage classes are framework-agnostic; one simple integration is
 | 
			
		||||
via the HookData contextmanager. It will record the current hook
 | 
			
		||||
execution environment (including relation data, config data, etc.),
 | 
			
		||||
setup a transaction and allow easy access to the changes from
 | 
			
		||||
previously seen values. One consequence of the integration is the
 | 
			
		||||
reservation of particular keys ('rels', 'unit', 'env', 'config',
 | 
			
		||||
'charm_revisions') for their respective values.
 | 
			
		||||
 | 
			
		||||
Here's a fully worked integration example using hookenv.Hooks::
 | 
			
		||||
 | 
			
		||||
       from charmhelpers.core import hookenv, unitdata
 | 
			
		||||
 | 
			
		||||
       hook_data = unitdata.HookData()
 | 
			
		||||
       db = unitdata.kv()
 | 
			
		||||
       hooks = hookenv.Hooks()
 | 
			
		||||
 | 
			
		||||
       @hooks.hook
 | 
			
		||||
       def config_changed():
 | 
			
		||||
           # Print all changes to configuration from previously seen
 | 
			
		||||
           # values.
 | 
			
		||||
           for changed, (prev, cur) in hook_data.conf.items():
 | 
			
		||||
               print('config changed', changed,
 | 
			
		||||
                     'previous value', prev,
 | 
			
		||||
                     'current value',  cur)
 | 
			
		||||
 | 
			
		||||
           # Get some unit specific bookeeping
 | 
			
		||||
           if not db.get('pkg_key'):
 | 
			
		||||
               key = urllib.urlopen('https://example.com/pkg_key').read()
 | 
			
		||||
               db.set('pkg_key', key)
 | 
			
		||||
 | 
			
		||||
           # Directly access all charm config as a mapping.
 | 
			
		||||
           conf = db.getrange('config', True)
 | 
			
		||||
 | 
			
		||||
           # Directly access all relation data as a mapping
 | 
			
		||||
           rels = db.getrange('rels', True)
 | 
			
		||||
 | 
			
		||||
       if __name__ == '__main__':
 | 
			
		||||
           with hook_data():
 | 
			
		||||
           hooks.execute()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
A more basic integration is via the hook_scope context manager which simply
 | 
			
		||||
manages transaction scope (and records hook name, and timestamp)::
 | 
			
		||||
 | 
			
		||||
  >>> from unitdata import kv
 | 
			
		||||
  >>> db = kv()
 | 
			
		||||
  >>> with db.hook_scope('install'):
 | 
			
		||||
  ...    # do work, in transactional scope.
 | 
			
		||||
  ...    db.set('x', 1)
 | 
			
		||||
  >>> db.get('x')
 | 
			
		||||
  1
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
Usage
 | 
			
		||||
-----
 | 
			
		||||
 | 
			
		||||
Values are automatically json de/serialized to preserve basic typing
 | 
			
		||||
and complex data structure capabilities (dicts, lists, ints, booleans, etc.).
 | 
			
		||||
 | 
			
		||||
Individual values can be manipulated via get/set::
 | 
			
		||||
 | 
			
		||||
   >>> kv.set('y', True)
 | 
			
		||||
   >>> kv.get('y')
 | 
			
		||||
   True
 | 
			
		||||
 | 
			
		||||
   # We can set complex values (dicts, lists) as a single key.
 | 
			
		||||
   >>> kv.set('config', {'a': 1, 'b': True})
 | 
			
		||||
 | 
			
		||||
   # Also supports returning dictionaries as a record which
 | 
			
		||||
   # provides attribute access.
 | 
			
		||||
   >>> config = kv.get('config', record=True)
 | 
			
		||||
   >>> config.b
 | 
			
		||||
   True
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
Groups of keys can be manipulated with update/getrange::
 | 
			
		||||
 | 
			
		||||
   >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
 | 
			
		||||
   >>> kv.getrange('gui.', strip=True)
 | 
			
		||||
   {'z': 1, 'y': 2}
 | 
			
		||||
 | 
			
		||||
When updating values, it's very helpful to understand which values
 | 
			
		||||
have actually changed and how they have changed. The storage
 | 
			
		||||
provides a delta method for this::
 | 
			
		||||
 | 
			
		||||
   >>> data = {'debug': True, 'option': 2}
 | 
			
		||||
   >>> delta = kv.delta(data, 'config.')
 | 
			
		||||
   >>> delta.debug.previous
 | 
			
		||||
   None
 | 
			
		||||
   >>> delta.debug.current
 | 
			
		||||
   True
 | 
			
		||||
   >>> delta
 | 
			
		||||
   {'debug': (None, True), 'option': (None, 2)}
 | 
			
		||||
 | 
			
		||||
Note that the delta method does not persist the actual change; it needs to
 | 
			
		||||
be explicitly saved via the 'update' method::
 | 
			
		||||
 | 
			
		||||
   >>> kv.update(data, 'config.')
 | 
			
		||||
 | 
			
		||||
Values modified in the context of a hook scope retain historical values
 | 
			
		||||
associated with the hook name.
 | 
			
		||||
 | 
			
		||||
   >>> with db.hook_scope('config-changed'):
 | 
			
		||||
   ...      db.set('x', 42)
 | 
			
		||||
   >>> db.gethistory('x')
 | 
			
		||||
   [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
 | 
			
		||||
    (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
 | 
			
		||||
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
import collections
 | 
			
		||||
import contextlib
 | 
			
		||||
import datetime
 | 
			
		||||
import itertools
 | 
			
		||||
import json
 | 
			
		||||
import os
 | 
			
		||||
import pprint
 | 
			
		||||
import sqlite3
 | 
			
		||||
import sys
 | 
			
		||||
 | 
			
		||||
__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Storage(object):
 | 
			
		||||
    """Simple key value database for local unit state within charms.
 | 
			
		||||
 | 
			
		||||
    Modifications are not persisted unless :meth:`flush` is called.
 | 
			
		||||
 | 
			
		||||
    To support dicts, lists, integers, floats, and booleans, values
 | 
			
		||||
    are automatically json encoded/decoded.
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, path=None):
 | 
			
		||||
        self.db_path = path
 | 
			
		||||
        if path is None:
 | 
			
		||||
            if 'UNIT_STATE_DB' in os.environ:
 | 
			
		||||
                self.db_path = os.environ['UNIT_STATE_DB']
 | 
			
		||||
            else:
 | 
			
		||||
                self.db_path = os.path.join(
 | 
			
		||||
                    os.environ.get('CHARM_DIR', ''), '.unit-state.db')
 | 
			
		||||
        self.conn = sqlite3.connect('%s' % self.db_path)
 | 
			
		||||
        self.cursor = self.conn.cursor()
 | 
			
		||||
        self.revision = None
 | 
			
		||||
        self._closed = False
 | 
			
		||||
        self._init()
 | 
			
		||||
 | 
			
		||||
    def close(self):
 | 
			
		||||
        if self._closed:
 | 
			
		||||
            return
 | 
			
		||||
        self.flush(False)
 | 
			
		||||
        self.cursor.close()
 | 
			
		||||
        self.conn.close()
 | 
			
		||||
        self._closed = True
 | 
			
		||||
 | 
			
		||||
    def get(self, key, default=None, record=False):
 | 
			
		||||
        self.cursor.execute('select data from kv where key=?', [key])
 | 
			
		||||
        result = self.cursor.fetchone()
 | 
			
		||||
        if not result:
 | 
			
		||||
            return default
 | 
			
		||||
        if record:
 | 
			
		||||
            return Record(json.loads(result[0]))
 | 
			
		||||
        return json.loads(result[0])
 | 
			
		||||
 | 
			
		||||
    def getrange(self, key_prefix, strip=False):
 | 
			
		||||
        """
 | 
			
		||||
        Get a range of keys starting with a common prefix as a mapping of
 | 
			
		||||
        keys to values.
 | 
			
		||||
 | 
			
		||||
        :param str key_prefix: Common prefix among all keys
 | 
			
		||||
        :param bool strip: Optionally strip the common prefix from the key
 | 
			
		||||
            names in the returned dict
 | 
			
		||||
        :return dict: A (possibly empty) dict of key-value mappings
 | 
			
		||||
        """
 | 
			
		||||
        self.cursor.execute("select key, data from kv where key like ?",
 | 
			
		||||
                            ['%s%%' % key_prefix])
 | 
			
		||||
        result = self.cursor.fetchall()
 | 
			
		||||
 | 
			
		||||
        if not result:
 | 
			
		||||
            return {}
 | 
			
		||||
        if not strip:
 | 
			
		||||
            key_prefix = ''
 | 
			
		||||
        return dict([
 | 
			
		||||
            (k[len(key_prefix):], json.loads(v)) for k, v in result])
 | 
			
		||||
 | 
			
		||||
    def update(self, mapping, prefix=""):
 | 
			
		||||
        """
 | 
			
		||||
        Set the values of multiple keys at once.
 | 
			
		||||
 | 
			
		||||
        :param dict mapping: Mapping of keys to values
 | 
			
		||||
        :param str prefix: Optional prefix to apply to all keys in `mapping`
 | 
			
		||||
            before setting
 | 
			
		||||
        """
 | 
			
		||||
        for k, v in mapping.items():
 | 
			
		||||
            self.set("%s%s" % (prefix, k), v)
 | 
			
		||||
 | 
			
		||||
    def unset(self, key):
 | 
			
		||||
        """
 | 
			
		||||
        Remove a key from the database entirely.
 | 
			
		||||
        """
 | 
			
		||||
        self.cursor.execute('delete from kv where key=?', [key])
 | 
			
		||||
        if self.revision and self.cursor.rowcount:
 | 
			
		||||
            self.cursor.execute(
 | 
			
		||||
                'insert into kv_revisions values (?, ?, ?)',
 | 
			
		||||
                [key, self.revision, json.dumps('DELETED')])
 | 
			
		||||
 | 
			
		||||
    def unsetrange(self, keys=None, prefix=""):
 | 
			
		||||
        """
 | 
			
		||||
        Remove a range of keys starting with a common prefix, from the database
 | 
			
		||||
        entirely.
 | 
			
		||||
 | 
			
		||||
        :param list keys: List of keys to remove.
 | 
			
		||||
        :param str prefix: Optional prefix to apply to all keys in ``keys``
 | 
			
		||||
            before removing.
 | 
			
		||||
        """
 | 
			
		||||
        if keys is not None:
 | 
			
		||||
            keys = ['%s%s' % (prefix, key) for key in keys]
 | 
			
		||||
            self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
 | 
			
		||||
            if self.revision and self.cursor.rowcount:
 | 
			
		||||
                self.cursor.execute(
 | 
			
		||||
                    'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
 | 
			
		||||
                    list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
 | 
			
		||||
        else:
 | 
			
		||||
            self.cursor.execute('delete from kv where key like ?',
 | 
			
		||||
                                ['%s%%' % prefix])
 | 
			
		||||
            if self.revision and self.cursor.rowcount:
 | 
			
		||||
                self.cursor.execute(
 | 
			
		||||
                    'insert into kv_revisions values (?, ?, ?)',
 | 
			
		||||
                    ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
 | 
			
		||||
 | 
			
		||||
    def set(self, key, value):
 | 
			
		||||
        """
 | 
			
		||||
        Set a value in the database.
 | 
			
		||||
 | 
			
		||||
        :param str key: Key to set the value for
 | 
			
		||||
        :param value: Any JSON-serializable value to be set
 | 
			
		||||
        """
 | 
			
		||||
        serialized = json.dumps(value)
 | 
			
		||||
 | 
			
		||||
        self.cursor.execute('select data from kv where key=?', [key])
 | 
			
		||||
        exists = self.cursor.fetchone()
 | 
			
		||||
 | 
			
		||||
        # Skip mutations to the same value
 | 
			
		||||
        if exists:
 | 
			
		||||
            if exists[0] == serialized:
 | 
			
		||||
                return value
 | 
			
		||||
 | 
			
		||||
        if not exists:
 | 
			
		||||
            self.cursor.execute(
 | 
			
		||||
                'insert into kv (key, data) values (?, ?)',
 | 
			
		||||
                (key, serialized))
 | 
			
		||||
        else:
 | 
			
		||||
            self.cursor.execute('''
 | 
			
		||||
            update kv
 | 
			
		||||
            set data = ?
 | 
			
		||||
            where key = ?''', [serialized, key])
 | 
			
		||||
 | 
			
		||||
        # Save
 | 
			
		||||
        if not self.revision:
 | 
			
		||||
            return value
 | 
			
		||||
 | 
			
		||||
        self.cursor.execute(
 | 
			
		||||
            'select 1 from kv_revisions where key=? and revision=?',
 | 
			
		||||
            [key, self.revision])
 | 
			
		||||
        exists = self.cursor.fetchone()
 | 
			
		||||
 | 
			
		||||
        if not exists:
 | 
			
		||||
            self.cursor.execute(
 | 
			
		||||
                '''insert into kv_revisions (
 | 
			
		||||
                revision, key, data) values (?, ?, ?)''',
 | 
			
		||||
                (self.revision, key, serialized))
 | 
			
		||||
        else:
 | 
			
		||||
            self.cursor.execute(
 | 
			
		||||
                '''
 | 
			
		||||
                update kv_revisions
 | 
			
		||||
                set data = ?
 | 
			
		||||
                where key = ?
 | 
			
		||||
                and   revision = ?''',
 | 
			
		||||
                [serialized, key, self.revision])
 | 
			
		||||
 | 
			
		||||
        return value
 | 
			
		||||
 | 
			
		||||
    def delta(self, mapping, prefix):
 | 
			
		||||
        """
 | 
			
		||||
        Return a delta containing the values that have changed.
 | 
			
		||||
        """
 | 
			
		||||
        previous = self.getrange(prefix, strip=True)
 | 
			
		||||
        if not previous:
 | 
			
		||||
            pk = set()
 | 
			
		||||
        else:
 | 
			
		||||
            pk = set(previous.keys())
 | 
			
		||||
        ck = set(mapping.keys())
 | 
			
		||||
        delta = DeltaSet()
 | 
			
		||||
 | 
			
		||||
        # added
 | 
			
		||||
        for k in ck.difference(pk):
 | 
			
		||||
            delta[k] = Delta(None, mapping[k])
 | 
			
		||||
 | 
			
		||||
        # removed
 | 
			
		||||
        for k in pk.difference(ck):
 | 
			
		||||
            delta[k] = Delta(previous[k], None)
 | 
			
		||||
 | 
			
		||||
        # changed
 | 
			
		||||
        for k in pk.intersection(ck):
 | 
			
		||||
            c = mapping[k]
 | 
			
		||||
            p = previous[k]
 | 
			
		||||
            if c != p:
 | 
			
		||||
                delta[k] = Delta(p, c)
 | 
			
		||||
 | 
			
		||||
        return delta
 | 
			
		||||
 | 
			
		||||
    @contextlib.contextmanager
 | 
			
		||||
    def hook_scope(self, name=""):
 | 
			
		||||
        """Scope all future interactions to the current hook execution
 | 
			
		||||
        revision."""
 | 
			
		||||
        assert not self.revision
 | 
			
		||||
        self.cursor.execute(
 | 
			
		||||
            'insert into hooks (hook, date) values (?, ?)',
 | 
			
		||||
            (name or sys.argv[0],
 | 
			
		||||
             datetime.datetime.utcnow().isoformat()))
 | 
			
		||||
        self.revision = self.cursor.lastrowid
 | 
			
		||||
        try:
 | 
			
		||||
            yield self.revision
 | 
			
		||||
            self.revision = None
 | 
			
		||||
        except:
 | 
			
		||||
            self.flush(False)
 | 
			
		||||
            self.revision = None
 | 
			
		||||
            raise
 | 
			
		||||
        else:
 | 
			
		||||
            self.flush()
 | 
			
		||||
 | 
			
		||||
    def flush(self, save=True):
 | 
			
		||||
        if save:
 | 
			
		||||
            self.conn.commit()
 | 
			
		||||
        elif self._closed:
 | 
			
		||||
            return
 | 
			
		||||
        else:
 | 
			
		||||
            self.conn.rollback()
 | 
			
		||||
 | 
			
		||||
    def _init(self):
 | 
			
		||||
        self.cursor.execute('''
 | 
			
		||||
            create table if not exists kv (
 | 
			
		||||
               key text,
 | 
			
		||||
               data text,
 | 
			
		||||
               primary key (key)
 | 
			
		||||
               )''')
 | 
			
		||||
        self.cursor.execute('''
 | 
			
		||||
            create table if not exists kv_revisions (
 | 
			
		||||
               key text,
 | 
			
		||||
               revision integer,
 | 
			
		||||
               data text,
 | 
			
		||||
               primary key (key, revision)
 | 
			
		||||
               )''')
 | 
			
		||||
        self.cursor.execute('''
 | 
			
		||||
            create table if not exists hooks (
 | 
			
		||||
               version integer primary key autoincrement,
 | 
			
		||||
               hook text,
 | 
			
		||||
               date text
 | 
			
		||||
               )''')
 | 
			
		||||
        self.conn.commit()
 | 
			
		||||
 | 
			
		||||
    def gethistory(self, key, deserialize=False):
 | 
			
		||||
        self.cursor.execute(
 | 
			
		||||
            '''
 | 
			
		||||
            select kv.revision, kv.key, kv.data, h.hook, h.date
 | 
			
		||||
            from kv_revisions kv,
 | 
			
		||||
                 hooks h
 | 
			
		||||
            where kv.key=?
 | 
			
		||||
             and kv.revision = h.version
 | 
			
		||||
            ''', [key])
 | 
			
		||||
        if deserialize is False:
 | 
			
		||||
            return self.cursor.fetchall()
 | 
			
		||||
        return map(_parse_history, self.cursor.fetchall())
 | 
			
		||||
 | 
			
		||||
    def debug(self, fh=sys.stderr):
 | 
			
		||||
        self.cursor.execute('select * from kv')
 | 
			
		||||
        pprint.pprint(self.cursor.fetchall(), stream=fh)
 | 
			
		||||
        self.cursor.execute('select * from kv_revisions')
 | 
			
		||||
        pprint.pprint(self.cursor.fetchall(), stream=fh)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _parse_history(d):
 | 
			
		||||
    return (d[0], d[1], json.loads(d[2]), d[3],
 | 
			
		||||
            datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class HookData(object):
 | 
			
		||||
    """Simple integration for existing hook exec frameworks.
 | 
			
		||||
 | 
			
		||||
    Records all unit information, and stores deltas for processing
 | 
			
		||||
    by the hook.
 | 
			
		||||
 | 
			
		||||
    Sample::
 | 
			
		||||
 | 
			
		||||
       from charmhelpers.core import hookenv, unitdata
 | 
			
		||||
 | 
			
		||||
       changes = unitdata.HookData()
 | 
			
		||||
       db = unitdata.kv()
 | 
			
		||||
       hooks = hookenv.Hooks()
 | 
			
		||||
 | 
			
		||||
       @hooks.hook
 | 
			
		||||
       def config_changed():
 | 
			
		||||
           # View all changes to configuration
 | 
			
		||||
           for changed, (prev, cur) in changes.conf.items():
 | 
			
		||||
               print('config changed', changed,
 | 
			
		||||
                     'previous value', prev,
 | 
			
		||||
                     'current value',  cur)
 | 
			
		||||
 | 
			
		||||
           # Get some unit-specific bookkeeping
 | 
			
		||||
           if not db.get('pkg_key'):
 | 
			
		||||
               key = urllib.urlopen('https://example.com/pkg_key').read()
 | 
			
		||||
               db.set('pkg_key', key)
 | 
			
		||||
 | 
			
		||||
       if __name__ == '__main__':
 | 
			
		||||
           with changes():
 | 
			
		||||
           hooks.execute()
 | 
			
		||||
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self):
 | 
			
		||||
        self.kv = kv()
 | 
			
		||||
        self.conf = None
 | 
			
		||||
        self.rels = None
 | 
			
		||||
 | 
			
		||||
    @contextlib.contextmanager
 | 
			
		||||
    def __call__(self):
 | 
			
		||||
        from charmhelpers.core import hookenv
 | 
			
		||||
        hook_name = hookenv.hook_name()
 | 
			
		||||
 | 
			
		||||
        with self.kv.hook_scope(hook_name):
 | 
			
		||||
            self._record_charm_version(hookenv.charm_dir())
 | 
			
		||||
            delta_config, delta_relation = self._record_hook(hookenv)
 | 
			
		||||
            yield self.kv, delta_config, delta_relation
 | 
			
		||||
 | 
			
		||||
    def _record_charm_version(self, charm_dir):
 | 
			
		||||
        # Record revisions. Charm revisions are meaningless
 | 
			
		||||
        # to charm authors, as they don't control the revision,
 | 
			
		||||
        # so logic dependent on revision is not particularly
 | 
			
		||||
        # useful; however, it is useful for debugging analysis.
 | 
			
		||||
        charm_rev = open(
 | 
			
		||||
            os.path.join(charm_dir, 'revision')).read().strip()
 | 
			
		||||
        charm_rev = charm_rev or '0'
 | 
			
		||||
        revs = self.kv.get('charm_revisions', [])
 | 
			
		||||
        if charm_rev not in revs:
 | 
			
		||||
            revs.append(charm_rev.strip() or '0')
 | 
			
		||||
            self.kv.set('charm_revisions', revs)
 | 
			
		||||
 | 
			
		||||
    def _record_hook(self, hookenv):
 | 
			
		||||
        data = hookenv.execution_environment()
 | 
			
		||||
        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
 | 
			
		||||
        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
 | 
			
		||||
        self.kv.set('env', dict(data['env']))
 | 
			
		||||
        self.kv.set('unit', data['unit'])
 | 
			
		||||
        self.kv.set('relid', data.get('relid'))
 | 
			
		||||
        return conf_delta, rels_delta
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Record(dict):
 | 
			
		||||
 | 
			
		||||
    __slots__ = ()
 | 
			
		||||
 | 
			
		||||
    def __getattr__(self, k):
 | 
			
		||||
        if k in self:
 | 
			
		||||
            return self[k]
 | 
			
		||||
        raise AttributeError(k)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class DeltaSet(Record):
 | 
			
		||||
 | 
			
		||||
    __slots__ = ()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
Delta = collections.namedtuple('Delta', ['previous', 'current'])
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
_KV = None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def kv():
 | 
			
		||||
    global _KV
 | 
			
		||||
    if _KV is None:
 | 
			
		||||
        _KV = Storage()
 | 
			
		||||
    return _KV
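# Illustrative only (not part of charm-helpers): a minimal sketch of using the
# shared Storage instance returned by kv() from inside a hook. The 'seen_units'
# key and the unit name are hypothetical examples.
#
#     from charmhelpers.core import unitdata
#
#     db = unitdata.kv()
#     seen = db.get('seen_units', [])
#     seen.append('cinder/0')
#     db.set('seen_units', seen)
#     db.flush()  # persist immediately when not inside a hook_scope()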
 | 
			
456  hooks/charmhelpers/fetch/__init__.py  Normal file
@@ -0,0 +1,456 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import importlib
 | 
			
		||||
from tempfile import NamedTemporaryFile
 | 
			
		||||
import time
 | 
			
		||||
from yaml import safe_load
 | 
			
		||||
from charmhelpers.core.host import (
 | 
			
		||||
    lsb_release
 | 
			
		||||
)
 | 
			
		||||
import subprocess
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    config,
 | 
			
		||||
    log,
 | 
			
		||||
)
 | 
			
		||||
import os
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
if six.PY3:
 | 
			
		||||
    from urllib.parse import urlparse, urlunparse
 | 
			
		||||
else:
 | 
			
		||||
    from urlparse import urlparse, urlunparse
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
 | 
			
		||||
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
 | 
			
		||||
"""
 | 
			
		||||
PROPOSED_POCKET = """# Proposed
 | 
			
		||||
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
 | 
			
		||||
"""
 | 
			
		||||
CLOUD_ARCHIVE_POCKETS = {
 | 
			
		||||
    # Folsom
 | 
			
		||||
    'folsom': 'precise-updates/folsom',
 | 
			
		||||
    'precise-folsom': 'precise-updates/folsom',
 | 
			
		||||
    'precise-folsom/updates': 'precise-updates/folsom',
 | 
			
		||||
    'precise-updates/folsom': 'precise-updates/folsom',
 | 
			
		||||
    'folsom/proposed': 'precise-proposed/folsom',
 | 
			
		||||
    'precise-folsom/proposed': 'precise-proposed/folsom',
 | 
			
		||||
    'precise-proposed/folsom': 'precise-proposed/folsom',
 | 
			
		||||
    # Grizzly
 | 
			
		||||
    'grizzly': 'precise-updates/grizzly',
 | 
			
		||||
    'precise-grizzly': 'precise-updates/grizzly',
 | 
			
		||||
    'precise-grizzly/updates': 'precise-updates/grizzly',
 | 
			
		||||
    'precise-updates/grizzly': 'precise-updates/grizzly',
 | 
			
		||||
    'grizzly/proposed': 'precise-proposed/grizzly',
 | 
			
		||||
    'precise-grizzly/proposed': 'precise-proposed/grizzly',
 | 
			
		||||
    'precise-proposed/grizzly': 'precise-proposed/grizzly',
 | 
			
		||||
    # Havana
 | 
			
		||||
    'havana': 'precise-updates/havana',
 | 
			
		||||
    'precise-havana': 'precise-updates/havana',
 | 
			
		||||
    'precise-havana/updates': 'precise-updates/havana',
 | 
			
		||||
    'precise-updates/havana': 'precise-updates/havana',
 | 
			
		||||
    'havana/proposed': 'precise-proposed/havana',
 | 
			
		||||
    'precise-havana/proposed': 'precise-proposed/havana',
 | 
			
		||||
    'precise-proposed/havana': 'precise-proposed/havana',
 | 
			
		||||
    # Icehouse
 | 
			
		||||
    'icehouse': 'precise-updates/icehouse',
 | 
			
		||||
    'precise-icehouse': 'precise-updates/icehouse',
 | 
			
		||||
    'precise-icehouse/updates': 'precise-updates/icehouse',
 | 
			
		||||
    'precise-updates/icehouse': 'precise-updates/icehouse',
 | 
			
		||||
    'icehouse/proposed': 'precise-proposed/icehouse',
 | 
			
		||||
    'precise-icehouse/proposed': 'precise-proposed/icehouse',
 | 
			
		||||
    'precise-proposed/icehouse': 'precise-proposed/icehouse',
 | 
			
		||||
    # Juno
 | 
			
		||||
    'juno': 'trusty-updates/juno',
 | 
			
		||||
    'trusty-juno': 'trusty-updates/juno',
 | 
			
		||||
    'trusty-juno/updates': 'trusty-updates/juno',
 | 
			
		||||
    'trusty-updates/juno': 'trusty-updates/juno',
 | 
			
		||||
    'juno/proposed': 'trusty-proposed/juno',
 | 
			
		||||
    'trusty-juno/proposed': 'trusty-proposed/juno',
 | 
			
		||||
    'trusty-proposed/juno': 'trusty-proposed/juno',
 | 
			
		||||
    # Kilo
 | 
			
		||||
    'kilo': 'trusty-updates/kilo',
 | 
			
		||||
    'trusty-kilo': 'trusty-updates/kilo',
 | 
			
		||||
    'trusty-kilo/updates': 'trusty-updates/kilo',
 | 
			
		||||
    'trusty-updates/kilo': 'trusty-updates/kilo',
 | 
			
		||||
    'kilo/proposed': 'trusty-proposed/kilo',
 | 
			
		||||
    'trusty-kilo/proposed': 'trusty-proposed/kilo',
 | 
			
		||||
    'trusty-proposed/kilo': 'trusty-proposed/kilo',
 | 
			
		||||
    # Liberty
 | 
			
		||||
    'liberty': 'trusty-updates/liberty',
 | 
			
		||||
    'trusty-liberty': 'trusty-updates/liberty',
 | 
			
		||||
    'trusty-liberty/updates': 'trusty-updates/liberty',
 | 
			
		||||
    'trusty-updates/liberty': 'trusty-updates/liberty',
 | 
			
		||||
    'liberty/proposed': 'trusty-proposed/liberty',
 | 
			
		||||
    'trusty-liberty/proposed': 'trusty-proposed/liberty',
 | 
			
		||||
    'trusty-proposed/liberty': 'trusty-proposed/liberty',
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
# The order of this list is very important. Handlers should be listed
# from least- to most-specific URL matching.
 | 
			
		||||
FETCH_HANDLERS = (
 | 
			
		||||
    'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
 | 
			
		||||
    'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
 | 
			
		||||
    'charmhelpers.fetch.giturl.GitUrlFetchHandler',
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
 | 
			
		||||
APT_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
 | 
			
		||||
APT_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class SourceConfigError(Exception):
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class UnhandledSource(Exception):
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class AptLockError(Exception):
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BaseFetchHandler(object):
 | 
			
		||||
 | 
			
		||||
    """Base class for FetchHandler implementations in fetch plugins"""
 | 
			
		||||
 | 
			
		||||
    def can_handle(self, source):
 | 
			
		||||
        """Returns True if the source can be handled. Otherwise returns
 | 
			
		||||
        a string explaining why it cannot"""
 | 
			
		||||
        return "Wrong source type"
 | 
			
		||||
 | 
			
		||||
    def install(self, source):
 | 
			
		||||
        """Try to download and unpack the source. Return the path to the
 | 
			
		||||
        unpacked files or raise UnhandledSource."""
 | 
			
		||||
        raise UnhandledSource("Wrong source type {}".format(source))
 | 
			
		||||
 | 
			
		||||
    def parse_url(self, url):
 | 
			
		||||
        return urlparse(url)
 | 
			
		||||
 | 
			
		||||
    def base_url(self, url):
 | 
			
		||||
        """Return url without querystring or fragment"""
 | 
			
		||||
        parts = list(self.parse_url(url))
 | 
			
		||||
        parts[4:] = ['' for i in parts[4:]]
 | 
			
		||||
        return urlunparse(parts)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def filter_installed_packages(packages):
 | 
			
		||||
    """Returns a list of packages that require installation"""
 | 
			
		||||
    cache = apt_cache()
 | 
			
		||||
    _pkgs = []
 | 
			
		||||
    for package in packages:
 | 
			
		||||
        try:
 | 
			
		||||
            p = cache[package]
 | 
			
		||||
            p.current_ver or _pkgs.append(package)
 | 
			
		||||
        except KeyError:
 | 
			
		||||
            log('Package {} has no installation candidate.'.format(package),
 | 
			
		||||
                level='WARNING')
 | 
			
		||||
            _pkgs.append(package)
 | 
			
		||||
    return _pkgs
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def apt_cache(in_memory=True):
 | 
			
		||||
    """Build and return an apt cache"""
 | 
			
		||||
    from apt import apt_pkg
 | 
			
		||||
    apt_pkg.init()
 | 
			
		||||
    if in_memory:
 | 
			
		||||
        apt_pkg.config.set("Dir::Cache::pkgcache", "")
 | 
			
		||||
        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
 | 
			
		||||
    return apt_pkg.Cache()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def apt_install(packages, options=None, fatal=False):
 | 
			
		||||
    """Install one or more packages"""
 | 
			
		||||
    if options is None:
 | 
			
		||||
        options = ['--option=Dpkg::Options::=--force-confold']
 | 
			
		||||
 | 
			
		||||
    cmd = ['apt-get', '--assume-yes']
 | 
			
		||||
    cmd.extend(options)
 | 
			
		||||
    cmd.append('install')
 | 
			
		||||
    if isinstance(packages, six.string_types):
 | 
			
		||||
        cmd.append(packages)
 | 
			
		||||
    else:
 | 
			
		||||
        cmd.extend(packages)
 | 
			
		||||
    log("Installing {} with options: {}".format(packages,
 | 
			
		||||
                                                options))
 | 
			
		||||
    _run_apt_command(cmd, fatal)
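# Typical usage sketch: install only the packages that are missing, and fail
# the hook if apt cannot complete. The package list below mirrors the one this
# charm installs in its own install hook.
#
#     apt_install(filter_installed_packages(['ceph-common', 'cinder-backup']),
#                 fatal=True)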
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def apt_upgrade(options=None, fatal=False, dist=False):
 | 
			
		||||
    """Upgrade all packages"""
 | 
			
		||||
    if options is None:
 | 
			
		||||
        options = ['--option=Dpkg::Options::=--force-confold']
 | 
			
		||||
 | 
			
		||||
    cmd = ['apt-get', '--assume-yes']
 | 
			
		||||
    cmd.extend(options)
 | 
			
		||||
    if dist:
 | 
			
		||||
        cmd.append('dist-upgrade')
 | 
			
		||||
    else:
 | 
			
		||||
        cmd.append('upgrade')
 | 
			
		||||
    log("Upgrading with options: {}".format(options))
 | 
			
		||||
    _run_apt_command(cmd, fatal)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def apt_update(fatal=False):
 | 
			
		||||
    """Update local apt cache"""
 | 
			
		||||
    cmd = ['apt-get', 'update']
 | 
			
		||||
    _run_apt_command(cmd, fatal)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def apt_purge(packages, fatal=False):
 | 
			
		||||
    """Purge one or more packages"""
 | 
			
		||||
    cmd = ['apt-get', '--assume-yes', 'purge']
 | 
			
		||||
    if isinstance(packages, six.string_types):
 | 
			
		||||
        cmd.append(packages)
 | 
			
		||||
    else:
 | 
			
		||||
        cmd.extend(packages)
 | 
			
		||||
    log("Purging {}".format(packages))
 | 
			
		||||
    _run_apt_command(cmd, fatal)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def apt_mark(packages, mark, fatal=False):
 | 
			
		||||
    """Flag one or more packages using apt-mark"""
 | 
			
		||||
    cmd = ['apt-mark', mark]
 | 
			
		||||
    if isinstance(packages, six.string_types):
 | 
			
		||||
        cmd.append(packages)
 | 
			
		||||
    else:
 | 
			
		||||
        cmd.extend(packages)
 | 
			
		||||
    log("Holding {}".format(packages))
 | 
			
		||||
 | 
			
		||||
    if fatal:
 | 
			
		||||
        subprocess.check_call(cmd, universal_newlines=True)
 | 
			
		||||
    else:
 | 
			
		||||
        subprocess.call(cmd, universal_newlines=True)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def apt_hold(packages, fatal=False):
 | 
			
		||||
    return apt_mark(packages, 'hold', fatal=fatal)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def apt_unhold(packages, fatal=False):
 | 
			
		||||
    return apt_mark(packages, 'unhold', fatal=fatal)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def add_source(source, key=None):
 | 
			
		||||
    """Add a package source to this system.
 | 
			
		||||
 | 
			
		||||
    @param source: a URL or sources.list entry, as supported by
 | 
			
		||||
    add-apt-repository(1). Examples::
 | 
			
		||||
 | 
			
		||||
        ppa:charmers/example
 | 
			
		||||
        deb https://stub:key@private.example.com/ubuntu trusty main
 | 
			
		||||
 | 
			
		||||
    In addition:
 | 
			
		||||
        'proposed' may be used to enable the standard 'proposed'
 | 
			
		||||
        pocket for the release.
 | 
			
		||||
        'cloud:' may be used to activate official cloud archive pockets,
 | 
			
		||||
        such as 'cloud:icehouse'
 | 
			
		||||
        'distro' may be used as a noop
 | 
			
		||||
 | 
			
		||||
    @param key: A key to be added to the system's APT keyring and used
 | 
			
		||||
    to verify the signatures on packages. Ideally, this should be an
 | 
			
		||||
    ASCII format GPG public key including the block headers. A GPG key
 | 
			
		||||
    id may also be used, but be aware that only insecure protocols are
 | 
			
		||||
    available to retrieve the actual public key from a public keyserver,
    placing your Juju environment at risk. PPA and cloud archive keys
    are securely added automatically, so should not be provided.
 | 
			
		||||
    """
 | 
			
		||||
    if source is None:
 | 
			
		||||
        log('Source is not present. Skipping')
 | 
			
		||||
        return
 | 
			
		||||
 | 
			
		||||
    if (source.startswith('ppa:') or
 | 
			
		||||
        source.startswith('http') or
 | 
			
		||||
        source.startswith('deb ') or
 | 
			
		||||
            source.startswith('cloud-archive:')):
 | 
			
		||||
        subprocess.check_call(['add-apt-repository', '--yes', source])
 | 
			
		||||
    elif source.startswith('cloud:'):
 | 
			
		||||
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
 | 
			
		||||
                    fatal=True)
 | 
			
		||||
        pocket = source.split(':')[-1]
 | 
			
		||||
        if pocket not in CLOUD_ARCHIVE_POCKETS:
 | 
			
		||||
            raise SourceConfigError(
 | 
			
		||||
                'Unsupported cloud: source option %s' %
 | 
			
		||||
                pocket)
 | 
			
		||||
        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
 | 
			
		||||
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
 | 
			
		||||
            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
 | 
			
		||||
    elif source == 'proposed':
 | 
			
		||||
        release = lsb_release()['DISTRIB_CODENAME']
 | 
			
		||||
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
 | 
			
		||||
            apt.write(PROPOSED_POCKET.format(release))
 | 
			
		||||
    elif source == 'distro':
 | 
			
		||||
        pass
 | 
			
		||||
    else:
 | 
			
		||||
        log("Unknown source: {!r}".format(source))
 | 
			
		||||
 | 
			
		||||
    if key:
 | 
			
		||||
        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
 | 
			
		||||
            with NamedTemporaryFile('w+') as key_file:
 | 
			
		||||
                key_file.write(key)
 | 
			
		||||
                key_file.flush()
 | 
			
		||||
                key_file.seek(0)
 | 
			
		||||
                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
 | 
			
		||||
        else:
 | 
			
		||||
            # Note that hkp: is in no way a secure protocol. Using a
 | 
			
		||||
            # GPG key id is pointless from a security POV unless you
 | 
			
		||||
            # absolutely trust your network and DNS.
 | 
			
		||||
            subprocess.check_call(['apt-key', 'adv', '--keyserver',
 | 
			
		||||
                                   'hkp://keyserver.ubuntu.com:80', '--recv',
 | 
			
		||||
                                   key])
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def configure_sources(update=False,
 | 
			
		||||
                      sources_var='install_sources',
 | 
			
		||||
                      keys_var='install_keys'):
 | 
			
		||||
    """
 | 
			
		||||
    Configure multiple sources from charm configuration.
 | 
			
		||||
 | 
			
		||||
    The lists are encoded as yaml fragments in the configuration.
 | 
			
		||||
    The fragment needs to be included as a string. Sources and their
 | 
			
		||||
    corresponding keys are of the types supported by add_source().
 | 
			
		||||
 | 
			
		||||
    Example config:
 | 
			
		||||
        install_sources: |
 | 
			
		||||
          - "ppa:foo"
 | 
			
		||||
          - "http://example.com/repo precise main"
 | 
			
		||||
        install_keys: |
 | 
			
		||||
          - null
 | 
			
		||||
          - "a1b2c3d4"
 | 
			
		||||
 | 
			
		||||
    Note that 'null' (a.k.a. None) should not be quoted.
 | 
			
		||||
    """
 | 
			
		||||
    sources = safe_load((config(sources_var) or '').strip()) or []
 | 
			
		||||
    keys = safe_load((config(keys_var) or '').strip()) or None
 | 
			
		||||
 | 
			
		||||
    if isinstance(sources, six.string_types):
 | 
			
		||||
        sources = [sources]
 | 
			
		||||
 | 
			
		||||
    if keys is None:
 | 
			
		||||
        for source in sources:
 | 
			
		||||
            add_source(source, None)
 | 
			
		||||
    else:
 | 
			
		||||
        if isinstance(keys, six.string_types):
 | 
			
		||||
            keys = [keys]
 | 
			
		||||
 | 
			
		||||
        if len(sources) != len(keys):
 | 
			
		||||
            raise SourceConfigError(
 | 
			
		||||
                'Install sources and keys lists are different lengths')
 | 
			
		||||
        for source, key in zip(sources, keys):
 | 
			
		||||
            add_source(source, key)
 | 
			
		||||
    if update:
 | 
			
		||||
        apt_update(fatal=True)
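# Sketch of driving configure_sources() from charm configuration, assuming the
# default 'install_sources'/'install_keys' option names and operator-supplied
# values such as:
#
#     install_sources: "cloud:trusty-kilo"
#     install_keys: null
#
# with the hook then calling:
#
#     configure_sources(update=True)  # adds the UCA pocket, then apt_update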
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def install_remote(source, *args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    Install a file tree from a remote source
 | 
			
		||||
 | 
			
		||||
    The specified source should be a url of the form:
 | 
			
		||||
        scheme://[host]/path[#[option=value][&...]]
 | 
			
		||||
 | 
			
		||||
    Supported schemes are based on this module's submodules.
 | 
			
		||||
    Options supported are submodule-specific.
 | 
			
		||||
    Additional arguments are passed through to the submodule.
 | 
			
		||||
 | 
			
		||||
    For example::
 | 
			
		||||
 | 
			
		||||
        dest = install_remote('http://example.com/archive.tgz',
 | 
			
		||||
                              checksum='deadbeef',
 | 
			
		||||
                              hash_type='sha1')
 | 
			
		||||
 | 
			
		||||
    This will download `archive.tgz`, validate it using SHA1 and, if
 | 
			
		||||
    the file is ok, extract it and return the directory in which it
 | 
			
		||||
    was extracted.  If the checksum fails, it will raise
 | 
			
		||||
    :class:`charmhelpers.core.host.ChecksumError`.
 | 
			
		||||
    """
 | 
			
		||||
    # We ONLY check for True here because can_handle may return a string
 | 
			
		||||
    # explaining why it can't handle a given source.
 | 
			
		||||
    handlers = [h for h in plugins() if h.can_handle(source) is True]
 | 
			
		||||
    installed_to = None
 | 
			
		||||
    for handler in handlers:
 | 
			
		||||
        try:
 | 
			
		||||
            installed_to = handler.install(source, *args, **kwargs)
 | 
			
		||||
        except UnhandledSource as e:
 | 
			
		||||
            log('Install source attempt unsuccessful: {}'.format(e),
 | 
			
		||||
                level='WARNING')
 | 
			
		||||
    if not installed_to:
 | 
			
		||||
        raise UnhandledSource("No handler found for source {}".format(source))
 | 
			
		||||
    return installed_to
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def install_from_config(config_var_name):
 | 
			
		||||
    charm_config = config()
 | 
			
		||||
    source = charm_config[config_var_name]
 | 
			
		||||
    return install_remote(source)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def plugins(fetch_handlers=None):
 | 
			
		||||
    if not fetch_handlers:
 | 
			
		||||
        fetch_handlers = FETCH_HANDLERS
 | 
			
		||||
    plugin_list = []
 | 
			
		||||
    for handler_name in fetch_handlers:
 | 
			
		||||
        package, classname = handler_name.rsplit('.', 1)
 | 
			
		||||
        try:
 | 
			
		||||
            handler_class = getattr(
 | 
			
		||||
                importlib.import_module(package),
 | 
			
		||||
                classname)
 | 
			
		||||
            plugin_list.append(handler_class())
 | 
			
		||||
        except (ImportError, AttributeError):
 | 
			
		||||
            # Skip missing plugins so that they can be omitted from
 | 
			
		||||
            # installation if desired
 | 
			
		||||
            log("FetchHandler {} not found, skipping plugin".format(
 | 
			
		||||
                handler_name))
 | 
			
		||||
    return plugin_list
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _run_apt_command(cmd, fatal=False):
 | 
			
		||||
    """
 | 
			
		||||
    Run an APT command, checking output and retrying if the fatal flag is set
 | 
			
		||||
    to True.
 | 
			
		||||
 | 
			
		||||
    :param cmd: str: The apt command to run.
    :param fatal: bool: Whether the command's output should be checked and
        retried.
 | 
			
		||||
    """
 | 
			
		||||
    env = os.environ.copy()
 | 
			
		||||
 | 
			
		||||
    if 'DEBIAN_FRONTEND' not in env:
 | 
			
		||||
        env['DEBIAN_FRONTEND'] = 'noninteractive'
 | 
			
		||||
 | 
			
		||||
    if fatal:
 | 
			
		||||
        retry_count = 0
 | 
			
		||||
        result = None
 | 
			
		||||
 | 
			
		||||
        # If the command is considered "fatal", we need to retry if the apt
 | 
			
		||||
        # lock was not acquired.
 | 
			
		||||
 | 
			
		||||
        while result is None or result == APT_NO_LOCK:
 | 
			
		||||
            try:
 | 
			
		||||
                result = subprocess.check_call(cmd, env=env)
 | 
			
		||||
            except subprocess.CalledProcessError as e:
 | 
			
		||||
                retry_count = retry_count + 1
 | 
			
		||||
                if retry_count > APT_NO_LOCK_RETRY_COUNT:
 | 
			
		||||
                    raise
 | 
			
		||||
                result = e.returncode
 | 
			
		||||
                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
 | 
			
		||||
                    "".format(APT_NO_LOCK_RETRY_DELAY))
 | 
			
		||||
                time.sleep(APT_NO_LOCK_RETRY_DELAY)
 | 
			
		||||
 | 
			
		||||
    else:
 | 
			
		||||
        subprocess.call(cmd, env=env)
 | 
			
167  hooks/charmhelpers/fetch/archiveurl.py  Normal file
@@ -0,0 +1,167 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import hashlib
 | 
			
		||||
import re
 | 
			
		||||
 | 
			
		||||
from charmhelpers.fetch import (
 | 
			
		||||
    BaseFetchHandler,
 | 
			
		||||
    UnhandledSource
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.payload.archive import (
 | 
			
		||||
    get_archive_handler,
 | 
			
		||||
    extract,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.core.host import mkdir, check_hash
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
if six.PY3:
 | 
			
		||||
    from urllib.request import (
 | 
			
		||||
        build_opener, install_opener, urlopen, urlretrieve,
 | 
			
		||||
        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
 | 
			
		||||
    )
 | 
			
		||||
    from urllib.parse import urlparse, urlunparse, parse_qs
 | 
			
		||||
    from urllib.error import URLError
 | 
			
		||||
else:
 | 
			
		||||
    from urllib import urlretrieve
 | 
			
		||||
    from urllib2 import (
 | 
			
		||||
        build_opener, install_opener, urlopen,
 | 
			
		||||
        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
 | 
			
		||||
        URLError
 | 
			
		||||
    )
 | 
			
		||||
    from urlparse import urlparse, urlunparse, parse_qs
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def splituser(host):
 | 
			
		||||
    '''urllib.splituser(), but six's support of this seems broken'''
 | 
			
		||||
    _userprog = re.compile('^(.*)@(.*)$')
 | 
			
		||||
    match = _userprog.match(host)
 | 
			
		||||
    if match:
 | 
			
		||||
        return match.group(1, 2)
 | 
			
		||||
    return None, host
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def splitpasswd(user):
 | 
			
		||||
    '''urllib.splitpasswd(), but six's support of this is missing'''
 | 
			
		||||
    _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
 | 
			
		||||
    match = _passwdprog.match(user)
 | 
			
		||||
    if match:
 | 
			
		||||
        return match.group(1, 2)
 | 
			
		||||
    return user, None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class ArchiveUrlFetchHandler(BaseFetchHandler):
 | 
			
		||||
    """
 | 
			
		||||
    Handler to download archive files from arbitrary URLs.
 | 
			
		||||
 | 
			
		||||
    Can fetch from http, https, ftp, and file URLs.
 | 
			
		||||
 | 
			
		||||
    Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
 | 
			
		||||
 | 
			
		||||
    Installs the contents of the archive in $CHARM_DIR/fetched/.
 | 
			
		||||
    """
 | 
			
		||||
    def can_handle(self, source):
 | 
			
		||||
        url_parts = self.parse_url(source)
 | 
			
		||||
        if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
 | 
			
		||||
            # XXX: Why is this returning a boolean and a string? It's
 | 
			
		||||
            # doomed to fail since "bool(can_handle('foo://'))" will be True.
 | 
			
		||||
            return "Wrong source type"
 | 
			
		||||
        if get_archive_handler(self.base_url(source)):
 | 
			
		||||
            return True
 | 
			
		||||
        return False
 | 
			
		||||
 | 
			
		||||
    def download(self, source, dest):
 | 
			
		||||
        """
 | 
			
		||||
        Download an archive file.
 | 
			
		||||
 | 
			
		||||
        :param str source: URL pointing to an archive file.
 | 
			
		||||
        :param str dest: Local path location to download archive file to.
 | 
			
		||||
        """
 | 
			
		||||
        # propagate all exceptions
 | 
			
		||||
        # URLError, OSError, etc
 | 
			
		||||
        proto, netloc, path, params, query, fragment = urlparse(source)
 | 
			
		||||
        if proto in ('http', 'https'):
 | 
			
		||||
            auth, barehost = splituser(netloc)
 | 
			
		||||
            if auth is not None:
 | 
			
		||||
                source = urlunparse((proto, barehost, path, params, query, fragment))
 | 
			
		||||
                username, password = splitpasswd(auth)
 | 
			
		||||
                passman = HTTPPasswordMgrWithDefaultRealm()
 | 
			
		||||
                # Realm is set to None in add_password to force the username and password
 | 
			
		||||
                # to be used whatever the realm
 | 
			
		||||
                passman.add_password(None, source, username, password)
 | 
			
		||||
                authhandler = HTTPBasicAuthHandler(passman)
 | 
			
		||||
                opener = build_opener(authhandler)
 | 
			
		||||
                install_opener(opener)
 | 
			
		||||
        response = urlopen(source)
 | 
			
		||||
        try:
 | 
			
		||||
            with open(dest, 'w') as dest_file:
 | 
			
		||||
                dest_file.write(response.read())
 | 
			
		||||
        except Exception as e:
 | 
			
		||||
            if os.path.isfile(dest):
 | 
			
		||||
                os.unlink(dest)
 | 
			
		||||
            raise e
 | 
			
		||||
 | 
			
		||||
    # Mandatory file validation via SHA-1 or MD5 hashing.
 | 
			
		||||
    def download_and_validate(self, url, hashsum, validate="sha1"):
 | 
			
		||||
        tempfile, headers = urlretrieve(url)
 | 
			
		||||
        check_hash(tempfile, hashsum, validate)
 | 
			
		||||
        return tempfile
 | 
			
		||||
 | 
			
		||||
    def install(self, source, dest=None, checksum=None, hash_type='sha1'):
 | 
			
		||||
        """
 | 
			
		||||
        Download and install an archive file, with optional checksum validation.
 | 
			
		||||
 | 
			
		||||
        The checksum can also be given on the `source` URL's fragment.
 | 
			
		||||
        For example::
 | 
			
		||||
 | 
			
		||||
            handler.install('http://example.com/file.tgz#sha1=deadbeef')
 | 
			
		||||
 | 
			
		||||
        :param str source: URL pointing to an archive file.
 | 
			
		||||
        :param str dest: Local destination path to install to. If not given,
 | 
			
		||||
            installs to `$CHARM_DIR/archives/archive_file_name`.
 | 
			
		||||
        :param str checksum: If given, validate the archive file after download.
 | 
			
		||||
        :param str hash_type: Algorithm used to generate `checksum`.
 | 
			
		||||
            Can be any hash algorithm supported by :mod:`hashlib`,
 | 
			
		||||
            such as md5, sha1, sha256, sha512, etc.
 | 
			
		||||
 | 
			
		||||
        """
 | 
			
		||||
        url_parts = self.parse_url(source)
 | 
			
		||||
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
 | 
			
		||||
        if not os.path.exists(dest_dir):
 | 
			
		||||
            mkdir(dest_dir, perms=0o755)
 | 
			
		||||
        dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
 | 
			
		||||
        try:
 | 
			
		||||
            self.download(source, dld_file)
 | 
			
		||||
        except URLError as e:
 | 
			
		||||
            raise UnhandledSource(e.reason)
 | 
			
		||||
        except OSError as e:
 | 
			
		||||
            raise UnhandledSource(e.strerror)
 | 
			
		||||
        options = parse_qs(url_parts.fragment)
 | 
			
		||||
        for key, value in options.items():
 | 
			
		||||
            if not six.PY3:
 | 
			
		||||
                algorithms = hashlib.algorithms
 | 
			
		||||
            else:
 | 
			
		||||
                algorithms = hashlib.algorithms_available
 | 
			
		||||
            if key in algorithms:
 | 
			
		||||
                if len(value) != 1:
 | 
			
		||||
                    raise TypeError(
 | 
			
		||||
                        "Expected 1 hash value, not %d" % len(value))
 | 
			
		||||
                expected = value[0]
 | 
			
		||||
                check_hash(dld_file, expected, key)
 | 
			
		||||
        if checksum:
 | 
			
		||||
            check_hash(dld_file, checksum, hash_type)
 | 
			
		||||
        return extract(dld_file, dest)
 | 
			
78  hooks/charmhelpers/fetch/bzrurl.py  Normal file
@@ -0,0 +1,78 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
from charmhelpers.fetch import (
 | 
			
		||||
    BaseFetchHandler,
 | 
			
		||||
    UnhandledSource
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.core.host import mkdir
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
if six.PY3:
 | 
			
		||||
    raise ImportError('bzrlib does not support Python3')
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    from bzrlib.branch import Branch
 | 
			
		||||
    from bzrlib import bzrdir, workingtree, errors
 | 
			
		||||
except ImportError:
 | 
			
		||||
    from charmhelpers.fetch import apt_install
 | 
			
		||||
    apt_install("python-bzrlib")
 | 
			
		||||
    from bzrlib.branch import Branch
 | 
			
		||||
    from bzrlib import bzrdir, workingtree, errors
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BzrUrlFetchHandler(BaseFetchHandler):
 | 
			
		||||
    """Handler for bazaar branches via generic and lp URLs"""
 | 
			
		||||
    def can_handle(self, source):
 | 
			
		||||
        url_parts = self.parse_url(source)
 | 
			
		||||
        if url_parts.scheme not in ('bzr+ssh', 'lp'):
 | 
			
		||||
            return False
 | 
			
		||||
        else:
 | 
			
		||||
            return True
 | 
			
		||||
 | 
			
		||||
    def branch(self, source, dest):
 | 
			
		||||
        url_parts = self.parse_url(source)
 | 
			
		||||
        # If we use lp:branchname scheme we need to load plugins
 | 
			
		||||
        if not self.can_handle(source):
 | 
			
		||||
            raise UnhandledSource("Cannot handle {}".format(source))
 | 
			
		||||
        if url_parts.scheme == "lp":
 | 
			
		||||
            from bzrlib.plugin import load_plugins
 | 
			
		||||
            load_plugins()
 | 
			
		||||
        try:
 | 
			
		||||
            local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
 | 
			
		||||
        except errors.AlreadyControlDirError:
 | 
			
		||||
            local_branch = Branch.open(dest)
 | 
			
		||||
        try:
 | 
			
		||||
            remote_branch = Branch.open(source)
 | 
			
		||||
            remote_branch.push(local_branch)
 | 
			
		||||
            tree = workingtree.WorkingTree.open(dest)
 | 
			
		||||
            tree.update()
 | 
			
		||||
        except Exception as e:
 | 
			
		||||
            raise e
 | 
			
		||||
 | 
			
		||||
    def install(self, source):
 | 
			
		||||
        url_parts = self.parse_url(source)
 | 
			
		||||
        branch_name = url_parts.path.strip("/").split("/")[-1]
 | 
			
		||||
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
 | 
			
		||||
                                branch_name)
 | 
			
		||||
        if not os.path.exists(dest_dir):
 | 
			
		||||
            mkdir(dest_dir, perms=0o755)
 | 
			
		||||
        try:
 | 
			
		||||
            self.branch(source, dest_dir)
 | 
			
		||||
        except OSError as e:
 | 
			
		||||
            raise UnhandledSource(e.strerror)
 | 
			
		||||
        return dest_dir
 | 
			
73  hooks/charmhelpers/fetch/giturl.py  Normal file
@@ -0,0 +1,73 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
from charmhelpers.fetch import (
 | 
			
		||||
    BaseFetchHandler,
 | 
			
		||||
    UnhandledSource
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.core.host import mkdir
 | 
			
		||||
 | 
			
		||||
import six
 | 
			
		||||
if six.PY3:
 | 
			
		||||
    raise ImportError('GitPython does not support Python 3')
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    from git import Repo
 | 
			
		||||
except ImportError:
 | 
			
		||||
    from charmhelpers.fetch import apt_install
 | 
			
		||||
    apt_install("python-git")
 | 
			
		||||
    from git import Repo
 | 
			
		||||
 | 
			
		||||
from git.exc import GitCommandError  # noqa E402
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GitUrlFetchHandler(BaseFetchHandler):
 | 
			
		||||
    """Handler for git branches via generic and github URLs"""
 | 
			
		||||
    def can_handle(self, source):
 | 
			
		||||
        url_parts = self.parse_url(source)
 | 
			
		||||
        # TODO (mattyw) no support for ssh git@ yet
 | 
			
		||||
        if url_parts.scheme not in ('http', 'https', 'git'):
 | 
			
		||||
            return False
 | 
			
		||||
        else:
 | 
			
		||||
            return True
 | 
			
		||||
 | 
			
		||||
    def clone(self, source, dest, branch, depth=None):
 | 
			
		||||
        if not self.can_handle(source):
 | 
			
		||||
            raise UnhandledSource("Cannot handle {}".format(source))
 | 
			
		||||
 | 
			
		||||
        if depth:
 | 
			
		||||
            Repo.clone_from(source, dest, branch=branch, depth=depth)
 | 
			
		||||
        else:
 | 
			
		||||
            Repo.clone_from(source, dest, branch=branch)
 | 
			
		||||
 | 
			
		||||
    def install(self, source, branch="master", dest=None, depth=None):
 | 
			
		||||
        url_parts = self.parse_url(source)
 | 
			
		||||
        branch_name = url_parts.path.strip("/").split("/")[-1]
 | 
			
		||||
        if dest:
 | 
			
		||||
            dest_dir = os.path.join(dest, branch_name)
 | 
			
		||||
        else:
 | 
			
		||||
            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
 | 
			
		||||
                                    branch_name)
 | 
			
		||||
        if not os.path.exists(dest_dir):
 | 
			
		||||
            mkdir(dest_dir, perms=0o755)
 | 
			
		||||
        try:
 | 
			
		||||
            self.clone(source, dest_dir, branch, depth)
 | 
			
		||||
        except GitCommandError as e:
 | 
			
		||||
            raise UnhandledSource(e)
 | 
			
		||||
        except OSError as e:
 | 
			
		||||
            raise UnhandledSource(e.strerror)
 | 
			
		||||
        return dest_dir
 | 
			
17  hooks/charmhelpers/payload/__init__.py  Normal file
@@ -0,0 +1,17 @@
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
"Tools for working with files injected into a charm just before deployment."
 | 
			
66  hooks/charmhelpers/payload/execd.py  Normal file
@@ -0,0 +1,66 @@
 | 
			
		||||
#!/usr/bin/env python
 | 
			
		||||
 | 
			
		||||
# Copyright 2014-2015 Canonical Limited.
 | 
			
		||||
#
 | 
			
		||||
# This file is part of charm-helpers.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is free software: you can redistribute it and/or modify
 | 
			
		||||
# it under the terms of the GNU Lesser General Public License version 3 as
 | 
			
		||||
# published by the Free Software Foundation.
 | 
			
		||||
#
 | 
			
		||||
# charm-helpers is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU Lesser General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU Lesser General Public License
 | 
			
		||||
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import sys
 | 
			
		||||
import subprocess
 | 
			
		||||
from charmhelpers.core import hookenv
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def default_execd_dir():
 | 
			
		||||
    return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def execd_module_paths(execd_dir=None):
 | 
			
		||||
    """Generate a list of full paths to modules within execd_dir."""
 | 
			
		||||
    if not execd_dir:
 | 
			
		||||
        execd_dir = default_execd_dir()
 | 
			
		||||
 | 
			
		||||
    if not os.path.exists(execd_dir):
 | 
			
		||||
        return
 | 
			
		||||
 | 
			
		||||
    for subpath in os.listdir(execd_dir):
 | 
			
		||||
        module = os.path.join(execd_dir, subpath)
 | 
			
		||||
        if os.path.isdir(module):
 | 
			
		||||
            yield module
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def execd_submodule_paths(command, execd_dir=None):
 | 
			
		||||
    """Generate a list of full paths to the specified command within exec_dir.
 | 
			
		||||
    """
 | 
			
		||||
    for module_path in execd_module_paths(execd_dir):
 | 
			
		||||
        path = os.path.join(module_path, command)
 | 
			
		||||
        if os.access(path, os.X_OK) and os.path.isfile(path):
 | 
			
		||||
            yield path
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def execd_run(command, execd_dir=None, die_on_error=False, stderr=None):
 | 
			
		||||
    """Run command for each module within execd_dir which defines it."""
 | 
			
		||||
    for submodule_path in execd_submodule_paths(command, execd_dir):
 | 
			
		||||
        try:
 | 
			
		||||
            subprocess.check_call(submodule_path, shell=True, stderr=stderr)
 | 
			
		||||
        except subprocess.CalledProcessError as e:
 | 
			
		||||
            hookenv.log("Error ({}) running {}. Output: {}".format(
 | 
			
		||||
                e.returncode, e.cmd, e.output))
 | 
			
		||||
            if die_on_error:
 | 
			
		||||
                sys.exit(e.returncode)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def execd_preinstall(execd_dir=None):
 | 
			
		||||
    """Run charm-pre-install for each module within execd_dir."""
 | 
			
		||||
    execd_run('charm-pre-install', execd_dir=execd_dir)
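# Charms that need site-specific preparation ship executables under
# exec.d/<module>/charm-pre-install; a sketch of the expected layout
# (the module names here are hypothetical):
#
#     $CHARM_DIR/exec.d/
#         00-extra-apt-config/charm-pre-install   (executable)
#         10-proxy-setup/charm-pre-install        (executable)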
 | 
			
38  hooks/cinder_backup_contexts.py  Normal file
@@ -0,0 +1,38 @@
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    service_name,
 | 
			
		||||
    is_relation_made,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.contrib.openstack.context import (
 | 
			
		||||
    OSContextGenerator,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.contrib.openstack.utils import get_os_codename_package
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class CephBackupSubordinateContext(OSContextGenerator):
 | 
			
		||||
    interfaces = ['ceph-cinder']
 | 
			
		||||
 | 
			
		||||
    def __call__(self):
 | 
			
		||||
        """Used to generate template context to be added to cinder.conf in the
 | 
			
		||||
        presence of a ceph relation.
 | 
			
		||||
        """
 | 
			
		||||
        if not is_relation_made('ceph', 'key'):
 | 
			
		||||
            return {}
 | 
			
		||||
 | 
			
		||||
        if get_os_codename_package('cinder-common') < "icehouse":
 | 
			
		||||
            raise Exception("Unsupported version of OpenStack")
 | 
			
		||||
 | 
			
		||||
        service = service_name()
 | 
			
		||||
        backup_driver = 'cinder.backup.drivers.ceph'
 | 
			
		||||
        return {
 | 
			
		||||
            "cinder": {
 | 
			
		||||
                "/etc/cinder/cinder.conf": {
 | 
			
		||||
                    "sections": {
 | 
			
		||||
                        'DEFAULT': [
 | 
			
		||||
                            ('backup_driver', backup_driver),
 | 
			
		||||
                            ('backup_ceph_pool', service),
 | 
			
		||||
                            ('backup_ceph_user', service),
 | 
			
		||||
                        ]
 | 
			
		||||
                    }
 | 
			
		||||
                }
 | 
			
		||||
            }
 | 
			
		||||
        }
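# When the principal cinder charm renders the subordinate configuration
# returned above, cinder.conf ends up with entries along these lines
# (assuming the juju service is named 'cinder-backup'):
#
#     [DEFAULT]
#     backup_driver = cinder.backup.drivers.ceph
#     backup_ceph_pool = cinder-backup
#     backup_ceph_user = cinder-backup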
 | 
			
141  hooks/cinder_backup_hooks.py  Executable file
@@ -0,0 +1,141 @@
 | 
			
		||||
#!/usr/bin/python
 | 
			
		||||
import os
 | 
			
		||||
import sys
 | 
			
		||||
import json
 | 
			
		||||
 | 
			
		||||
from cinder_backup_utils import (
 | 
			
		||||
    register_configs,
 | 
			
		||||
    restart_map,
 | 
			
		||||
    set_ceph_env_variables,
 | 
			
		||||
    PACKAGES
 | 
			
		||||
)
 | 
			
		||||
from cinder_backup_contexts import (
 | 
			
		||||
    CephBackupSubordinateContext
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    config,
 | 
			
		||||
    Hooks,
 | 
			
		||||
    UnregisteredHookError,
 | 
			
		||||
    service_name,
 | 
			
		||||
    relation_set,
 | 
			
		||||
    relation_ids,
 | 
			
		||||
    log,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.fetch import apt_install, apt_update
 | 
			
		||||
from charmhelpers.core.host import (
 | 
			
		||||
    restart_on_change,
 | 
			
		||||
    service_restart,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.contrib.storage.linux.ceph import (
 | 
			
		||||
    delete_keyring,
 | 
			
		||||
    ensure_ceph_keyring,
 | 
			
		||||
    is_request_complete,
 | 
			
		||||
    CephBrokerRq,
 | 
			
		||||
    send_request_if_needed,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.payload.execd import execd_preinstall
 | 
			
		||||
 | 
			
		||||
hooks = Hooks()
 | 
			
		||||
CONFIGS = register_configs()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@hooks.hook('install')
 | 
			
		||||
def install():
 | 
			
		||||
    execd_preinstall()
 | 
			
		||||
    apt_update(fatal=True)
 | 
			
		||||
    apt_install(PACKAGES, fatal=True)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@hooks.hook('ceph-relation-joined')
 | 
			
		||||
def ceph_joined():
 | 
			
		||||
    if not os.path.isdir('/etc/ceph'):
 | 
			
		||||
        os.mkdir('/etc/ceph')
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_ceph_request():
 | 
			
		||||
    service = service_name()
 | 
			
		||||
    rq = CephBrokerRq()
 | 
			
		||||
    replicas = config('ceph-osd-replication-count')
 | 
			
		||||
    rq.add_op_create_pool(name=service, replica_count=replicas)
 | 
			
		||||
    return rq
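# The broker request built above is serialised roughly as follows before being
# sent to the ceph charm (a sketch; the pool name is the juju service name and
# the replica count comes from the 'ceph-osd-replication-count' option):
#
#     {"api-version": 1,
#      "ops": [{"op": "create-pool", "name": "cinder-backup", "replicas": 3}]}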
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@hooks.hook('ceph-relation-changed')
 | 
			
		||||
@restart_on_change(restart_map())
 | 
			
		||||
def ceph_changed():
 | 
			
		||||
    if 'ceph' not in CONFIGS.complete_contexts():
 | 
			
		||||
        log('ceph relation incomplete. Peer not ready?')
 | 
			
		||||
        return
 | 
			
		||||
 | 
			
		||||
    service = service_name()
 | 
			
		||||
    if not ensure_ceph_keyring(service=service,
 | 
			
		||||
                               user='cinder', group='cinder'):
 | 
			
		||||
        log('Could not create ceph keyring: peer not ready?')
 | 
			
		||||
        return
 | 
			
		||||
 | 
			
		||||
    if is_request_complete(get_ceph_request()):
 | 
			
		||||
        log('Request complete')
 | 
			
		||||
        CONFIGS.write_all()
 | 
			
		||||
        set_ceph_env_variables(service=service)
 | 
			
		||||
        for rid in relation_ids('backup-backend'):
 | 
			
		||||
            backup_backend(rid)
 | 
			
		||||
 | 
			
		||||
        # Ensure that cinder services are restarted since only now can we
 | 
			
		||||
        # guarantee that ceph resources are ready. Note that the order of
 | 
			
		||||
        # restart is important here.
 | 
			
		||||
        for svc in ['cinder-volume', 'cinder-backup']:
 | 
			
		||||
            service_restart(svc)
 | 
			
		||||
 | 
			
		||||
    else:
 | 
			
		||||
        send_request_if_needed(get_ceph_request())
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@hooks.hook('ceph-relation-broken')
 | 
			
		||||
def ceph_broken():
 | 
			
		||||
    service = service_name()
 | 
			
		||||
    delete_keyring(service=service)
 | 
			
		||||
    CONFIGS.write_all()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@hooks.hook('config-changed')
 | 
			
		||||
@restart_on_change(restart_map())
 | 
			
		||||
def write_and_restart():
 | 
			
		||||
    CONFIGS.write_all()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@hooks.hook('backup-backend-relation-joined')
 | 
			
		||||
def backup_backend(rel_id=None):
 | 
			
		||||
    if 'ceph' not in CONFIGS.complete_contexts():
 | 
			
		||||
        log('ceph relation incomplete. Peer not ready?')
 | 
			
		||||
    else:
 | 
			
		||||
        ctxt = CephBackupSubordinateContext()()
 | 
			
		||||
        relation_set(
 | 
			
		||||
            relation_id=rel_id,
 | 
			
		||||
            backend_name=service_name(),
 | 
			
		||||
            subordinate_configuration=json.dumps(ctxt)
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@hooks.hook('backup-backend-relation-changed')
 | 
			
		||||
def backup_backend_changed():
 | 
			
		||||
    # NOTE(jamespage): recall backup_backend, as this only ever changes
    # post initial creation if the cinder charm is upgraded to a new
    # version of OpenStack.
 | 
			
		||||
    backup_backend()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@hooks.hook('upgrade-charm')
 | 
			
		||||
@restart_on_change(restart_map())
 | 
			
		||||
def upgrade_charm():
 | 
			
		||||
    if 'ceph' in CONFIGS.complete_contexts():
 | 
			
		||||
        CONFIGS.write_all()
 | 
			
		||||
        set_ceph_env_variables(service=service_name())
 | 
			
		||||
        for rid in relation_ids('backup-backend'):
 | 
			
		||||
            backup_backend(rid)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    try:
 | 
			
		||||
        hooks.execute(sys.argv)
 | 
			
		||||
    except UnregisteredHookError as e:
 | 
			
		||||
        log('Unknown hook {} - skipping.'.format(e))
 | 
			
101  hooks/cinder_backup_utils.py  Normal file
@@ -0,0 +1,101 @@
 | 
			
		||||
import os
 | 
			
		||||
from collections import OrderedDict
 | 
			
		||||
 | 
			
		||||
from charmhelpers.core.hookenv import (
 | 
			
		||||
    relation_ids,
 | 
			
		||||
    service_name,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.contrib.openstack import (
 | 
			
		||||
    templating,
 | 
			
		||||
    context,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.contrib.openstack.utils import (
 | 
			
		||||
    get_os_codename_package,
 | 
			
		||||
)
 | 
			
		||||
from charmhelpers.contrib.openstack.alternatives import install_alternative
 | 
			
		||||
from charmhelpers.core.host import mkdir
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
PACKAGES = [
 | 
			
		||||
    'ceph-common',
 | 
			
		||||
    'cinder-backup',
 | 
			
		||||
]
 | 
			
		||||
CHARM_CEPH_CONF = '/var/lib/charm/{}/ceph.conf'
 | 
			
		||||
CEPH_CONF = '/etc/ceph/ceph.conf'
 | 
			
		||||
TEMPLATES = 'templates/'
 | 
			
		||||
 | 
			
		||||
# Map config files to hook contexts and services that will be associated
# with the file in restart_on_change()'s service map.
 | 
			
		||||
CONFIG_FILES = {}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def ceph_config_file():
 | 
			
		||||
    return CHARM_CEPH_CONF.format(service_name())
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def register_configs():
 | 
			
		||||
    """Register config files with their respective contexts.
 | 
			
		||||
 | 
			
		||||
    Registration of some configs may not be required depending on
 | 
			
		||||
    the existence of certain relations.
 | 
			
		||||
    """
 | 
			
		||||
    # If called without anything installed (e.g. during the install hook),
    # just default to the earliest supported release. Configs don't get
    # touched till post-install, anyway.
 | 
			
		||||
    release = get_os_codename_package('cinder-common', fatal=False) or 'folsom'
 | 
			
		||||
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
 | 
			
		||||
                                          openstack_release=release)
 | 
			
		||||
 | 
			
		||||
    confs = []
 | 
			
		||||
 | 
			
		||||
    if relation_ids('ceph'):
 | 
			
		||||
        # Add charm ceph configuration to resources and
 | 
			
		||||
        # ensure directory actually exists
 | 
			
		||||
        mkdir(os.path.dirname(ceph_config_file()))
 | 
			
		||||
        mkdir(os.path.dirname(CEPH_CONF))
 | 
			
		||||
        # Install ceph config as an alternative for co-location with
        # ceph and ceph-osd charms - nova-compute ceph.conf will be
        # lower priority than both of these but that's OK
 | 
			
		||||
        if not os.path.exists(ceph_config_file()):
 | 
			
		||||
            # touch file for pre-templated generation
 | 
			
		||||
            open(ceph_config_file(), 'w').close()
 | 
			
		||||
        install_alternative(os.path.basename(CEPH_CONF),
 | 
			
		||||
                            CEPH_CONF, ceph_config_file())
 | 
			
		||||
        CONFIG_FILES[ceph_config_file()] = {
 | 
			
		||||
            'hook_contexts': [context.CephContext()],
 | 
			
		||||
            'services': ['cinder-backup'],
 | 
			
		||||
        }
 | 
			
		||||
        confs.append(ceph_config_file())
 | 
			
		||||
 | 
			
		||||
    for conf in confs:
 | 
			
		||||
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])
 | 
			
		||||
 | 
			
		||||
    return configs
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def restart_map():
 | 
			
		||||
    """Determine the correct resource map to be passed to
 | 
			
		||||
    charmhelpers.core.restart_on_change() based on the services configured.
 | 
			
		||||
 | 
			
		||||
    :returns: dict: A dictionary mapping config files to lists of services
                    that should be restarted when the file changes.
 | 
			
		||||
    """
 | 
			
		||||
    _map = []
 | 
			
		||||
    for f, ctxt in CONFIG_FILES.iteritems():
 | 
			
		||||
        svcs = []
 | 
			
		||||
        for svc in ctxt['services']:
 | 
			
		||||
            svcs.append(svc)
 | 
			
		||||
        if svcs:
 | 
			
		||||
            _map.append((f, svcs))
 | 
			
		||||
    return OrderedDict(_map)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def set_ceph_env_variables(service):
 | 
			
		||||
    # XXX: Horrid kludge to make cinder-backup use
 | 
			
		||||
    # a different ceph username than admin
 | 
			
		||||
    env = open('/etc/environment', 'r').read()
 | 
			
		||||
    if 'CEPH_ARGS' not in env:
 | 
			
		||||
        with open('/etc/environment', 'a') as out:
 | 
			
		||||
            out.write('CEPH_ARGS="--id %s"\n' % service)
 | 
			
		||||
    with open('/etc/init/cinder-backup.override', 'w') as out:
        out.write('env CEPH_ARGS="--id %s"\n' % service)
 | 
			
		||||
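
For context, a minimal usage sketch of how restart_map() above is consumed: the hooks module wraps hook functions with charmhelpers' restart_on_change decorator, so services mapped to a config file are restarted whenever that file's contents change. write_configs is a hypothetical function used only for illustration; the real hooks (e.g. upgrade_charm earlier in this commit) apply the decorator directly.

from charmhelpers.core.host import restart_on_change

from cinder_backup_utils import register_configs, restart_map

CONFIGS = register_configs()


@restart_on_change(restart_map())
def write_configs():
    # restart_on_change checksums every file in restart_map() before and
    # after this call and restarts the mapped services (here cinder-backup)
    # for any file whose contents changed.
    CONFIGS.write_all()
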
							
								
								
									
hooks/config-changed (Symbolic link, 1 line)
@@ -0,0 +1 @@
 | 
			
		||||
cinder_backup_hooks.py
 | 
			
		||||
							
								
								
									
hooks/install (Symbolic link, 1 line)
@@ -0,0 +1 @@
 | 
			
		||||
cinder_backup_hooks.py
 | 
			
		||||
							
								
								
									
hooks/start (Symbolic link, 1 line)
@@ -0,0 +1 @@
 | 
			
		||||
cinder_backup_hooks.py
 | 
			
		||||
							
								
								
									
hooks/stop (Symbolic link, 1 line)
@@ -0,0 +1 @@
 | 
			
		||||
cinder_backup_hooks.py
 | 
			
		||||
							
								
								
									
hooks/upgrade-charm (Symbolic link, 1 line)
@@ -0,0 +1 @@
 | 
			
		||||
cinder_backup_hooks.py
 | 
			
		||||
							
								
								
									
icon.svg (Normal file, 636 lines)
@@ -0,0 +1,636 @@
 | 
			
		||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
 | 
			
		||||
<!-- Created with Inkscape (http://www.inkscape.org/) -->
 | 
			
		||||
 | 
			
		||||
<svg
 | 
			
		||||
   xmlns:dc="http://purl.org/dc/elements/1.1/"
 | 
			
		||||
   xmlns:cc="http://creativecommons.org/ns#"
 | 
			
		||||
   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 | 
			
		||||
   xmlns:svg="http://www.w3.org/2000/svg"
 | 
			
		||||
   xmlns="http://www.w3.org/2000/svg"
 | 
			
		||||
   xmlns:xlink="http://www.w3.org/1999/xlink"
 | 
			
		||||
   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
 | 
			
		||||
   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
 | 
			
		||||
   sodipodi:docname="openstack-cinder.svg"
 | 
			
		||||
   inkscape:version="0.48+devel r12591"
 | 
			
		||||
   version="1.1"
 | 
			
		||||
   id="svg6517"
 | 
			
		||||
   height="96"
 | 
			
		||||
   width="96">
 | 
			
		||||
  <sodipodi:namedview
 | 
			
		||||
     id="base"
 | 
			
		||||
     pagecolor="#ffffff"
 | 
			
		||||
     bordercolor="#666666"
 | 
			
		||||
     borderopacity="1.0"
 | 
			
		||||
     inkscape:pageopacity="0.0"
 | 
			
		||||
     inkscape:pageshadow="2"
 | 
			
		||||
     inkscape:zoom="2.0861625"
 | 
			
		||||
     inkscape:cx="100.56201"
 | 
			
		||||
     inkscape:cy="47.468164"
 | 
			
		||||
     inkscape:document-units="px"
 | 
			
		||||
     inkscape:current-layer="layer1"
 | 
			
		||||
     showgrid="false"
 | 
			
		||||
     fit-margin-top="0"
 | 
			
		||||
     fit-margin-left="0"
 | 
			
		||||
     fit-margin-right="0"
 | 
			
		||||
     fit-margin-bottom="0"
 | 
			
		||||
     inkscape:window-width="1920"
 | 
			
		||||
     inkscape:window-height="1029"
 | 
			
		||||
     inkscape:window-x="0"
 | 
			
		||||
     inkscape:window-y="24"
 | 
			
		||||
     inkscape:window-maximized="1"
 | 
			
		||||
     showborder="true"
 | 
			
		||||
     showguides="false"
 | 
			
		||||
     inkscape:guide-bbox="true"
 | 
			
		||||
     inkscape:showpageshadow="false"
 | 
			
		||||
     inkscape:snap-global="true"
 | 
			
		||||
     inkscape:snap-bbox="true"
 | 
			
		||||
     inkscape:bbox-paths="true"
 | 
			
		||||
     inkscape:bbox-nodes="true"
 | 
			
		||||
     inkscape:snap-bbox-edge-midpoints="true"
 | 
			
		||||
     inkscape:snap-bbox-midpoints="true"
 | 
			
		||||
     inkscape:object-paths="true"
 | 
			
		||||
     inkscape:snap-intersection-paths="true"
 | 
			
		||||
     inkscape:object-nodes="true"
 | 
			
		||||
     inkscape:snap-smooth-nodes="true"
 | 
			
		||||
     inkscape:snap-midpoints="true"
 | 
			
		||||
     inkscape:snap-object-midpoints="true"
 | 
			
		||||
     inkscape:snap-center="true"
 | 
			
		||||
     inkscape:snap-grids="false"
 | 
			
		||||
     inkscape:snap-nodes="true"
 | 
			
		||||
     inkscape:snap-others="false">
 | 
			
		||||
    <inkscape:grid
 | 
			
		||||
       id="grid821"
 | 
			
		||||
       type="xygrid" />
 | 
			
		||||
    <sodipodi:guide
 | 
			
		||||
       id="guide823"
 | 
			
		||||
       position="18.34962,45.78585"
 | 
			
		||||
       orientation="1,0" />
 | 
			
		||||
    <sodipodi:guide
 | 
			
		||||
       id="guide827"
 | 
			
		||||
       position="78.02001,46.32673"
 | 
			
		||||
       orientation="1,0" />
 | 
			
		||||
    <sodipodi:guide
 | 
			
		||||
       inkscape:label=""
 | 
			
		||||
       id="guide4184"
 | 
			
		||||
       position="65.586619,19.307"
 | 
			
		||||
       orientation="-0.087155743,0.9961947" />
 | 
			
		||||
    <sodipodi:guide
 | 
			
		||||
       inkscape:label=""
 | 
			
		||||
       id="guide4188"
 | 
			
		||||
       position="62.756032,71.583147"
 | 
			
		||||
       orientation="-0.087155743,0.9961947" />
 | 
			
		||||
    <sodipodi:guide
 | 
			
		||||
       inkscape:label=""
 | 
			
		||||
       id="guide4190"
 | 
			
		||||
       position="47.812194,78.049658"
 | 
			
		||||
       orientation="-0.087155743,0.9961947" />
 | 
			
		||||
    <sodipodi:guide
 | 
			
		||||
       id="guide4194"
 | 
			
		||||
       position="25.60516,42.21665"
 | 
			
		||||
       orientation="1,0" />
 | 
			
		||||
    <sodipodi:guide
 | 
			
		||||
       inkscape:label=""
 | 
			
		||||
       id="guide4202"
 | 
			
		||||
       position="25.60516,42.070975"
 | 
			
		||||
       orientation="-0.087155743,0.9961947" />
 | 
			
		||||
    <sodipodi:guide
 | 
			
		||||
       inkscape:label=""
 | 
			
		||||
       id="guide4204"
 | 
			
		||||
       position="25.60516,42.070975"
 | 
			
		||||
       orientation="-0.70710678,-0.70710678" />
 | 
			
		||||
    <sodipodi:guide
 | 
			
		||||
       inkscape:label=""
 | 
			
		||||
       id="guide4242"
 | 
			
		||||
       position="51.81985,44.36226"
 | 
			
		||||
       orientation="-0.70710678,-0.70710678" />
 | 
			
		||||
    <sodipodi:guide
 | 
			
		||||
       inkscape:label=""
 | 
			
		||||
       id="guide4252"
 | 
			
		||||
       position="73.5625,75.210937"
 | 
			
		||||
       orientation="-0.70710678,-0.70710678" />
 | 
			
		||||
    <sodipodi:guide
 | 
			
		||||
       inkscape:label=""
 | 
			
		||||
       inkscape:color="rgb(140,140,240)"
 | 
			
		||||
       id="guide4254"
 | 
			
		||||
       position="18.34962,75.472017"
 | 
			
		||||
       orientation="-0.70710678,-0.70710678" />
 | 
			
		||||
    <sodipodi:guide
 | 
			
		||||
       inkscape:label=""
 | 
			
		||||
       id="guide4288"
 | 
			
		||||
       position="21.871042,21.577512"
 | 
			
		||||
       orientation="-0.70710678,-0.70710678" />
 | 
			
		||||
  </sodipodi:namedview>
 | 
			
		||||
  <defs
 | 
			
		||||
     id="defs6519">
 | 
			
		||||
    <filter
 | 
			
		||||
       id="filter1121"
 | 
			
		||||
       inkscape:label="Inner Shadow"
 | 
			
		||||
       style="color-interpolation-filters:sRGB;">
 | 
			
		||||
      <feFlood
 | 
			
		||||
         id="feFlood1123"
 | 
			
		||||
         result="flood"
 | 
			
		||||
         flood-color="rgb(0,0,0)"
 | 
			
		||||
         flood-opacity="0.59999999999999998" />
 | 
			
		||||
      <feComposite
 | 
			
		||||
         id="feComposite1125"
 | 
			
		||||
         result="composite1"
 | 
			
		||||
         operator="out"
 | 
			
		||||
         in2="SourceGraphic"
 | 
			
		||||
         in="flood" />
 | 
			
		||||
      <feGaussianBlur
 | 
			
		||||
         id="feGaussianBlur1127"
 | 
			
		||||
         result="blur"
 | 
			
		||||
         stdDeviation="1"
 | 
			
		||||
         in="composite1" />
 | 
			
		||||
      <feOffset
 | 
			
		||||
         id="feOffset1129"
 | 
			
		||||
         result="offset"
 | 
			
		||||
         dy="2"
 | 
			
		||||
         dx="0" />
 | 
			
		||||
      <feComposite
 | 
			
		||||
         id="feComposite1131"
 | 
			
		||||
         result="composite2"
 | 
			
		||||
         operator="atop"
 | 
			
		||||
         in2="SourceGraphic"
 | 
			
		||||
         in="offset" />
 | 
			
		||||
    </filter>
 | 
			
		||||
    <filter
 | 
			
		||||
       id="filter950"
 | 
			
		||||
       inkscape:label="Drop Shadow"
 | 
			
		||||
       style="color-interpolation-filters:sRGB;">
 | 
			
		||||
      <feFlood
 | 
			
		||||
         id="feFlood952"
 | 
			
		||||
         result="flood"
 | 
			
		||||
         flood-color="rgb(0,0,0)"
 | 
			
		||||
         flood-opacity="0.25" />
 | 
			
		||||
      <feComposite
 | 
			
		||||
         id="feComposite954"
 | 
			
		||||
         result="composite1"
 | 
			
		||||
         operator="in"
 | 
			
		||||
         in2="SourceGraphic"
 | 
			
		||||
         in="flood" />
 | 
			
		||||
      <feGaussianBlur
 | 
			
		||||
         id="feGaussianBlur956"
 | 
			
		||||
         result="blur"
 | 
			
		||||
         stdDeviation="1"
 | 
			
		||||
         in="composite1" />
 | 
			
		||||
      <feOffset
 | 
			
		||||
         id="feOffset958"
 | 
			
		||||
         result="offset"
 | 
			
		||||
         dy="1"
 | 
			
		||||
         dx="0" />
 | 
			
		||||
      <feComposite
 | 
			
		||||
         id="feComposite960"
 | 
			
		||||
         result="composite2"
 | 
			
		||||
         operator="over"
 | 
			
		||||
         in2="offset"
 | 
			
		||||
         in="SourceGraphic" />
 | 
			
		||||
    </filter>
 | 
			
		||||
    <filter
 | 
			
		||||
       inkscape:label="Badge Shadow"
 | 
			
		||||
       id="filter891"
 | 
			
		||||
       inkscape:collect="always">
 | 
			
		||||
      <feGaussianBlur
 | 
			
		||||
         id="feGaussianBlur893"
 | 
			
		||||
         stdDeviation="0.71999962"
 | 
			
		||||
         inkscape:collect="always" />
 | 
			
		||||
    </filter>
 | 
			
		||||
    <filter
 | 
			
		||||
       inkscape:collect="always"
 | 
			
		||||
       id="filter3831">
 | 
			
		||||
      <feGaussianBlur
 | 
			
		||||
         inkscape:collect="always"
 | 
			
		||||
         stdDeviation="0.86309522"
 | 
			
		||||
         id="feGaussianBlur3833" />
 | 
			
		||||
    </filter>
 | 
			
		||||
    <filter
 | 
			
		||||
       inkscape:collect="always"
 | 
			
		||||
       id="filter3868"
 | 
			
		||||
       x="-0.17186206"
 | 
			
		||||
       width="1.3437241"
 | 
			
		||||
       y="-0.1643077"
 | 
			
		||||
       height="1.3286154">
 | 
			
		||||
      <feGaussianBlur
 | 
			
		||||
         inkscape:collect="always"
 | 
			
		||||
         stdDeviation="0.62628186"
 | 
			
		||||
         id="feGaussianBlur3870" />
 | 
			
		||||
    </filter>
 | 
			
		||||
    <linearGradient
 | 
			
		||||
       id="linearGradient4328"
 | 
			
		||||
       inkscape:collect="always">
 | 
			
		||||
      <stop
 | 
			
		||||
         id="stop4330"
 | 
			
		||||
         offset="0"
 | 
			
		||||
         style="stop-color:#871f1c;stop-opacity:1;" />
 | 
			
		||||
      <stop
 | 
			
		||||
         id="stop4332"
 | 
			
		||||
         offset="1"
 | 
			
		||||
         style="stop-color:#651715;stop-opacity:1" />
 | 
			
		||||
    </linearGradient>
 | 
			
		||||
    <linearGradient
 | 
			
		||||
       id="linearGradient902"
 | 
			
		||||
       inkscape:collect="always">
 | 
			
		||||
      <stop
 | 
			
		||||
         id="stop904"
 | 
			
		||||
         offset="0"
 | 
			
		||||
         style="stop-color:#cccccc;stop-opacity:1" />
 | 
			
		||||
      <stop
 | 
			
		||||
         id="stop906"
 | 
			
		||||
         offset="1"
 | 
			
		||||
         style="stop-color:#e6e6e6;stop-opacity:1" />
 | 
			
		||||
    </linearGradient>
 | 
			
		||||
    <linearGradient
 | 
			
		||||
       id="Background">
 | 
			
		||||
      <stop
 | 
			
		||||
         style="stop-color:#22779e;stop-opacity:1"
 | 
			
		||||
         offset="0"
 | 
			
		||||
         id="stop4178" />
 | 
			
		||||
      <stop
 | 
			
		||||
         style="stop-color:#2991c0;stop-opacity:1"
 | 
			
		||||
         offset="1"
 | 
			
		||||
         id="stop4180" />
 | 
			
		||||
    </linearGradient>
 | 
			
		||||
    <clipPath
 | 
			
		||||
       id="clipPath873"
 | 
			
		||||
       clipPathUnits="userSpaceOnUse">
 | 
			
		||||
      <g
 | 
			
		||||
         style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
 | 
			
		||||
         inkscape:label="Layer 1"
 | 
			
		||||
         id="g875"
 | 
			
		||||
         transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)">
 | 
			
		||||
        <path
 | 
			
		||||
           sodipodi:nodetypes="sssssssss"
 | 
			
		||||
           inkscape:connector-curvature="0"
 | 
			
		||||
           id="path877"
 | 
			
		||||
           d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
 | 
			
		||||
           style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline" />
 | 
			
		||||
      </g>
 | 
			
		||||
    </clipPath>
 | 
			
		||||
    <style
 | 
			
		||||
       type="text/css"
 | 
			
		||||
       id="style867">
 | 
			
		||||
    .fil0 {fill:#1F1A17}
 | 
			
		||||
   </style>
 | 
			
		||||
    <linearGradient
 | 
			
		||||
       gradientUnits="userSpaceOnUse"
 | 
			
		||||
       y2="635.29077"
 | 
			
		||||
       x2="-220"
 | 
			
		||||
       y1="731.29077"
 | 
			
		||||
       x1="-220"
 | 
			
		||||
       id="linearGradient908"
 | 
			
		||||
       xlink:href="#linearGradient902"
 | 
			
		||||
       inkscape:collect="always" />
 | 
			
		||||
    <clipPath
 | 
			
		||||
       id="clipPath16">
 | 
			
		||||
      <path
 | 
			
		||||
         d="m -9,-9 614,0 0,231 -614,0 0,-231 z"
 | 
			
		||||
         id="path18" />
 | 
			
		||||
    </clipPath>
 | 
			
		||||
    <clipPath
 | 
			
		||||
       id="clipPath116">
 | 
			
		||||
      <path
 | 
			
		||||
         d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129"
 | 
			
		||||
         id="path118" />
 | 
			
		||||
    </clipPath>
 | 
			
		||||
    <clipPath
 | 
			
		||||
       id="clipPath128">
 | 
			
		||||
      <path
 | 
			
		||||
         d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129"
 | 
			
		||||
         id="path130" />
 | 
			
		||||
    </clipPath>
 | 
			
		||||
    <linearGradient
 | 
			
		||||
       inkscape:collect="always"
 | 
			
		||||
       id="linearGradient3850">
 | 
			
		||||
      <stop
 | 
			
		||||
         style="stop-color:#000000;stop-opacity:1;"
 | 
			
		||||
         offset="0"
 | 
			
		||||
         id="stop3852" />
 | 
			
		||||
      <stop
 | 
			
		||||
         style="stop-color:#000000;stop-opacity:0;"
 | 
			
		||||
         offset="1"
 | 
			
		||||
         id="stop3854" />
 | 
			
		||||
    </linearGradient>
 | 
			
		||||
    <clipPath
 | 
			
		||||
       id="clipPath3095"
 | 
			
		||||
       clipPathUnits="userSpaceOnUse">
 | 
			
		||||
      <path
 | 
			
		||||
         inkscape:connector-curvature="0"
 | 
			
		||||
         id="path3097"
 | 
			
		||||
         d="m 976.648,389.551 -842.402,0 0,839.999 842.402,0 0,-839.999" />
 | 
			
		||||
    </clipPath>
 | 
			
		||||
    <clipPath
 | 
			
		||||
       id="clipPath3195"
 | 
			
		||||
       clipPathUnits="userSpaceOnUse">
 | 
			
		||||
      <path
 | 
			
		||||
         inkscape:connector-curvature="0"
 | 
			
		||||
         id="path3197"
 | 
			
		||||
         d="m 611.836,756.738 -106.34,105.207 c -8.473,8.289 -13.617,20.102 -13.598,33.379 L 598.301,790.207 c -0.031,-13.418 5.094,-25.031 13.535,-33.469" />
 | 
			
		||||
    </clipPath>
 | 
			
		||||
    <clipPath
 | 
			
		||||
       id="clipPath3235"
 | 
			
		||||
       clipPathUnits="userSpaceOnUse">
 | 
			
		||||
      <path
 | 
			
		||||
         inkscape:connector-curvature="0"
 | 
			
		||||
         id="path3237"
 | 
			
		||||
         d="m 1095.64,1501.81 c 35.46,-35.07 70.89,-70.11 106.35,-105.17 4.4,-4.38 7.11,-10.53 7.11,-17.55 l -106.37,105.21 c 0,7 -2.71,13.11 -7.09,17.51" />
 | 
			
		||||
    </clipPath>
 | 
			
		||||
    <linearGradient
 | 
			
		||||
       inkscape:collect="always"
 | 
			
		||||
       id="linearGradient4389">
 | 
			
		||||
      <stop
 | 
			
		||||
         style="stop-color:#871f1c;stop-opacity:1"
 | 
			
		||||
         offset="0"
 | 
			
		||||
         id="stop4391" />
 | 
			
		||||
      <stop
 | 
			
		||||
         style="stop-color:#c42e24;stop-opacity:1"
 | 
			
		||||
         offset="1"
 | 
			
		||||
         id="stop4393" />
 | 
			
		||||
    </linearGradient>
 | 
			
		||||
    <clipPath
 | 
			
		||||
       clipPathUnits="userSpaceOnUse"
 | 
			
		||||
       id="clipPath4591">
 | 
			
		||||
      <path
 | 
			
		||||
         id="path4593"
 | 
			
		||||
         style="fill:#ff00ff;fill-opacity:1;fill-rule:nonzero;stroke:none"
 | 
			
		||||
         d="m 1106.6009,730.43734 -0.036,21.648 c -0.01,3.50825 -2.8675,6.61375 -6.4037,6.92525 l -83.6503,7.33162 c -3.5205,0.30763 -6.3812,-2.29987 -6.3671,-5.8145 l 0.036,-21.6475 20.1171,-1.76662 -0.011,4.63775 c 0,1.83937 1.4844,3.19925 3.3262,3.0395 l 49.5274,-4.33975 c 1.8425,-0.166 3.3425,-1.78125 3.3538,-3.626 l 0.01,-4.63025 20.1,-1.7575"
 | 
			
		||||
         inkscape:connector-curvature="0" />
 | 
			
		||||
    </clipPath>
 | 
			
		||||
    <radialGradient
 | 
			
		||||
       inkscape:collect="always"
 | 
			
		||||
       xlink:href="#linearGradient3850"
 | 
			
		||||
       id="radialGradient3856"
 | 
			
		||||
       cx="-26.508606"
 | 
			
		||||
       cy="93.399292"
 | 
			
		||||
       fx="-26.508606"
 | 
			
		||||
       fy="93.399292"
 | 
			
		||||
       r="20.40658"
 | 
			
		||||
       gradientTransform="matrix(-1.4333926,-2.2742838,1.1731823,-0.73941125,-174.08025,98.374394)"
 | 
			
		||||
       gradientUnits="userSpaceOnUse" />
 | 
			
		||||
    <filter
 | 
			
		||||
       inkscape:collect="always"
 | 
			
		||||
       id="filter3885">
 | 
			
		||||
      <feGaussianBlur
 | 
			
		||||
         inkscape:collect="always"
 | 
			
		||||
         stdDeviation="5.7442192"
 | 
			
		||||
         id="feGaussianBlur3887" />
 | 
			
		||||
    </filter>
 | 
			
		||||
    <linearGradient
 | 
			
		||||
       inkscape:collect="always"
 | 
			
		||||
       xlink:href="#linearGradient3850"
 | 
			
		||||
       id="linearGradient3895"
 | 
			
		||||
       x1="348.20132"
 | 
			
		||||
       y1="593.11615"
 | 
			
		||||
       x2="-51.879555"
 | 
			
		||||
       y2="993.19702"
 | 
			
		||||
       gradientUnits="userSpaceOnUse"
 | 
			
		||||
       gradientTransform="translate(-318.48033,212.32022)" />
 | 
			
		||||
    <radialGradient
 | 
			
		||||
       inkscape:collect="always"
 | 
			
		||||
       xlink:href="#linearGradient3850"
 | 
			
		||||
       id="radialGradient3902"
 | 
			
		||||
       gradientUnits="userSpaceOnUse"
 | 
			
		||||
       gradientTransform="matrix(-1.4333926,-2.2742838,1.1731823,-0.73941125,-174.08025,98.374394)"
 | 
			
		||||
       cx="-26.508606"
 | 
			
		||||
       cy="93.399292"
 | 
			
		||||
       fx="-26.508606"
 | 
			
		||||
       fy="93.399292"
 | 
			
		||||
       r="20.40658" />
 | 
			
		||||
    <linearGradient
 | 
			
		||||
       inkscape:collect="always"
 | 
			
		||||
       xlink:href="#linearGradient3850"
 | 
			
		||||
       id="linearGradient3904"
 | 
			
		||||
       gradientUnits="userSpaceOnUse"
 | 
			
		||||
       gradientTransform="translate(-318.48033,212.32022)"
 | 
			
		||||
       x1="348.20132"
 | 
			
		||||
       y1="593.11615"
 | 
			
		||||
       x2="-51.879555"
 | 
			
		||||
       y2="993.19702" />
 | 
			
		||||
    <linearGradient
 | 
			
		||||
       gradientUnits="userSpaceOnUse"
 | 
			
		||||
       y2="23.383789"
 | 
			
		||||
       x2="25.217773"
 | 
			
		||||
       y1="27.095703"
 | 
			
		||||
       x1="21.505859"
 | 
			
		||||
       id="linearGradient4318"
 | 
			
		||||
       xlink:href="#linearGradient4389"
 | 
			
		||||
       inkscape:collect="always" />
 | 
			
		||||
    <linearGradient
 | 
			
		||||
       gradientUnits="userSpaceOnUse"
 | 
			
		||||
       y2="20.884073"
 | 
			
		||||
       x2="71.960243"
 | 
			
		||||
       y1="20.041777"
 | 
			
		||||
       x1="72.802544"
 | 
			
		||||
       id="linearGradient4326"
 | 
			
		||||
       xlink:href="#linearGradient4389"
 | 
			
		||||
       inkscape:collect="always" />
 | 
			
		||||
    <linearGradient
 | 
			
		||||
       gradientUnits="userSpaceOnUse"
 | 
			
		||||
       y2="74.246689"
 | 
			
		||||
       x2="21.69179"
 | 
			
		||||
       y1="73.643555"
 | 
			
		||||
       x1="22.294922"
 | 
			
		||||
       id="linearGradient4334"
 | 
			
		||||
       xlink:href="#linearGradient4328"
 | 
			
		||||
       inkscape:collect="always" />
 | 
			
		||||
  </defs>
 | 
			
		||||
  <metadata
 | 
			
		||||
     id="metadata6522">
 | 
			
		||||
    <rdf:RDF>
 | 
			
		||||
      <cc:Work
 | 
			
		||||
         rdf:about="">
 | 
			
		||||
        <dc:format>image/svg+xml</dc:format>
 | 
			
		||||
        <dc:type
 | 
			
		||||
           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
 | 
			
		||||
        <dc:title></dc:title>
 | 
			
		||||
      </cc:Work>
 | 
			
		||||
    </rdf:RDF>
 | 
			
		||||
  </metadata>
 | 
			
		||||
  <g
 | 
			
		||||
     style="display:inline"
 | 
			
		||||
     transform="translate(268,-635.29076)"
 | 
			
		||||
     id="layer1"
 | 
			
		||||
     inkscape:groupmode="layer"
 | 
			
		||||
     inkscape:label="BACKGROUND">
 | 
			
		||||
    <path
 | 
			
		||||
       sodipodi:nodetypes="sssssssss"
 | 
			
		||||
       inkscape:connector-curvature="0"
 | 
			
		||||
       id="path6455"
 | 
			
		||||
       d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
 | 
			
		||||
       style="fill:url(#linearGradient908);fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)" />
 | 
			
		||||
    <g
 | 
			
		||||
       id="g4336">
 | 
			
		||||
      <g
 | 
			
		||||
         transform="matrix(0.06790711,0,0,-0.06790711,-239.0411,765.68623)"
 | 
			
		||||
         id="g3897"
 | 
			
		||||
         xml:space="default">
 | 
			
		||||
        <path
 | 
			
		||||
           inkscape:connector-curvature="0"
 | 
			
		||||
           style="opacity:0.7;color:#000000;fill:url(#radialGradient3902);fill-opacity:1;stroke:none;stroke-width:2;marker:none;visibility:visible;display:inline;overflow:visible;filter:url(#filter3831);enable-background:accumulate"
 | 
			
		||||
           d="m -48.09375,67.8125 c -0.873996,-0.0028 -2.089735,0.01993 -3.40625,0.09375 -2.633031,0.147647 -5.700107,0.471759 -7.78125,1.53125 a 1.0001,1.0001 0 0 0 -0.25,1.59375 L -38.8125,92.375 a 1.0001,1.0001 0 0 0 0.84375,0.3125 L -24,90.5625 a 1.0001,1.0001 0 0 0 0.53125,-1.71875 L -46.0625,68.125 a 1.0001,1.0001 0 0 0 -0.625,-0.28125 c 0,0 -0.532254,-0.02842 -1.40625,-0.03125 z"
 | 
			
		||||
           transform="matrix(10.616011,0,0,-10.616011,357.98166,1725.8152)"
 | 
			
		||||
           id="path3821"
 | 
			
		||||
           xml:space="default" />
 | 
			
		||||
        <path
 | 
			
		||||
           style="opacity:0.6;color:#000000;fill:none;stroke:#000000;stroke-width:2.77429962;stroke-linecap:round;marker:none;visibility:visible;display:inline;overflow:visible;filter:url(#filter3868);enable-background:accumulate"
 | 
			
		||||
           d="m -15.782705,81.725197 8.7458304,9.147937"
 | 
			
		||||
           id="path3858"
 | 
			
		||||
           inkscape:connector-curvature="0"
 | 
			
		||||
           transform="matrix(10.616011,0,0,-10.616011,39.50133,1725.8152)"
 | 
			
		||||
           xml:space="default" />
 | 
			
		||||
        <path
 | 
			
		||||
           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:0.3;color:#000000;fill:url(#linearGradient3904);fill-opacity:1;stroke:none;stroke-width:2;marker:none;visibility:visible;display:inline;overflow:visible;filter:url(#filter3885);enable-background:accumulate;font-family:Sans;-inkscape-font-specification:Sans"
 | 
			
		||||
           d="m -95.18931,981.03569 a 10.617073,10.617073 0 0 1 -0.995251,-0.3318 l -42.795789,-5.308 a 10.617073,10.617073 0 0 1 -6.30326,-17.9145 L -4.2897203,812.5065 a 10.617073,10.617073 0 0 1 8.95726,-3.3175 l 49.0990503,7.63026 a 10.617073,10.617073 0 0 1 5.97151,17.91452 L -87.55905,978.04989 a 10.617073,10.617073 0 0 1 -7.63026,2.9858 z"
 | 
			
		||||
           id="path3874"
 | 
			
		||||
           inkscape:connector-curvature="0"
 | 
			
		||||
           xml:space="default" />
 | 
			
		||||
      </g>
 | 
			
		||||
      <path
 | 
			
		||||
         style="opacity:1;color:#000000;fill:#871f1c;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.1;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
 | 
			
		||||
         d="M 20.697266 20.515625 C 19.336871 21.10204 18.348875 22.456253 18.345703 23.970703 L 18.345703 24 C 18.345703 23.9808 18.353156 23.962559 18.353516 23.943359 L 18.353516 28.300781 L 18.353516 35.341797 L 21.425781 38.349609 L 18.353516 38.625 L 18.353516 55.039062 L 21.425781 58.046875 L 18.353516 58.322266 L 18.353516 55.039062 L 18.345703 24.0625 L 18.353516 69.601562 C 18.349848 70.477025 18.685456 71.239319 19.222656 71.802734 L 19.212891 71.8125 L 19.357422 71.955078 C 19.360505 71.957909 19.364093 71.960073 19.367188 71.962891 L 26.660156 79.126953 L 33.458984 71.771484 L 21.814453 72.791016 C 21.791653 72.793016 21.770747 72.789016 21.748047 72.791016 L 33.488281 71.738281 L 67.492188 68.685547 C 67.874994 68.651208 68.237746 68.545454 68.578125 68.394531 L 55.199219 55.015625 L 25.611328 57.671875 L 25.611328 54.388672 L 52.1875 52.003906 L 37.123047 36.941406 L 25.611328 37.974609 L 25.611328 34.691406 L 34.111328 33.927734 L 20.697266 20.515625 z "
 | 
			
		||||
         transform="translate(-268,635.29076)"
 | 
			
		||||
         id="path4308" />
 | 
			
		||||
      <path
 | 
			
		||||
         style="color:#000000;fill:#c42e24;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.1;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
 | 
			
		||||
         d="m -200.67969,651.54467 -45.49804,3.95898 c -0.39583,0.0351 -0.7701,0.14975 -1.125,0.30273 l 13.41406,13.41211 36.65625,-3.28711 0.01,0.74415 6.45508,-6.98633 -7.33984,-7.21875 -0.008,0.01 c -0.63301,-0.64671 -1.5421,-1.01814 -2.56446,-0.93554 z m -39,3.42382 -6.67187,0.59766 c 0.0594,-0.008 0.11568,-0.0282 0.17578,-0.0332 z m 42.44727,14.2461 -33.64453,3.01758 15.06445,15.0625 18.57813,-1.66602 0.002,-2.13672 0,-14.27734 z m -0.002,19.69531 -15.56641,1.39648 13.37891,13.37891 c 0.053,-0.0235 0.10451,-0.0502 0.15625,-0.0762 1.19087,-0.65347 2.02247,-1.91423 2.02539,-3.30274 l 0.006,-11.39648 z"
 | 
			
		||||
         id="path4233"
 | 
			
		||||
         inkscape:connector-curvature="0"
 | 
			
		||||
         xml:space="default"
 | 
			
		||||
         sodipodi:nodetypes="ccccccccccccccccccccccccccccc" />
 | 
			
		||||
      <path
 | 
			
		||||
         style="fill:#df4438;fill-opacity:1;fill-rule:nonzero;stroke:none"
 | 
			
		||||
         d="m -193.41992,658.68199 -39.00195,3.39453 -6.66993,0.59766 c -1.81216,0.25153 -3.26311,1.84158 -3.29687,3.66797 l 0,11.39843 52.41406,-4.70117 0,-11.34375 c -0.0805,-1.83267 -1.58243,-3.16418 -3.44531,-3.01367 z"
 | 
			
		||||
         id="path4674" />
 | 
			
		||||
      <path
 | 
			
		||||
         style="fill:#dd3b2f;fill-opacity:1;fill-rule:nonzero;stroke:none"
 | 
			
		||||
         d="m -189.97461,676.32262 -52.41406,4.70117 0,16.41406 52.41406,-4.70117 0,-16.41406 z"
 | 
			
		||||
         id="path4672" />
 | 
			
		||||
      <path
 | 
			
		||||
         style="fill:#d93023;fill-opacity:1;fill-rule:nonzero;stroke:none"
 | 
			
		||||
         d="m -189.97461,696.01793 -52.41406,4.70312 0.002,11.3086 c -0.008,1.88995 1.51656,3.29383 3.40235,3.16015 l 45.73437,-4.10547 c 0.66788,-0.0599 1.28587,-0.3155 1.80273,-0.70312 0.88331,-0.70488 1.46437,-1.77799 1.4668,-2.9375 l 0.006,-11.42578 z"
 | 
			
		||||
         id="path4670" />
 | 
			
		||||
      <path
 | 
			
		||||
         style="fill:#d93023;fill-opacity:1;fill-rule:nonzero;stroke:none"
 | 
			
		||||
         d="m -191.44727,710.38121 c -0.0994,0.0793 -0.20788,0.14708 -0.31445,0.2168 0.10723,-0.0697 0.21469,-0.13718 0.31445,-0.2168 z"
 | 
			
		||||
         id="path4668" />
 | 
			
		||||
      <path
 | 
			
		||||
         style="fill:#d93023;fill-opacity:1;fill-rule:nonzero;stroke:none"
 | 
			
		||||
         d="m -191.96484,710.72496 c -0.0984,0.0562 -0.19952,0.10691 -0.30274,0.1543 0.10395,-0.0471 0.20372,-0.0983 0.30274,-0.1543 z"
 | 
			
		||||
         id="path4666" />
 | 
			
		||||
      <path
 | 
			
		||||
         style="fill:#d93023;fill-opacity:1;fill-rule:nonzero;stroke:none"
 | 
			
		||||
         d="m -192.58594,711.00426 c -0.082,0.0289 -0.1637,0.0589 -0.24804,0.082 0.0849,-0.0229 0.16545,-0.0534 0.24804,-0.082 z"
 | 
			
		||||
         id="path4633" />
 | 
			
		||||
      <rect
 | 
			
		||||
         xml:space="default"
 | 
			
		||||
         y="648.49109"
 | 
			
		||||
         x="-258.70667"
 | 
			
		||||
         height="69.20665"
 | 
			
		||||
         width="69.20665"
 | 
			
		||||
         id="rect3585-3"
 | 
			
		||||
         style="opacity:0.8;color:#000000;fill:none;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" />
 | 
			
		||||
      <path
 | 
			
		||||
         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:1;color:#000000;color-interpolation:sRGB;color-interpolation-filters:sRGB;fill:url(#linearGradient4318);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:5.25;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif"
 | 
			
		||||
         d="M 22.029297 20.195312 L 21.822266 20.212891 C 19.919838 20.381715 18.370776 22.043134 18.349609 23.939453 L 24.662109 30.251953 L 25.605469 31.195312 L 25.605469 31.103516 C 25.609469 29.193966 27.168951 27.515473 29.082031 27.345703 L 29.171875 27.337891 L 28.373047 26.539062 L 22.029297 20.195312 z "
 | 
			
		||||
         transform="translate(-268,635.29076)"
 | 
			
		||||
         id="path4256" />
 | 
			
		||||
      <path
 | 
			
		||||
         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:0.5;color:#000000;color-interpolation:sRGB;color-interpolation-filters:sRGB;fill:url(#linearGradient4326);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:2.4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif;stroke-miterlimit:4;stroke-dasharray:none"
 | 
			
		||||
         d="M 67.330078 16.253906 L 68.03125 16.955078 L 74.472656 23.396484 L 74.580078 23.386719 C 75.531927 23.309814 76.390588 23.620657 77.015625 24.185547 L 69.892578 17.179688 L 69.884766 17.189453 C 69.253843 16.544862 68.348328 16.174551 67.330078 16.253906 z M 77.054688 24.222656 C 77.115589 24.279686 77.164628 24.348282 77.220703 24.410156 L 77.232422 24.398438 L 77.054688 24.222656 z "
 | 
			
		||||
         transform="translate(-268,635.29076)"
 | 
			
		||||
         id="path4272" />
 | 
			
		||||
      <path
 | 
			
		||||
         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:1;color:#000000;color-interpolation:sRGB;color-interpolation-filters:sRGB;fill:url(#linearGradient4334);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:1.7;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif;stroke-miterlimit:4;stroke-dasharray:none"
 | 
			
		||||
         d="M 18.363281 69.712891 C 18.387957 70.540342 18.709001 71.264013 19.222656 71.802734 L 19.212891 71.8125 L 19.357422 71.955078 C 19.360505 71.957909 19.364093 71.960073 19.367188 71.962891 L 26.599609 79.068359 C 26.044831 78.550125 25.698241 77.821152 25.638672 76.988281 L 18.951172 70.298828 L 18.363281 69.712891 z M 26.636719 79.103516 L 26.660156 79.126953 L 26.664062 79.123047 C 26.655656 79.11562 26.645042 79.111033 26.636719 79.103516 z "
 | 
			
		||||
         transform="translate(-268,635.29076)"
 | 
			
		||||
         id="path4290" />
 | 
			
		||||
      <path
 | 
			
		||||
         style="color:#000000;fill:#871f1c;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.1;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
 | 
			
		||||
         d="m 75.006338,38.020624 -45.602041,4.088751 0,3.283203 48.615713,-4.360235 z m 0.002,19.69531 -45.603995,4.090707 0,3.283203 48.615713,-4.362191 z m 1.026864,17.71766 c -0.09902,0.056 -0.198784,0.107197 -0.302734,0.154297 0.10322,-0.04739 0.204334,-0.0981 0.302734,-0.154297 z m -0.621094,0.279297 c -0.08259,0.0286 -0.163146,0.05913 -0.248046,0.08203 0.08434,-0.0231 0.166047,-0.05313 0.248046,-0.08203 z"
 | 
			
		||||
         transform="translate(-268,635.29076)"
 | 
			
		||||
         id="path4656"
 | 
			
		||||
         inkscape:connector-curvature="0"
 | 
			
		||||
         sodipodi:nodetypes="cccccccccccccccc" />
 | 
			
		||||
      <path
 | 
			
		||||
         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none;opacity:0.3"
 | 
			
		||||
         d="M 74.580078 23.390625 L 35.578125 26.785156 L 28.908203 27.382812 C 27.096043 27.634343 25.645088 29.224391 25.611328 31.050781 L 25.611328 31.25 C 25.645088 29.42361 27.096043 27.833561 28.908203 27.582031 L 35.578125 26.984375 L 74.580078 23.589844 C 76.442958 23.439334 77.944891 24.770846 78.025391 26.603516 L 78.025391 26.404297 C 77.944891 24.571627 76.442958 23.240115 74.580078 23.390625 z M 78.025391 41.03125 L 25.611328 45.732422 L 25.611328 45.931641 L 78.025391 41.230469 L 78.025391 41.03125 z M 78.025391 60.726562 L 25.611328 65.429688 L 25.611328 65.628906 L 78.025391 60.925781 L 78.025391 60.726562 z "
 | 
			
		||||
         transform="translate(-268,635.29076)"
 | 
			
		||||
         id="path4676" />
 | 
			
		||||
    </g>
 | 
			
		||||
  </g>
 | 
			
		||||
  <g
 | 
			
		||||
     style="display:inline"
 | 
			
		||||
     inkscape:label="PLACE YOUR PICTOGRAM HERE"
 | 
			
		||||
     id="layer3"
 | 
			
		||||
     inkscape:groupmode="layer" />
 | 
			
		||||
  <g
 | 
			
		||||
     sodipodi:insensitive="true"
 | 
			
		||||
     style="display:none"
 | 
			
		||||
     inkscape:label="BADGE"
 | 
			
		||||
     id="layer2"
 | 
			
		||||
     inkscape:groupmode="layer">
 | 
			
		||||
    <g
 | 
			
		||||
       clip-path="none"
 | 
			
		||||
       id="g4394"
 | 
			
		||||
       transform="translate(-340.00001,-581)"
 | 
			
		||||
       style="display:inline">
 | 
			
		||||
      <g
 | 
			
		||||
         id="g855">
 | 
			
		||||
        <g
 | 
			
		||||
           style="opacity:0.6;filter:url(#filter891)"
 | 
			
		||||
           clip-path="url(#clipPath873)"
 | 
			
		||||
           id="g870"
 | 
			
		||||
           inkscape:groupmode="maskhelper">
 | 
			
		||||
          <path
 | 
			
		||||
             sodipodi:type="arc"
 | 
			
		||||
             style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
 | 
			
		||||
             id="path844"
 | 
			
		||||
             sodipodi:cx="252"
 | 
			
		||||
             sodipodi:cy="552.36218"
 | 
			
		||||
             sodipodi:rx="12"
 | 
			
		||||
             sodipodi:ry="12"
 | 
			
		||||
             d="m 264,552.36218 c 0,6.62742 -5.37258,12 -12,12 -6.62742,0 -12,-5.37258 -12,-12 0,-6.62741 5.37258,-12 12,-12 6.62742,0 12,5.37259 12,12 z"
 | 
			
		||||
             transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)" />
 | 
			
		||||
        </g>
 | 
			
		||||
        <g
 | 
			
		||||
           id="g862">
 | 
			
		||||
          <path
 | 
			
		||||
             transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)"
 | 
			
		||||
             d="m 264,552.36218 c 0,6.62742 -5.37258,12 -12,12 -6.62742,0 -12,-5.37258 -12,-12 0,-6.62741 5.37258,-12 12,-12 6.62742,0 12,5.37259 12,12 z"
 | 
			
		||||
             sodipodi:ry="12"
 | 
			
		||||
             sodipodi:rx="12"
 | 
			
		||||
             sodipodi:cy="552.36218"
 | 
			
		||||
             sodipodi:cx="252"
 | 
			
		||||
             id="path4398"
 | 
			
		||||
             style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
 | 
			
		||||
             sodipodi:type="arc" />
 | 
			
		||||
          <path
 | 
			
		||||
             sodipodi:type="arc"
 | 
			
		||||
             style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
 | 
			
		||||
             id="path4400"
 | 
			
		||||
             sodipodi:cx="252"
 | 
			
		||||
             sodipodi:cy="552.36218"
 | 
			
		||||
             sodipodi:rx="12"
 | 
			
		||||
             sodipodi:ry="12"
 | 
			
		||||
             d="m 264,552.36218 c 0,6.62742 -5.37258,12 -12,12 -6.62742,0 -12,-5.37258 -12,-12 0,-6.62741 5.37258,-12 12,-12 6.62742,0 12,5.37259 12,12 z"
 | 
			
		||||
             transform="matrix(1.25,0,0,1.25,33,-100.45273)" />
 | 
			
		||||
          <path
 | 
			
		||||
             transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)"
 | 
			
		||||
             d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
 | 
			
		||||
             inkscape:randomized="0"
 | 
			
		||||
             inkscape:rounded="0.1"
 | 
			
		||||
             inkscape:flatsided="false"
 | 
			
		||||
             sodipodi:arg2="1.6755161"
 | 
			
		||||
             sodipodi:arg1="1.0471976"
 | 
			
		||||
             sodipodi:r2="4.3458705"
 | 
			
		||||
             sodipodi:r1="7.2431178"
 | 
			
		||||
             sodipodi:cy="589.50385"
 | 
			
		||||
             sodipodi:cx="666.19574"
 | 
			
		||||
             sodipodi:sides="5"
 | 
			
		||||
             id="path4459"
 | 
			
		||||
             style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
 | 
			
		||||
             sodipodi:type="star" />
 | 
			
		||||
        </g>
 | 
			
		||||
      </g>
 | 
			
		||||
    </g>
 | 
			
		||||
  </g>
 | 
			
		||||
</svg>
 | 
			
		||||
| 
(icon.svg rendered image: 32 KiB)

metadata.yaml (Normal file, 20 lines)
@@ -0,0 +1,20 @@
 | 
			
		||||
name: cinder-backup
 | 
			
		||||
summary: Cinder-backup integration for OpenStack Block Storage
 | 
			
		||||
maintainer: Edward Hope-Morley <opentastic@gmail.com>
 | 
			
		||||
description: |
 | 
			
		||||
 Cinder is the block storage service for the OpenStack project.
 | 
			
		||||
 .
 | 
			
		||||
 This subordinate charm configures the Cinder backup service.
 | 
			
		||||
categories:
 | 
			
		||||
  - miscellaneous
 | 
			
		||||
subordinate: true
 | 
			
		||||
provides:
 | 
			
		||||
  backup-backend:
 | 
			
		||||
    interface: cinder-backup
 | 
			
		||||
    scope: container
 | 
			
		||||
requires:
 | 
			
		||||
  juju-info:
 | 
			
		||||
    interface: juju-info
 | 
			
		||||
    scope: container
 | 
			
		||||
  ceph:
 | 
			
		||||
    interface: ceph-client
 | 
			
		||||
							
								
								
									
setup.cfg (Normal file, 6 lines)
@@ -0,0 +1,6 @@
 | 
			
		||||
[nosetests]
 | 
			
		||||
verbosity=1
 | 
			
		||||
with-coverage=1
 | 
			
		||||
cover-erase=1
 | 
			
		||||
cover-package=hooks
 | 
			
		||||
 | 
			
		||||
							
								
								
									
unit_tests/__init__.py (Normal file, 2 lines)
@@ -0,0 +1,2 @@
 | 
			
		||||
import sys
 | 
			
		||||
sys.path.append('hooks')
 | 
			
		||||
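
A short note on the two lines above: putting hooks/ on sys.path is what lets the unit tests import the hook modules by bare name, for example (hypothetical snippet):

import cinder_backup_utils      # resolves to hooks/cinder_backup_utils.py
import cinder_backup_contexts   # used by test_cinder_backup_contexts.py below
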
							
								
								
									
unit_tests/test_cinder_backup_contexts.py (Normal file, 35 lines)
@@ -0,0 +1,35 @@
 | 
			
		||||
import cinder_backup_contexts as contexts
 | 
			
		||||
 | 
			
		||||
from test_utils import (
 | 
			
		||||
    CharmTestCase
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
TO_PATCH = [
 | 
			
		||||
    'is_relation_made',
 | 
			
		||||
    'service_name',
 | 
			
		||||
    'get_os_codename_package'
 | 
			
		||||
]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class TestCinderBackupContext(CharmTestCase):
 | 
			
		||||
 | 
			
		||||
    def setUp(self):
 | 
			
		||||
        super(TestCinderBackupContext, self).setUp(contexts, TO_PATCH)
 | 
			
		||||
 | 
			
		||||
    def test_backup_context(self):
 | 
			
		||||
        self.get_os_codename_package.return_value = 'icehouse'
 | 
			
		||||
        self.service_name.return_value = 'cinder-backup-ut'
 | 
			
		||||
        ctxt = contexts.CephBackupSubordinateContext()()
 | 
			
		||||
        exp = {'cinder': {'/etc/cinder/cinder.conf':
 | 
			
		||||
                          {'sections': {'DEFAULT':
 | 
			
		||||
                                        [('backup_driver',
 | 
			
		||||
                                          'cinder.backup.drivers.ceph'),
 | 
			
		||||
                                         ('backup_ceph_pool',
 | 
			
		||||
                                          'cinder-backup-ut'),
 | 
			
		||||
                                         ('backup_ceph_user',
 | 
			
		||||
                                          'cinder-backup-ut')]}}}}
 | 
			
		||||
        self.assertEqual(ctxt, exp)
 | 
			
		||||
 | 
			
		||||
    def test_backup_context_unsupported(self):
 | 
			
		||||
        self.get_os_codename_package.return_value = 'havana'
 | 
			
		||||
        self.assertRaises(Exception, contexts.CephBackupSubordinateContext())
 | 
			
		||||
							
								
								
									
unit_tests/test_cinder_backup_hooks.py (Normal file, 98 lines)
@@ -0,0 +1,98 @@
 | 
			
		||||
from mock import patch
 | 
			
		||||
 | 
			
		||||
with patch('cinder_backup_utils.register_configs'):
 | 
			
		||||
    import cinder_backup_hooks as hooks
 | 
			
		||||
 | 
			
		||||
from test_utils import (
 | 
			
		||||
    CharmTestCase
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
TO_PATCH = [
 | 
			
		||||
    # cinder_utils
 | 
			
		||||
    'ensure_ceph_keyring',
 | 
			
		||||
    'register_configs',
 | 
			
		||||
    'restart_map',
 | 
			
		||||
    'set_ceph_env_variables',
 | 
			
		||||
    'is_request_complete',
 | 
			
		||||
    'send_request_if_needed',
 | 
			
		||||
    'CONFIGS',
 | 
			
		||||
    # charmhelpers.core.hookenv
 | 
			
		||||
    'config',
 | 
			
		||||
    'relation_ids',
 | 
			
		||||
    'relation_set',
 | 
			
		||||
    'service_name',
 | 
			
		||||
    'service_restart',
 | 
			
		||||
    'log',
 | 
			
		||||
    # charmhelpers.core.host
 | 
			
		||||
    'apt_install',
 | 
			
		||||
    'apt_update',
 | 
			
		||||
    # charmhelpers.contrib.hahelpers.cluster_utils
 | 
			
		||||
    'execd_preinstall',
 | 
			
		||||
    'delete_keyring'
 | 
			
		||||
]
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class TestCinderBackupHooks(CharmTestCase):

    def setUp(self):
        super(TestCinderBackupHooks, self).setUp(hooks, TO_PATCH)
        self.config.side_effect = self.test_config.get

    @patch('charmhelpers.core.hookenv.config')
    @patch('os.mkdir')
    def test_ceph_joined(self, mkdir, mock_config):
        """It correctly prepares for a ceph changed hook"""
        with patch('os.path.isdir') as isdir:
            isdir.return_value = False
            hooks.hooks.execute(['hooks/ceph-relation-joined'])
            mkdir.assert_called_with('/etc/ceph')

    @patch('charmhelpers.core.hookenv.config')
    def test_ceph_changed_no_key(self, mock_config):
        """It does nothing when the ceph key is not available"""
        self.CONFIGS.complete_contexts.return_value = ['']
        hooks.hooks.execute(['hooks/ceph-relation-changed'])
        m = 'ceph relation incomplete. Peer not ready?'
        self.log.assert_called_with(m)

    @patch('charmhelpers.core.hookenv.config')
    def test_ceph_changed(self, mock_config):
        """It ensures ceph assets are created on ceph changed"""
        self.is_request_complete.return_value = True
        self.CONFIGS.complete_contexts.return_value = ['ceph']
        self.service_name.return_value = 'cinder-backup'
        self.ensure_ceph_keyring.return_value = True
        hooks.hooks.execute(['hooks/ceph-relation-changed'])
        self.ensure_ceph_keyring.assert_called_with(service='cinder-backup',
                                                    user='cinder',
                                                    group='cinder')
        self.assertTrue(self.CONFIGS.write_all.called)
        self.set_ceph_env_variables.assert_called_with(
            service='cinder-backup')

    @patch.object(hooks, 'get_ceph_request')
    @patch('charmhelpers.core.hookenv.config')
    def test_ceph_changed_newrq(self, mock_config, mock_get_ceph_request):
        """It sends a new ceph broker request when none has completed"""
        mock_get_ceph_request.return_value = 'cephreq'
        self.is_request_complete.return_value = False
        self.CONFIGS.complete_contexts.return_value = ['ceph']
        self.service_name.return_value = 'cinder-backup'
        self.ensure_ceph_keyring.return_value = True
        hooks.hooks.execute(['hooks/ceph-relation-changed'])
        self.ensure_ceph_keyring.assert_called_with(service='cinder-backup',
                                                    user='cinder',
                                                    group='cinder')
        self.send_request_if_needed.assert_called_with('cephreq')

    @patch('charmhelpers.core.hookenv.config')
    def test_ceph_changed_no_keys(self, mock_config):
        """It does not write configs when the ceph keyring cannot be created"""
        self.CONFIGS.complete_contexts.return_value = ['ceph']
        self.service_name.return_value = 'cinder-backup'
        self.is_request_complete.return_value = True
        self.ensure_ceph_keyring.return_value = False
        hooks.hooks.execute(['hooks/ceph-relation-changed'])
        # NOTE(jamespage): If ensure_ceph_keyring fails, the hook should
        # just exit 0 and return.
        self.assertTrue(self.log.called)
        self.assertFalse(self.CONFIGS.write_all.called)
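
Note: the mock attributes used above (self.CONFIGS, self.log,
self.is_request_complete, self.ensure_ceph_keyring, and so on) are not
defined on the test class itself; CharmTestCase.patch_all(), defined in
unit_tests/test_utils.py below, patches each name listed in TO_PATCH on the
hooks module and exposes the resulting mock as an attribute of the test case.
A minimal sketch of that pattern, using a hypothetical module and a single
patched name:

    # Sketch only: 'mymodule' and 'log' are illustrative, not part of this charm.
    from mock import patch
    import unittest

    import mymodule

    TO_PATCH = ['log']


    class SketchTestCase(unittest.TestCase):
        def setUp(self):
            super(SketchTestCase, self).setUp()
            for name in TO_PATCH:
                patcher = patch.object(mymodule, name)
                setattr(self, name, patcher.start())  # exposed as self.log
                self.addCleanup(patcher.stop)
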

98  unit_tests/test_utils.py  Normal file
@@ -0,0 +1,98 @@
import logging
import unittest
import os
import yaml

from mock import patch


def load_config():
    """Walk backwards from __file__ looking for config.yaml, load and return
    the 'options' section.
    """
    config = None
    f = __file__
    while config is None:
        d = os.path.dirname(f)
        if os.path.isfile(os.path.join(d, 'config.yaml')):
            config = os.path.join(d, 'config.yaml')
            break
        if d == f:
            # Reached the filesystem root without finding config.yaml.
            break
        f = d

    if not config:
        logging.error('Could not find config.yaml in any parent directory '
                      'of %s. ' % __file__)
        raise Exception

    return yaml.safe_load(open(config).read())['options']


def get_default_config():
    """Load the default charm config from config.yaml and return it as a dict.

    If no default is set in config.yaml, its value is None.
    """
    default_config = {}
    config = load_config()
    for k, v in config.iteritems():
        if 'default' in v:
            default_config[k] = v['default']
        else:
            default_config[k] = None
    return default_config
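
# Illustrative example (hypothetical option names, not taken from this
# charm's config.yaml): given an options section such as
#
#     options:
#       ceph-osd-replication-count:
#         type: int
#         default: 3
#       restrict-ceph-pools:
#         type: boolean
#
# get_default_config() would return
#     {'ceph-osd-replication-count': 3, 'restrict-ceph-pools': None}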


class CharmTestCase(unittest.TestCase):

    def setUp(self, obj, patches):
        super(CharmTestCase, self).setUp()
        self.patches = patches
        self.obj = obj
        self.test_config = TestConfig()
        self.test_relation = TestRelation()
        self.patch_all()

    def patch(self, method):
        _m = patch.object(self.obj, method)
        mock = _m.start()
        self.addCleanup(_m.stop)
        return mock

    def patch_all(self):
        for method in self.patches:
            setattr(self, method, self.patch(method))


class TestConfig(object):

    def __init__(self):
        self.config = get_default_config()

    def get(self, attr):
        try:
            return self.config[attr]
        except KeyError:
            return None

    def get_all(self):
        return self.config

    def set(self, attr, value):
        if attr not in self.config:
            raise KeyError
        self.config[attr] = value


class TestRelation(object):

    def __init__(self, relation_data=None):
        # Use None as the default to avoid sharing a mutable dict between
        # instances.
        self.relation_data = relation_data or {}

    def set(self, relation_data):
        self.relation_data = relation_data

    def get(self, attr=None, unit=None, rid=None):
        if attr is None:
            return self.relation_data
        elif attr in self.relation_data:
            return self.relation_data[attr]
        return None
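
A small usage sketch (not part of this commit) showing how the helpers above
behave; the relation keys and values are illustrative only:

    # Sketch only; assumes TestRelation and TestConfig from test_utils.py.
    relation = TestRelation()
    relation.set({'auth': 'cephx', 'key': 'secret'})
    assert relation.get('auth') == 'cephx'                        # single attribute
    assert relation.get() == {'auth': 'cephx', 'key': 'secret'}   # whole dict
    assert relation.get('missing') is None                        # unknown keys return None

    config = TestConfig()
    assert config.get('no-such-option') is None    # unknown options return None
    assert config.get_all() == config.config       # full default config dict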