diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 1f33487..0000000
--- a/.gitignore
+++ /dev/null
@@ -1,9 +0,0 @@
-bin
-.coverage
-.testrepository
-.stestr
-.tox
-*.sw[nop]
-*.pyc
-wily
-tests/*.tar.xz
diff --git a/.gitreview b/.gitreview
index 34276c7..af495ce 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
[gerrit]
host=review.opendev.org
port=29418
-project=openstack/charm-lxd.git
+project=openstack/charm-nova-lxd.git
diff --git a/.project b/.project
deleted file mode 100644
index cd218d2..0000000
--- a/.project
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<projectDescription>
- <name>lxd</name>
- <comment></comment>
- <projects>
- </projects>
- <buildSpec>
-  <buildCommand>
-   <name>org.python.pydev.PyDevBuilder</name>
-   <arguments>
-   </arguments>
-  </buildCommand>
- </buildSpec>
- <natures>
-  <nature>org.python.pydev.pythonNature</nature>
- </natures>
-</projectDescription>
diff --git a/.pydevproject b/.pydevproject
deleted file mode 100644
index ba662f3..0000000
--- a/.pydevproject
+++ /dev/null
@@ -1,10 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<?eclipse-pydev version="1.0"?><pydev_project>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
-<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
-<path>/lxd/hooks</path>
-<path>/${PROJECT_DIR_NAME}/unit_tests</path>
-<path>/${PROJECT_DIR_NAME}/tests</path>
-</pydev_pathproperty>
-</pydev_project>
diff --git a/.stestr.conf b/.stestr.conf
deleted file mode 100644
index 5fcccac..0000000
--- a/.stestr.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-test_path=./unit_tests
-top_dir=./
diff --git a/.zuul.yaml b/.zuul.yaml
deleted file mode 100644
index aa9c508..0000000
--- a/.zuul.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-- project:
- templates:
- - python-charm-jobs
- - openstack-python35-jobs-nonvoting
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/Makefile b/Makefile
deleted file mode 100644
index a54ff19..0000000
--- a/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/make
-PYTHON := /usr/bin/env python
-
-lint:
- @tox -e pep8
-
-test:
- tox -e py27
-
-bin/charm_helpers_sync.py:
- @mkdir -p bin
- @curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py
-
-
-sync: bin/charm_helpers_sync.py
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
-
-functional_test:
- @echo Starting Amulet tests...
- tox -e func27
diff --git a/README.md b/README.md
deleted file mode 100644
index be4c791..0000000
--- a/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Overview
-
-LXD is a hypervisor for managing Linux Containers; it provides a
-simple RESTful API for creation and management of containers. This
-charm is currently specific to LXD's use with nova-lxd, but that's
-only by usage, rather than specific design.
-
-# Usage with nova-compute and nova-lxd
-
-While the lxd charm can be used with any charm to enable use of LXD,
-its primary use is with the nova-compute OpenStack charm, for
-provisioning LXD-based OpenStack Nova instances.
-
-For example:
-
- juju deploy nova-compute
- juju config nova-compute virt-type=lxd
- juju deploy lxd
- juju config lxd block-devices=/dev/sdb storage-type=lvm
- juju add-relation lxd nova-compute
-
-The caveat is that nova-compute is part of a greater ecosystem of many
-OpenStack service charms. For a full OpenStack Mitaka deployment using
-LXD, please refer to the [OpenStack
-LXD](https://jujucharms.com/u/openstack-charmers-next/openstack-lxd)
-bundle.
-
-At this time, nova-lxd is only supported on Ubuntu 16.04 or above,
-with OpenStack Mitaka (provided as part of Ubuntu 16.04).
-
-# Contact Information
-
-Report bugs on [Launchpad](https://bugs.launchpad.net/charm-lxd/+filebug)
\ No newline at end of file
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..376d930
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,6 @@
+This project is no longer maintained.
+
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^2".
diff --git a/actions/.keep b/actions/.keep
deleted file mode 100644
index f49b91a..0000000
--- a/actions/.keep
+++ /dev/null
@@ -1,3 +0,0 @@
- This file was created by release-tools to ensure that this empty
- directory is preserved in vcs re: lint check definitions in global
- tox.ini files. This file can be removed if/when this dir is actually in use.
diff --git a/bindep.txt b/bindep.txt
deleted file mode 100644
index 70ed989..0000000
--- a/bindep.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-# NOTE: required for charm-tools->launchpadlib->keyring->secretstorage
-libdbus-1-dev
-libdbus-glib-1-dev
diff --git a/charm-helpers.yaml b/charm-helpers.yaml
deleted file mode 100644
index 2390835..0000000
--- a/charm-helpers.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-repo: https://github.com/juju/charm-helpers
-destination: hooks/charmhelpers
-include:
- - core
- - osplatform
- - fetch
- - contrib.openstack|inc=*
- - contrib.storage
- - contrib.hahelpers:
- - apache
- - cluster
- - contrib.network
- - contrib.python
- - payload.execd
- - contrib.charmsupport
diff --git a/config.yaml b/config.yaml
deleted file mode 100644
index 07066d3..0000000
--- a/config.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-options:
- source:
- type: string
- default:
- description: |
- Repository from which to install LXD. May be one of the following:
- distro (default), ppa:somecustom/ppa, a deb url sources entry,
- or a supported release pocket
- block-devices:
- type: string
- default:
- description: |
- A space-separated list of devices to use to back the LXD storage. Items
- in this list should be valid block device paths. Entries that are not
- found will be ignored.
- .
- Currently, only the first block device in the list will be considered.
- overwrite:
- type: boolean
- default: False
- description: |
- If enabled, the charm will attempt to overwrite block devices
- containing previous filesystems or LVM, assuming they are not in use.
- storage-type:
- type: string
- default: btrfs
- description: |
- LXD container storage type: btrfs, zfs, or lvm
- ephemeral-unmount:
- type: string
- default:
- description: |
- Cloud instances provide ephemeral storage which is normally mounted
- on /mnt.
- .
- Providing this option will force an unmount of the ephemeral device
- so that it can be used for LXD container storage. This is useful for
- testing purposes (cloud deployment is not a typical use case).
- use-source:
- type: boolean
- default: False
- description: Use LXD source from GitHub.
- enable-ext4-userns:
- type: boolean
- default: True
- description: |
- Enable use of EXT4 in LXD containers for block storage support.
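
For context, a minimal sketch of how hook code reads the options removed above, assuming charmhelpers' hookenv.config() (shown later in this patch); prepare_block_device is a hypothetical helper named only to illustrate the flow:

    from charmhelpers.core.hookenv import config

    def configure_storage():
        # block-devices is a space-separated list; per the description
        # above, only the first entry is currently considered.
        devices = (config('block-devices') or '').split()
        storage_type = config('storage-type')  # btrfs, zfs, or lvm
        if devices and config('overwrite'):
            # hypothetical helper, shown only to illustrate the flow
            prepare_block_device(devices[0], storage_type)
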
diff --git a/copyright b/copyright
deleted file mode 100644
index 9d1a4cc..0000000
--- a/copyright
+++ /dev/null
@@ -1,16 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2012, Canonical Ltd., All Rights Reserved.
-License: Apache-2.0
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py
deleted file mode 100644
index 61ef907..0000000
--- a/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-from __future__ import print_function
-from __future__ import absolute_import
-
-import functools
-import inspect
-import subprocess
-import sys
-
-try:
- import six # NOQA:F401
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # NOQA:F401
-
-try:
- import yaml # NOQA:F401
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # NOQA:F401
-
-
-# Holds a list of mapping of mangled function names that have been deprecated
-# using the @deprecate decorator below. This is so that the warning is only
-# printed once for each usage of the function.
-__deprecated_functions = {}
-
-
-def deprecate(warning, date=None, log=None):
- """Add a deprecation warning the first time the function is used.
- The date, which is a string in semi-ISO8660 format indicate the year-month
- that the function is officially going to be removed.
-
- usage:
-
- @deprecate('use core/fetch/add_source() instead', '2017-04')
- def contributed_add_source_thing(...):
- ...
-
- And it then prints to the log ONCE that the function is deprecated.
- The reason for passing the logging function (log) is so that hookenv.log
- can be used for a charm if needed.
-
- :param warning: String to indicate where the function has moved to.
- :param date: optional string, in YYYY-MM format, to indicate when the
- function will definitely (probably) be removed.
- :param log: The log function to call. If not supplied, logs to stdout.
- """
- def wrap(f):
-
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- try:
- module = inspect.getmodule(f)
- file = inspect.getsourcefile(f)
- lines = inspect.getsourcelines(f)
- f_name = "{}-{}-{}..{}-{}".format(
- module.__name__, file, lines[0], lines[-1], f.__name__)
- except (IOError, TypeError):
- # assume it was local, so just use the name of the function
- f_name = f.__name__
- if f_name not in __deprecated_functions:
- __deprecated_functions[f_name] = True
- s = "DEPRECATION WARNING: Function {} is being removed".format(
- f.__name__)
- if date:
- s = "{} on/around {}".format(s, date)
- if warning:
- s = "{} : {}".format(s, warning)
- if log:
- log(s)
- else:
- print(s)
- return f(*args, **kwargs)
- return wrapped_f
- return wrap
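
As a usage sketch mirroring the docstring above (the decorator and its signature are defined in this file; the function name is illustrative):

    from charmhelpers import deprecate

    @deprecate('use core/fetch/add_source() instead', date='2017-04')
    def contributed_add_source_thing(source):
        # behaviour is unchanged; a warning is logged once per process
        return source
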
diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/charmsupport/__init__.py b/hooks/charmhelpers/contrib/charmsupport/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/charmsupport/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
deleted file mode 100644
index 0626b32..0000000
--- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ /dev/null
@@ -1,455 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Compatibility with the nrpe-external-master charm"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Matthew Wedgwood
-
-import subprocess
-import pwd
-import grp
-import os
-import glob
-import shutil
-import re
-import shlex
-import yaml
-
-from charmhelpers.core.hookenv import (
- config,
- hook_name,
- local_unit,
- log,
- relation_ids,
- relation_set,
- relations_of_type,
-)
-
-from charmhelpers.core.host import service
-from charmhelpers.core import host
-
-# This module adds compatibility with the nrpe-external-master and plain nrpe
-# subordinate charms. To use it in your charm:
-#
-# 1. Update metadata.yaml
-#
-# provides:
-# (...)
-# nrpe-external-master:
-# interface: nrpe-external-master
-# scope: container
-#
-# and/or
-#
-# provides:
-# (...)
-# local-monitors:
-# interface: local-monitors
-# scope: container
-
-#
-# 2. Add the following to config.yaml
-#
-# nagios_context:
-# default: "juju"
-# type: string
-# description: |
-# Used by the nrpe subordinate charms.
-# A string that will be prepended to instance name to set the host name
-# in nagios. So for instance the hostname would be something like:
-# juju-myservice-0
-# If you're running multiple environments with the same services in them
-# this allows you to differentiate between them.
-# nagios_servicegroups:
-# default: ""
-# type: string
-# description: |
-# A comma-separated list of nagios servicegroups.
-# If left empty, the nagios_context will be used as the servicegroup
-#
-# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
-#
-# 4. Update your hooks.py with something like this:
-#
-# from charmsupport.nrpe import NRPE
-# (...)
-# def update_nrpe_config():
-# nrpe_compat = NRPE()
-# nrpe_compat.add_check(
-# shortname = "myservice",
-# description = "Check MyService",
-# check_cmd = "check_http -w 2 -c 10 http://localhost"
-# )
-# nrpe_compat.add_check(
-# "myservice_other",
-# "Check for widget failures",
-# check_cmd = "/srv/myapp/scripts/widget_check"
-# )
-# nrpe_compat.write()
-#
-# def config_changed():
-# (...)
-# update_nrpe_config()
-#
-# def nrpe_external_master_relation_changed():
-# update_nrpe_config()
-#
-# def local_monitors_relation_changed():
-# update_nrpe_config()
-#
-# 4.a If your charm is a subordinate charm set primary=False
-#
-# from charmsupport.nrpe import NRPE
-# (...)
-# def update_nrpe_config():
-# nrpe_compat = NRPE(primary=False)
-#
-# 5. ln -s hooks.py nrpe-external-master-relation-changed
-# ln -s hooks.py local-monitors-relation-changed
-
-
-class CheckException(Exception):
- pass
-
-
-class Check(object):
- shortname_re = '[A-Za-z0-9-_.@]+$'
- service_template = ("""
-#---------------------------------------------------
-# This file is Juju managed
-#---------------------------------------------------
-define service {{
- use active-service
- host_name {nagios_hostname}
- service_description {nagios_hostname}[{shortname}] """
- """{description}
- check_command check_nrpe!{command}
- servicegroups {nagios_servicegroup}
-}}
-""")
-
- def __init__(self, shortname, description, check_cmd):
- super(Check, self).__init__()
- # XXX: could be better to calculate this from the service name
- if not re.match(self.shortname_re, shortname):
- raise CheckException("shortname must match {}".format(
- Check.shortname_re))
- self.shortname = shortname
- self.command = "check_{}".format(shortname)
- # Note: a set of invalid characters is defined by the
- # Nagios server config
- # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
- self.description = description
- self.check_cmd = self._locate_cmd(check_cmd)
-
- def _get_check_filename(self):
- return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
-
- def _get_service_filename(self, hostname):
- return os.path.join(NRPE.nagios_exportdir,
- 'service__{}_{}.cfg'.format(hostname, self.command))
-
- def _locate_cmd(self, check_cmd):
- search_path = (
- '/usr/lib/nagios/plugins',
- '/usr/local/lib/nagios/plugins',
- )
- parts = shlex.split(check_cmd)
- for path in search_path:
- if os.path.exists(os.path.join(path, parts[0])):
- command = os.path.join(path, parts[0])
- if len(parts) > 1:
- command += " " + " ".join(parts[1:])
- return command
- log('Check command not found: {}'.format(parts[0]))
- return ''
-
- def _remove_service_files(self):
- if not os.path.exists(NRPE.nagios_exportdir):
- return
- for f in os.listdir(NRPE.nagios_exportdir):
- if f.endswith('_{}.cfg'.format(self.command)):
- os.remove(os.path.join(NRPE.nagios_exportdir, f))
-
- def remove(self, hostname):
- nrpe_check_file = self._get_check_filename()
- if os.path.exists(nrpe_check_file):
- os.remove(nrpe_check_file)
- self._remove_service_files()
-
- def write(self, nagios_context, hostname, nagios_servicegroups):
- nrpe_check_file = self._get_check_filename()
- with open(nrpe_check_file, 'w') as nrpe_check_config:
- nrpe_check_config.write("# check {}\n".format(self.shortname))
- if nagios_servicegroups:
- nrpe_check_config.write(
- "# The following header was added automatically by juju\n")
- nrpe_check_config.write(
- "# Modifying it will affect nagios monitoring and alerting\n")
- nrpe_check_config.write(
- "# servicegroups: {}\n".format(nagios_servicegroups))
- nrpe_check_config.write("command[{}]={}\n".format(
- self.command, self.check_cmd))
-
- if not os.path.exists(NRPE.nagios_exportdir):
- log('Not writing service config as {} is not accessible'.format(
- NRPE.nagios_exportdir))
- else:
- self.write_service_config(nagios_context, hostname,
- nagios_servicegroups)
-
- def write_service_config(self, nagios_context, hostname,
- nagios_servicegroups):
- self._remove_service_files()
-
- templ_vars = {
- 'nagios_hostname': hostname,
- 'nagios_servicegroup': nagios_servicegroups,
- 'description': self.description,
- 'shortname': self.shortname,
- 'command': self.command,
- }
- nrpe_service_text = Check.service_template.format(**templ_vars)
- nrpe_service_file = self._get_service_filename(hostname)
- with open(nrpe_service_file, 'w') as nrpe_service_config:
- nrpe_service_config.write(str(nrpe_service_text))
-
- def run(self):
- subprocess.call(self.check_cmd)
-
-
-class NRPE(object):
- nagios_logdir = '/var/log/nagios'
- nagios_exportdir = '/var/lib/nagios/export'
- nrpe_confdir = '/etc/nagios/nrpe.d'
- homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server
-
- def __init__(self, hostname=None, primary=True):
- super(NRPE, self).__init__()
- self.config = config()
- self.primary = primary
- self.nagios_context = self.config['nagios_context']
- if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
- self.nagios_servicegroups = self.config['nagios_servicegroups']
- else:
- self.nagios_servicegroups = self.nagios_context
- self.unit_name = local_unit().replace('/', '-')
- if hostname:
- self.hostname = hostname
- else:
- nagios_hostname = get_nagios_hostname()
- if nagios_hostname:
- self.hostname = nagios_hostname
- else:
- self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
- self.checks = []
- # Iff in an nrpe-external-master relation hook, set primary status
- relation = relation_ids('nrpe-external-master')
- if relation:
- log("Setting charm primary status {}".format(primary))
- for rid in relation_ids('nrpe-external-master'):
- relation_set(relation_id=rid, relation_settings={'primary': self.primary})
-
- def add_check(self, *args, **kwargs):
- self.checks.append(Check(*args, **kwargs))
-
- def remove_check(self, *args, **kwargs):
- if kwargs.get('shortname') is None:
- raise ValueError('shortname of check must be specified')
-
- # Use sensible defaults if they're not specified - these are not
- # actually used during removal, but they're required for constructing
- # the Check object; check_disk is chosen because it's part of the
- # nagios-plugins-basic package.
- if kwargs.get('check_cmd') is None:
- kwargs['check_cmd'] = 'check_disk'
- if kwargs.get('description') is None:
- kwargs['description'] = ''
-
- check = Check(*args, **kwargs)
- check.remove(self.hostname)
-
- def write(self):
- try:
- nagios_uid = pwd.getpwnam('nagios').pw_uid
- nagios_gid = grp.getgrnam('nagios').gr_gid
- except Exception:
- log("Nagios user not set up, nrpe checks not updated")
- return
-
- if not os.path.exists(NRPE.nagios_logdir):
- os.mkdir(NRPE.nagios_logdir)
- os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
-
- nrpe_monitors = {}
- monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
- for nrpecheck in self.checks:
- nrpecheck.write(self.nagios_context, self.hostname,
- self.nagios_servicegroups)
- nrpe_monitors[nrpecheck.shortname] = {
- "command": nrpecheck.command,
- }
-
- # update-status hooks are configured to fire every 5 minutes by
- # default. When nagios-nrpe-server is restarted, the nagios server
- # reports checks as failing, causing unnecessary alerts. Let's not
- # restart on update-status hooks.
- if not hook_name() == 'update-status':
- service('restart', 'nagios-nrpe-server')
-
- monitor_ids = relation_ids("local-monitors") + \
- relation_ids("nrpe-external-master")
- for rid in monitor_ids:
- relation_set(relation_id=rid, monitors=yaml.dump(monitors))
-
-
-def get_nagios_hostcontext(relation_name='nrpe-external-master'):
- """
- Query relation with nrpe subordinate, return the nagios_host_context
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- for rel in relations_of_type(relation_name):
- if 'nagios_host_context' in rel:
- return rel['nagios_host_context']
-
-
-def get_nagios_hostname(relation_name='nrpe-external-master'):
- """
- Query relation with nrpe subordinate, return the nagios_hostname
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- for rel in relations_of_type(relation_name):
- if 'nagios_hostname' in rel:
- return rel['nagios_hostname']
-
-
-def get_nagios_unit_name(relation_name='nrpe-external-master'):
- """
- Return the nagios unit name prepended with host_context if needed
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- host_context = get_nagios_hostcontext(relation_name)
- if host_context:
- unit = "%s:%s" % (host_context, local_unit())
- else:
- unit = local_unit()
- return unit
-
-
-def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
- """
- Add checks for each service in list
-
- :param NRPE nrpe: NRPE object to add check to
- :param list services: List of services to check
- :param str unit_name: Unit name to use in check description
- :param bool immediate_check: For sysv init, run the service check immediately
- """
- for svc in services:
- # Don't add a check for these services from neutron-gateway
- if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
- continue
-
- upstart_init = '/etc/init/%s.conf' % svc
- sysv_init = '/etc/init.d/%s' % svc
-
- if host.init_is_systemd():
- nrpe.add_check(
- shortname=svc,
- description='process check {%s}' % unit_name,
- check_cmd='check_systemd.py %s' % svc
- )
- elif os.path.exists(upstart_init):
- nrpe.add_check(
- shortname=svc,
- description='process check {%s}' % unit_name,
- check_cmd='check_upstart_job %s' % svc
- )
- elif os.path.exists(sysv_init):
- cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
- checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
- croncmd = (
- '/usr/local/lib/nagios/plugins/check_exit_status.pl '
- '-e -s /etc/init.d/%s status' % svc
- )
- cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
- f = open(cronpath, 'w')
- f.write(cron_file)
- f.close()
- nrpe.add_check(
- shortname=svc,
- description='service check {%s}' % unit_name,
- check_cmd='check_status_file.py -f %s' % checkpath,
- )
- # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
- # (LP: #1670223).
- if immediate_check and os.path.isdir(nrpe.homedir):
- f = open(checkpath, 'w')
- subprocess.call(
- croncmd.split(),
- stdout=f,
- stderr=subprocess.STDOUT
- )
- f.close()
- os.chmod(checkpath, 0o644)
-
-
-def copy_nrpe_checks(nrpe_files_dir=None):
- """
- Copy the nrpe checks into place
-
- """
- NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
- if nrpe_files_dir is None:
- # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
- for segment in ['.', 'hooks']:
- nrpe_files_dir = os.path.abspath(os.path.join(
- os.getenv('CHARM_DIR'),
- segment,
- 'charmhelpers',
- 'contrib',
- 'openstack',
- 'files'))
- if os.path.isdir(nrpe_files_dir):
- break
- else:
- raise RuntimeError("Couldn't find charmhelpers directory")
- if not os.path.exists(NAGIOS_PLUGINS):
- os.makedirs(NAGIOS_PLUGINS)
- for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
- if os.path.isfile(fname):
- shutil.copy2(fname,
- os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
-
-
-def add_haproxy_checks(nrpe, unit_name):
- """
- Add checks for each service in list
-
- :param NRPE nrpe: NRPE object to add check to
- :param str unit_name: Unit name to use in check description
- """
- nrpe.add_check(
- shortname='haproxy_servers',
- description='Check HAProxy {%s}' % unit_name,
- check_cmd='check_haproxy.sh')
- nrpe.add_check(
- shortname='haproxy_queue',
- description='Check HAProxy queue depth {%s}' % unit_name,
- check_cmd='check_haproxy_queue_depth.sh')
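
Tying the helpers above together, a hedged sketch of the update_nrpe_config() hook described in the module header; the service and check names are illustrative:

    from charmhelpers.contrib.charmsupport.nrpe import (
        NRPE,
        add_init_service_checks,
        get_nagios_unit_name,
    )

    def update_nrpe_config():
        nrpe_compat = NRPE()
        unit_name = get_nagios_unit_name()
        # add init-system aware checks for each managed service
        add_init_service_checks(nrpe_compat, ['nova-compute'], unit_name)
        nrpe_compat.add_check(
            shortname='lxd',
            description='process check {%s}' % unit_name,
            check_cmd='check_systemd.py lxd',
        )
        nrpe_compat.write()
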
diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py
deleted file mode 100644
index 7ea43f0..0000000
--- a/hooks/charmhelpers/contrib/charmsupport/volumes.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''
-Functions for managing volumes in juju units. One volume is supported per unit.
-Subordinates may have their own storage, provided it is on its own partition.
-
-Configuration stanzas::
-
- volume-ephemeral:
- type: boolean
- default: true
- description: >
- If false, a volume is mounted as specified in "volume-map"
- If true, ephemeral storage will be used, meaning that log data
- will only exist as long as the machine does. YOU HAVE BEEN WARNED.
- volume-map:
- type: string
- default: {}
- description: >
- YAML map of units to device names, e.g:
- "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
- Service units will raise a configure-error if volume-ephemeral
- is 'false' and no volume-map value is set. Use 'juju set' to set a
- value and 'juju resolved' to complete configuration.
-
-Usage::
-
- from charmsupport.volumes import configure_volume, VolumeConfigurationError
- from charmsupport.hookenv import log, ERROR
- def pre_mount_hook():
- stop_service('myservice')
- def post_mount_hook():
- start_service('myservice')
-
- if __name__ == '__main__':
- try:
- configure_volume(before_change=pre_mount_hook,
- after_change=post_mount_hook)
- except VolumeConfigurationError:
- log('Storage could not be configured', ERROR)
-
-'''
-
-# XXX: Known limitations
-# - fstab is neither consulted nor updated
-
-import os
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-import yaml
-
-
-MOUNT_BASE = '/srv/juju/volumes'
-
-
-class VolumeConfigurationError(Exception):
- '''Volume configuration data is missing or invalid'''
- pass
-
-
-def get_config():
- '''Gather and sanity-check volume configuration data'''
- volume_config = {}
- config = hookenv.config()
-
- errors = False
-
- if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
- volume_config['ephemeral'] = True
- else:
- volume_config['ephemeral'] = False
-
- volume_map = None
- try:
- volume_map = yaml.safe_load(config.get('volume-map', '{}'))
- except yaml.YAMLError as e:
- hookenv.log("Error parsing YAML volume-map: {}".format(e),
- hookenv.ERROR)
- errors = True
- if volume_map is None:
- # probably an empty string
- volume_map = {}
- elif not isinstance(volume_map, dict):
- hookenv.log("Volume-map should be a dictionary, not {}".format(
- type(volume_map)))
- errors = True
-
- volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
- if volume_config['device'] and volume_config['ephemeral']:
- # asked for ephemeral storage but also defined a volume ID
- hookenv.log('A volume is defined for this unit, but ephemeral '
- 'storage was requested', hookenv.ERROR)
- errors = True
- elif not volume_config['device'] and not volume_config['ephemeral']:
- # asked for permanent storage but did not define volume ID
- hookenv.log('Persistent storage was requested, but there is no '
- 'volume defined for this unit.', hookenv.ERROR)
- errors = True
-
- unit_mount_name = hookenv.local_unit().replace('/', '-')
- volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
-
- if errors:
- return None
- return volume_config
-
-
-def mount_volume(config):
- if os.path.exists(config['mountpoint']):
- if not os.path.isdir(config['mountpoint']):
- hookenv.log('Not a directory: {}'.format(config['mountpoint']))
- raise VolumeConfigurationError()
- else:
- host.mkdir(config['mountpoint'])
- if os.path.ismount(config['mountpoint']):
- unmount_volume(config)
- if not host.mount(config['device'], config['mountpoint'], persist=True):
- raise VolumeConfigurationError()
-
-
-def unmount_volume(config):
- if os.path.ismount(config['mountpoint']):
- if not host.umount(config['mountpoint'], persist=True):
- raise VolumeConfigurationError()
-
-
-def managed_mounts():
- '''List of all mounted managed volumes'''
- return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
-
-
-def configure_volume(before_change=lambda: None, after_change=lambda: None):
- '''Set up storage (or don't) according to the charm's volume configuration.
- Returns the mount point or "ephemeral". before_change and after_change
- are optional functions to be called if the volume configuration changes.
- '''
-
- config = get_config()
- if not config:
- hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
- raise VolumeConfigurationError()
-
- if config['ephemeral']:
- if os.path.ismount(config['mountpoint']):
- before_change()
- unmount_volume(config)
- after_change()
- return 'ephemeral'
- else:
- # persistent storage
- if os.path.ismount(config['mountpoint']):
- mounts = dict(managed_mounts())
- if mounts.get(config['mountpoint']) != config['device']:
- before_change()
- unmount_volume(config)
- mount_volume(config)
- after_change()
- else:
- before_change()
- mount_volume(config)
- after_change()
- return config['mountpoint']
diff --git a/hooks/charmhelpers/contrib/hahelpers/__init__.py b/hooks/charmhelpers/contrib/hahelpers/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/hahelpers/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py
deleted file mode 100644
index 2c1e371..0000000
--- a/hooks/charmhelpers/contrib/hahelpers/apache.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# This file is sourced from lp:openstack-charm-helpers
-#
-# Authors:
-# James Page
-# Adam Gandelman
-#
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core.hookenv import (
- config as config_get,
- relation_get,
- relation_ids,
- related_units as relation_list,
- log,
- INFO,
-)
-
-
-def get_cert(cn=None):
- # TODO: deal with multiple https endpoints via charm config
- cert = config_get('ssl_cert')
- key = config_get('ssl_key')
- if not (cert and key):
- log("Inspecting identity-service relations for SSL certificate.",
- level=INFO)
- cert = key = None
- if cn:
- ssl_cert_attr = 'ssl_cert_{}'.format(cn)
- ssl_key_attr = 'ssl_key_{}'.format(cn)
- else:
- ssl_cert_attr = 'ssl_cert'
- ssl_key_attr = 'ssl_key'
- for r_id in relation_ids('identity-service'):
- for unit in relation_list(r_id):
- if not cert:
- cert = relation_get(ssl_cert_attr,
- rid=r_id, unit=unit)
- if not key:
- key = relation_get(ssl_key_attr,
- rid=r_id, unit=unit)
- return (cert, key)
-
-
-def get_ca_cert():
- ca_cert = config_get('ssl_ca')
- if ca_cert is None:
- log("Inspecting identity-service relations for CA SSL certificate.",
- level=INFO)
- for r_id in (relation_ids('identity-service') +
- relation_ids('identity-credentials')):
- for unit in relation_list(r_id):
- if ca_cert is None:
- ca_cert = relation_get('ca_cert',
- rid=r_id, unit=unit)
- return ca_cert
-
-
-def retrieve_ca_cert(cert_file):
- cert = None
- if os.path.isfile(cert_file):
- with open(cert_file, 'rb') as crt:
- cert = crt.read()
- return cert
-
-
-def install_ca_cert(ca_cert):
- host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert')
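
A short sketch of how a charm might consume the helpers above inside a hook; note (an assumption, charm-dependent) that the CA payload may need base64-decoding before installation:

    from charmhelpers.contrib.hahelpers.apache import (
        get_ca_cert,
        install_ca_cert,
    )

    def configure_ca():
        # from the ssl_ca option or an identity-service relation
        ca_cert = get_ca_cert()
        if ca_cert:
            install_ca_cert(ca_cert)
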
diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py
deleted file mode 100644
index 4a737e2..0000000
--- a/hooks/charmhelpers/contrib/hahelpers/cluster.py
+++ /dev/null
@@ -1,406 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# James Page
-# Adam Gandelman
-#
-
-"""
-Helpers for clustering and determining "cluster leadership" and other
-clustering-related helpers.
-"""
-
-import subprocess
-import os
-import time
-
-from socket import gethostname as get_unit_hostname
-
-import six
-
-from charmhelpers.core.hookenv import (
- log,
- relation_ids,
- related_units as relation_list,
- relation_get,
- config as config_get,
- INFO,
- DEBUG,
- WARNING,
- unit_get,
- is_leader as juju_is_leader,
- status_set,
-)
-from charmhelpers.core.host import (
- modulo_distribution,
-)
-from charmhelpers.core.decorators import (
- retry_on_exception,
-)
-from charmhelpers.core.strutils import (
- bool_from_string,
-)
-
-DC_RESOURCE_NAME = 'DC'
-
-
-class HAIncompleteConfig(Exception):
- pass
-
-
-class HAIncorrectConfig(Exception):
- pass
-
-
-class CRMResourceNotFound(Exception):
- pass
-
-
-class CRMDCNotFound(Exception):
- pass
-
-
-def is_elected_leader(resource):
- """
- Returns True if the charm executing this is the elected cluster leader.
-
- It relies on three mechanisms to determine leadership:
- 1. If juju is sufficiently new and leadership election is supported,
- the is_leader command will be used.
- 2. If the charm is part of a corosync cluster, call corosync to
- determine leadership.
- 3. If the charm is not part of a corosync cluster, the leader is
- determined as being "the alive unit with the lowest unit number". In
- other words, the oldest surviving unit.
- """
- try:
- return juju_is_leader()
- except NotImplementedError:
- log('Juju leadership election feature not enabled'
- ', using fallback support',
- level=WARNING)
-
- if is_clustered():
- if not is_crm_leader(resource):
- log('Deferring action to CRM leader.', level=INFO)
- return False
- else:
- peers = peer_units()
- if peers and not oldest_peer(peers):
- log('Deferring action to oldest service unit.', level=INFO)
- return False
- return True
-
-
-def is_clustered():
- for r_id in (relation_ids('ha') or []):
- for unit in (relation_list(r_id) or []):
- clustered = relation_get('clustered',
- rid=r_id,
- unit=unit)
- if clustered:
- return True
- return False
-
-
-def is_crm_dc():
- """
- Determine leadership by querying the pacemaker Designated Controller
- """
- cmd = ['crm', 'status']
- try:
- status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- if not isinstance(status, six.text_type):
- status = six.text_type(status, "utf-8")
- except subprocess.CalledProcessError as ex:
- raise CRMDCNotFound(str(ex))
-
- current_dc = ''
- for line in status.split('\n'):
- if line.startswith('Current DC'):
- # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
- current_dc = line.split(':')[1].split()[0]
- if current_dc == get_unit_hostname():
- return True
- elif current_dc == 'NONE':
- raise CRMDCNotFound('Current DC: NONE')
-
- return False
-
-
-@retry_on_exception(5, base_delay=2,
- exc_type=(CRMResourceNotFound, CRMDCNotFound))
-def is_crm_leader(resource, retry=False):
- """
- Returns True if the charm calling this is the elected corosync leader,
- as returned by calling the external "crm" command.
-
- We allow this operation to be retried to avoid the possibility of getting a
- false negative. See LP #1396246 for more info.
- """
- if resource == DC_RESOURCE_NAME:
- return is_crm_dc()
- cmd = ['crm', 'resource', 'show', resource]
- try:
- status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- if not isinstance(status, six.text_type):
- status = six.text_type(status, "utf-8")
- except subprocess.CalledProcessError:
- status = None
-
- if status and get_unit_hostname() in status:
- return True
-
- if status and "resource %s is NOT running" % (resource) in status:
- raise CRMResourceNotFound("CRM resource %s not found" % (resource))
-
- return False
-
-
-def is_leader(resource):
- log("is_leader is deprecated. Please consider using is_crm_leader "
- "instead.", level=WARNING)
- return is_crm_leader(resource)
-
-
-def peer_units(peer_relation="cluster"):
- peers = []
- for r_id in (relation_ids(peer_relation) or []):
- for unit in (relation_list(r_id) or []):
- peers.append(unit)
- return peers
-
-
-def peer_ips(peer_relation='cluster', addr_key='private-address'):
- '''Return a dict of peers and their private-address'''
- peers = {}
- for r_id in relation_ids(peer_relation):
- for unit in relation_list(r_id):
- peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
- return peers
-
-
-def oldest_peer(peers):
- """Determines who the oldest peer is by comparing unit numbers."""
- local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
- for peer in peers:
- remote_unit_no = int(peer.split('/')[1])
- if remote_unit_no < local_unit_no:
- return False
- return True
-
-
-def eligible_leader(resource):
- log("eligible_leader is deprecated. Please consider using "
- "is_elected_leader instead.", level=WARNING)
- return is_elected_leader(resource)
-
-
-def https():
- '''
- Determines whether enough data has been provided in configuration
- or relation data to configure HTTPS
- .
- returns: boolean
- '''
- use_https = config_get('use-https')
- if use_https and bool_from_string(use_https):
- return True
- if config_get('ssl_cert') and config_get('ssl_key'):
- return True
- for r_id in relation_ids('certificates'):
- for unit in relation_list(r_id):
- ca = relation_get('ca', rid=r_id, unit=unit)
- if ca:
- return True
- for r_id in relation_ids('identity-service'):
- for unit in relation_list(r_id):
- # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
- rel_state = [
- relation_get('https_keystone', rid=r_id, unit=unit),
- relation_get('ca_cert', rid=r_id, unit=unit),
- ]
- # NOTE: works around (LP: #1203241)
- if (None not in rel_state) and ('' not in rel_state):
- return True
- return False
-
-
-def determine_api_port(public_port, singlenode_mode=False):
- '''
- Determine correct API server listening port based on
- existence of HTTPS reverse proxy and/or haproxy.
-
- public_port: int: standard public port for given service
-
- singlenode_mode: boolean: Shuffle ports when only a single unit is present
-
- returns: int: the correct listening port for the API service
- '''
- i = 0
- if singlenode_mode:
- i += 1
- elif len(peer_units()) > 0 or is_clustered():
- i += 1
- if https():
- i += 1
- return public_port - (i * 10)
-
-
-def determine_apache_port(public_port, singlenode_mode=False):
- '''
- Description: Determine correct apache listening port based on the
- public port + state of the cluster.
-
- public_port: int: standard public port for given service
-
- singlenode_mode: boolean: Shuffle ports when only a single unit is present
-
- returns: int: the correct listening port for the HAProxy service
- '''
- i = 0
- if singlenode_mode:
- i += 1
- elif len(peer_units()) > 0 or is_clustered():
- i += 1
- return public_port - (i * 10)
-
-
-def get_hacluster_config(exclude_keys=None):
- '''
- Obtains all relevant configuration from charm configuration required
- for initiating a relation to hacluster:
-
- ha-bindiface, ha-mcastport, vip, os-internal-hostname,
- os-admin-hostname, os-public-hostname, os-access-hostname
-
- param: exclude_keys: list of setting key(s) to be excluded.
- returns: dict: A dict containing settings keyed by setting name.
- raises: HAIncompleteConfig if settings are missing or incorrect.
- '''
- settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
- 'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
- conf = {}
- for setting in settings:
- if exclude_keys and setting in exclude_keys:
- continue
-
- conf[setting] = config_get(setting)
-
- if not valid_hacluster_config():
- raise HAIncorrectConfig('Insufficient or incorrect config data to '
- 'configure hacluster.')
- return conf
-
-
-def valid_hacluster_config():
- '''
- Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
- must be set.
-
- Note: ha-bindiface and ha-mcastport both have defaults and will always
- be set. We only care that either vip or dns-ha is set.
-
- :returns: boolean: valid config returns true.
- raises: HAIncorrectConfig if settings conflict.
- raises: HAIncompleteConfig if settings are missing.
- '''
- vip = config_get('vip')
- dns = config_get('dns-ha')
- if not(bool(vip) ^ bool(dns)):
- msg = ('HA: Either vip or dns-ha must be set but not both in order to '
- 'use high availability')
- status_set('blocked', msg)
- raise HAIncorrectConfig(msg)
-
- # If dns-ha then one of os-*-hostname must be set
- if dns:
- dns_settings = ['os-internal-hostname', 'os-admin-hostname',
- 'os-public-hostname', 'os-access-hostname']
- # At this point it is unknown if one or all of the possible
- # network spaces are in HA. Validate at least one is set which is
- # the minimum required.
- for setting in dns_settings:
- if config_get(setting):
- log('DNS HA: At least one hostname is set {}: {}'
- ''.format(setting, config_get(setting)),
- level=DEBUG)
- return True
-
- msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
- 'DNS HA')
- status_set('blocked', msg)
- raise HAIncompleteConfig(msg)
-
- log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
- return True
-
-
-def canonical_url(configs, vip_setting='vip'):
- '''
- Returns the correct HTTP URL to this host given the state of HTTPS
- configuration and hacluster.
-
- :configs : OSTemplateRenderer: A config templating object to inspect for
- a complete https context.
-
- :vip_setting: str: Setting in charm config that specifies
- VIP address.
- '''
- scheme = 'http'
- if 'https' in configs.complete_contexts():
- scheme = 'https'
- if is_clustered():
- addr = config_get(vip_setting)
- else:
- addr = unit_get('private-address')
- return '%s://%s' % (scheme, addr)
-
-
-def distributed_wait(modulo=None, wait=None, operation_name='operation'):
- ''' Distribute operations by waiting based on modulo_distribution
-
- If modulo and/or wait are not set, check config_get for those values.
- If config values are not set, default to modulo=3 and wait=30.
-
- :param modulo: int The modulo number creates the group distribution
- :param wait: int The constant time wait value
- :param operation_name: string Operation name for status message
- i.e. 'restart'
- :side effect: Calls config_get()
- :side effect: Calls log()
- :side effect: Calls status_set()
- :side effect: Calls time.sleep()
- '''
- if modulo is None:
- modulo = config_get('modulo-nodes') or 3
- if wait is None:
- wait = config_get('known-wait') or 30
- if juju_is_leader():
- # The leader should never wait
- calculated_wait = 0
- else:
- # non_zero_wait=True guarantees the non-leader who gets modulo 0
- # will still wait
- calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
- non_zero_wait=True)
- msg = "Waiting {} seconds for {} ...".format(calculated_wait,
- operation_name)
- log(msg, DEBUG)
- status_set('maintenance', msg)
- time.sleep(calculated_wait)
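-
-
-# A sketch of the grouping distributed_wait() relies on, assuming
-# modulo_distribution() computes wait * (unit_number % modulo) and that
-# non_zero_wait=True makes group 0 wait a full modulo * wait instead of 0:
-def _example_modulo_wait(unit_number, modulo=3, wait=30, non_zero_wait=True):
-    calculated = wait * (unit_number % modulo)
-    if non_zero_wait and calculated == 0:
-        calculated = modulo * wait
-    return calculated
-
-
-# With the defaults, units 1, 2 and 3 wait 30s, 60s and 90s respectively.
-assert [_example_modulo_wait(n) for n in (1, 2, 3)] == [30, 60, 90]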
diff --git a/hooks/charmhelpers/contrib/network/__init__.py b/hooks/charmhelpers/contrib/network/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/network/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py
deleted file mode 100644
index b13277b..0000000
--- a/hooks/charmhelpers/contrib/network/ip.py
+++ /dev/null
@@ -1,602 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import glob
-import re
-import subprocess
-import six
-import socket
-
-from functools import partial
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- config,
- log,
- network_get_primary_address,
- unit_get,
- WARNING,
- NoNetworkBinding,
-)
-
-from charmhelpers.core.host import (
- lsb_release,
- CompareHostReleases,
-)
-
-try:
- import netifaces
-except ImportError:
- apt_update(fatal=True)
- if six.PY2:
- apt_install('python-netifaces', fatal=True)
- else:
- apt_install('python3-netifaces', fatal=True)
- import netifaces
-
-try:
- import netaddr
-except ImportError:
- apt_update(fatal=True)
- if six.PY2:
- apt_install('python-netaddr', fatal=True)
- else:
- apt_install('python3-netaddr', fatal=True)
- import netaddr
-
-
-def _validate_cidr(network):
- try:
- netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
-
-def no_ip_found_error_out(network):
- errmsg = ("No IP address found in network(s): %s" % network)
- raise ValueError(errmsg)
-
-
-def _get_ipv6_network_from_address(address):
- """Get an netaddr.IPNetwork for the given IPv6 address
- :param address: a dict as returned by netifaces.ifaddresses
- :returns netaddr.IPNetwork: None if the address is a link local or loopback
- address
- """
- if address['addr'].startswith('fe80') or address['addr'] == "::1":
- return None
-
- prefix = address['netmask'].split("/")
- if len(prefix) > 1:
- netmask = prefix[1]
- else:
- netmask = address['netmask']
- return netaddr.IPNetwork("%s/%s" % (address['addr'],
- netmask))
-
-
-def get_address_in_network(network, fallback=None, fatal=False):
- """Get an IPv4 or IPv6 address within the network from the host.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
- :param fallback (str): If no address is found, return fallback.
- :param fatal (boolean): If no address is found, fallback is not
- set and fatal is True then exit(1).
- """
- if network is None:
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
- else:
- return None
-
- networks = network.split() or [network]
- for network in networks:
- _validate_cidr(network)
- network = netaddr.IPNetwork(network)
- for iface in netifaces.interfaces():
- try:
- addresses = netifaces.ifaddresses(iface)
- except ValueError:
- # If an instance was deleted between
- # netifaces.interfaces() run and now, its interfaces are gone
- continue
- if network.version == 4 and netifaces.AF_INET in addresses:
- for addr in addresses[netifaces.AF_INET]:
- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- if cidr in network:
- return str(cidr.ip)
-
- if network.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- cidr = _get_ipv6_network_from_address(addr)
- if cidr and cidr in network:
- return str(cidr.ip)
-
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
-
- return None
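-
-
-# Membership sketch for the loop above, with illustrative addresses: an
-# interface holding 192.168.1.5/24 falls inside 192.168.1.0/24, so
-# '192.168.1.5' would be the address returned for that network.
-_example_cidr = netaddr.IPNetwork('192.168.1.5/24')
-assert _example_cidr in netaddr.IPNetwork('192.168.1.0/24')
-assert str(_example_cidr.ip) == '192.168.1.5'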
-
-
-def is_ipv6(address):
- """Determine whether provided address is IPv6 or not."""
- try:
- address = netaddr.IPAddress(address)
- except netaddr.AddrFormatError:
- # probably a hostname - so not an address at all!
- return False
-
- return address.version == 6
-
-
-def is_address_in_network(network, address):
- """
- Determine whether the provided address is within a network range.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'.
- :param address: An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :returns boolean: Flag indicating whether address is in network.
- """
- try:
- network = netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
- try:
- address = netaddr.IPAddress(address)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Address (%s) is not in correct presentation format" %
- address)
-
- if address in network:
- return True
- else:
- return False
-
-
-def _get_for_address(address, key):
- """Retrieve an attribute of or the physical interface that
- the IP address provided could be bound to.
-
- :param address (str): An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :param key: 'iface' for the physical interface name or an attribute
- of the configured interface, for example 'netmask'.
- :returns str: Requested attribute or None if address is not bindable.
- """
- address = netaddr.IPAddress(address)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if address.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- else:
- return addresses[netifaces.AF_INET][0][key]
-
- if address.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- network = _get_ipv6_network_from_address(addr)
- if not network:
- continue
-
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- elif key == 'netmask' and cidr:
- return str(cidr).split('/')[1]
- else:
- return addr[key]
- return None
-
-
-get_iface_for_address = partial(_get_for_address, key='iface')
-
-
-get_netmask_for_address = partial(_get_for_address, key='netmask')
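-
-
-# The two helpers above are functools.partial bindings of _get_for_address:
-# get_iface_for_address('192.168.1.1') is equivalent to
-# _get_for_address('192.168.1.1', key='iface'), and likewise for 'netmask'.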
-
-
-def resolve_network_cidr(ip_address):
- '''
- Resolves the full address cidr of an ip_address based on
- configured network interfaces
- '''
- netmask = get_netmask_for_address(ip_address)
- return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
-
-
-def format_ipv6_addr(address):
- """If address is IPv6, wrap it in '[]' otherwise return None.
-
- This is required by most configuration files when specifying IPv6
- addresses.
- """
- if is_ipv6(address):
- return "[%s]" % address
-
- return None
-
-
-def is_ipv6_disabled():
- try:
- result = subprocess.check_output(
- ['sysctl', 'net.ipv6.conf.all.disable_ipv6'],
- stderr=subprocess.STDOUT,
- universal_newlines=True)
- except subprocess.CalledProcessError:
- return True
-
- return "net.ipv6.conf.all.disable_ipv6 = 1" in result
-
-
-def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
- fatal=True, exc_list=None):
- """Return the assigned IP address for a given interface, if any.
-
- :param iface: network interface on which address(es) are expected to
- be found.
- :param inet_type: inet address family
- :param inc_aliases: include alias interfaces in search
- :param fatal: if True, raise exception if address not found
- :param exc_list: list of addresses to ignore
- :return: list of ip addresses
- """
- # Extract nic if passed /dev/ethX
- if '/' in iface:
- iface = iface.split('/')[-1]
-
- if not exc_list:
- exc_list = []
-
- try:
- inet_num = getattr(netifaces, inet_type)
- except AttributeError:
- raise Exception("Unknown inet type '%s'" % str(inet_type))
-
- interfaces = netifaces.interfaces()
- if inc_aliases:
- ifaces = []
- for _iface in interfaces:
- if iface == _iface or _iface.split(':')[0] == iface:
- ifaces.append(_iface)
-
- if fatal and not ifaces:
- raise Exception("Invalid interface '%s'" % iface)
-
- ifaces.sort()
- else:
- if iface not in interfaces:
- if fatal:
- raise Exception("Interface '%s' not found " % (iface))
- else:
- return []
-
- else:
- ifaces = [iface]
-
- addresses = []
- for netiface in ifaces:
- net_info = netifaces.ifaddresses(netiface)
- if inet_num in net_info:
- for entry in net_info[inet_num]:
- if 'addr' in entry and entry['addr'] not in exc_list:
- addresses.append(entry['addr'])
-
- if fatal and not addresses:
- raise Exception("Interface '%s' doesn't have any %s addresses." %
- (iface, inet_type))
-
- return sorted(addresses)
-
-
-get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
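-
-
-# Usage sketch (the interface names here are host-specific assumptions):
-#
-#     get_ipv4_addr('eth0')               # e.g. ['10.5.0.7']
-#     get_iface_addr('eth0', 'AF_INET6')  # IPv6 addresses on eth0
-#     get_iface_addr('eth0', inc_aliases=True, fatal=False)  # + eth0:N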
-
-
-def get_iface_from_addr(addr):
- """Work out on which interface the provided address is configured."""
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- for inet_type in addresses:
- for _addr in addresses[inet_type]:
- _addr = _addr['addr']
- # link local
- ll_key = re.compile("(.+)%.*")
- raw = re.match(ll_key, _addr)
- if raw:
- _addr = raw.group(1)
-
- if _addr == addr:
- log("Address '%s' is configured on iface '%s'" %
- (addr, iface))
- return iface
-
- msg = "Unable to infer net iface on which '%s' is configured" % (addr)
- raise Exception(msg)
-
-
-def sniff_iface(f):
- """Ensure decorated function is called with a value for iface.
-
- If no iface provided, inject net iface inferred from unit private address.
- """
- def iface_sniffer(*args, **kwargs):
- if not kwargs.get('iface', None):
- kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
-
- return f(*args, **kwargs)
-
- return iface_sniffer
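-
-
-# Decorator sketch with a hypothetical helper (not part of this module):
-#
-#     @sniff_iface
-#     def dump_addrs(iface=None, fatal=False):
-#         return get_iface_addr(iface=iface, fatal=fatal)
-#
-# dump_addrs() fills iface from the unit's private-address, while
-# dump_addrs(iface='bond0') bypasses the lookup.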
-
-
-@sniff_iface
-def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
- dynamic_only=True):
- """Get assigned IPv6 address for a given interface.
-
- Returns list of addresses found. If no address found, returns empty list.
-
- If iface is None, we infer the current primary interface by doing a reverse
- lookup on the unit private-address.
-
- We currently only support scope global IPv6 addresses i.e. non-temporary
- addresses. If no global IPv6 address is found, return the first one found
- in the ipv6 address list.
-
- :param iface: network interface on which ipv6 address(es) are expected to
- be found.
- :param inc_aliases: include alias interfaces in search
- :param fatal: if True, raise exception if address not found
- :param exc_list: list of addresses to ignore
- :param dynamic_only: only recognise dynamic addresses
- :return: list of ipv6 addresses
- """
- addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
- inc_aliases=inc_aliases, fatal=fatal,
- exc_list=exc_list)
-
- if addresses:
- global_addrs = []
- for addr in addresses:
- key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
- m = re.match(key_scope_link_local, addr)
- if m:
- eui_64_mac = m.group(1)
- iface = m.group(2)
- else:
- global_addrs.append(addr)
-
- if global_addrs:
- # Make sure any found global addresses are not temporary
- cmd = ['ip', 'addr', 'show', iface]
- out = subprocess.check_output(cmd).decode('UTF-8')
- if dynamic_only:
- key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
- else:
- key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
-
- addrs = []
- for line in out.split('\n'):
- line = line.strip()
- m = re.match(key, line)
- if m and 'temporary' not in line:
- # Return the first valid address we find
- for addr in global_addrs:
- if m.group(1) == addr:
- if not dynamic_only or \
- m.group(1).endswith(eui_64_mac):
- addrs.append(addr)
-
- if addrs:
- return addrs
-
- if fatal:
- raise Exception("Interface '%s' does not have a scope global "
- "non-temporary ipv6 address." % iface)
-
- return []
-
-
-def get_bridges(vnic_dir='/sys/devices/virtual/net'):
- """Return a list of bridges on the system."""
- b_regex = "%s/*/bridge" % vnic_dir
- return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
-
-
-def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
- """Return a list of nics comprising a given bridge on the system."""
- brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
- return [x.split('/')[-1] for x in glob.glob(brif_regex)]
-
-
-def is_bridge_member(nic):
- """Check if a given nic is a member of a bridge."""
- for bridge in get_bridges():
- if nic in get_bridge_nics(bridge):
- return True
-
- return False
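-
-
-# The sysfs layout the two globs above rely on, for a hypothetical bridge
-# br0 with member port eth1:
-#
-#     /sys/devices/virtual/net/br0/bridge     -> br0 is a bridge
-#     /sys/devices/virtual/net/br0/brif/eth1  -> eth1 is a member of br0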
-
-
-def is_ip(address):
- """
- Returns True if address is a valid IP address.
- """
- try:
- # Test to see if already an IPv4/IPv6 address
- address = netaddr.IPAddress(address)
- return True
- except (netaddr.AddrFormatError, ValueError):
- return False
-
-
-def ns_query(address):
- try:
- import dns.resolver
- except ImportError:
- if six.PY2:
- apt_install('python-dnspython', fatal=True)
- else:
- apt_install('python3-dnspython', fatal=True)
- import dns.resolver
-
- if isinstance(address, dns.name.Name):
- rtype = 'PTR'
- elif isinstance(address, six.string_types):
- rtype = 'A'
- else:
- return None
-
- try:
- answers = dns.resolver.query(address, rtype)
- except dns.resolver.NXDOMAIN:
- return None
-
- if answers:
- return str(answers[0])
- return None
-
-
-def get_host_ip(hostname, fallback=None):
- """
- Resolves the IP for a given hostname, or returns
- the input if it is already an IP.
- """
- if is_ip(hostname):
- return hostname
-
- ip_addr = ns_query(hostname)
- if not ip_addr:
- try:
- ip_addr = socket.gethostbyname(hostname)
- except Exception:
- log("Failed to resolve hostname '%s'" % (hostname),
- level=WARNING)
- return fallback
- return ip_addr
-
-
-def get_hostname(address, fqdn=True):
- """
- Resolves hostname for given IP, or returns the input
- if it is already a hostname.
- """
- if is_ip(address):
- try:
- import dns.reversename
- except ImportError:
- if six.PY2:
- apt_install("python-dnspython", fatal=True)
- else:
- apt_install("python3-dnspython", fatal=True)
- import dns.reversename
-
- rev = dns.reversename.from_address(address)
- result = ns_query(rev)
-
- if not result:
- try:
- result = socket.gethostbyaddr(address)[0]
- except Exception:
- return None
- else:
- result = address
-
- if fqdn:
- # strip trailing .
- if result.endswith('.'):
- return result[:-1]
- else:
- return result
- else:
- return result.split('.')[0]
-
-
-def port_has_listener(address, port):
- """
- Returns True if the address:port is open and being listened to,
- else False.
-
- @param address: an IP address or hostname
- @param port: integer port
-
-    Note: calls 'nc' via a subprocess
- """
- cmd = ['nc', '-z', address, str(port)]
- result = subprocess.call(cmd)
- return not(bool(result))
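-
-
-# A rough socket-only equivalent of the nc probe above (a sketch only; nc
-# also resolves service names from /etc/services and has its own timeouts):
-def _example_port_open(address, port, timeout=1.0):
-    try:
-        socket.create_connection((address, int(port)), timeout).close()
-        return True
-    except (socket.error, ValueError):
-        return False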
-
-
-def assert_charm_supports_ipv6():
- """Check whether we are able to support charms ipv6."""
- release = lsb_release()['DISTRIB_CODENAME'].lower()
- if CompareHostReleases(release) < "trusty":
- raise Exception("IPv6 is not supported in the charms for Ubuntu "
- "versions less than Trusty 14.04")
-
-
-def get_relation_ip(interface, cidr_network=None):
- """Return this unit's IP for the given interface.
-
- Allow for an arbitrary interface to use with network-get to select an IP.
- Handle all address selection options including passed cidr network and
- IPv6.
-
- Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8')
-
- @param interface: string name of the relation.
- @param cidr_network: string CIDR Network to select an address from.
- @raises Exception if prefer-ipv6 is configured but IPv6 unsupported.
- @returns IPv6 or IPv4 address
- """
- # Select the interface address first
-    # For possible use as a fallback below with get_address_in_network
- try:
- # Get the interface specific IP
- address = network_get_primary_address(interface)
- except NotImplementedError:
- # If network-get is not available
- address = get_host_ip(unit_get('private-address'))
- except NoNetworkBinding:
- log("No network binding for {}".format(interface), WARNING)
- address = get_host_ip(unit_get('private-address'))
-
- if config('prefer-ipv6'):
- # Currently IPv6 has priority, eventually we want IPv6 to just be
- # another network space.
- assert_charm_supports_ipv6()
- return get_ipv6_addr()[0]
- elif cidr_network:
- # If a specific CIDR network is passed get the address from that
- # network.
- return get_address_in_network(cidr_network, address)
-
- # Return the interface address
- return address
diff --git a/hooks/charmhelpers/contrib/network/ovs/__init__.py b/hooks/charmhelpers/contrib/network/ovs/__init__.py
deleted file mode 100644
index a8856e9..0000000
--- a/hooks/charmhelpers/contrib/network/ovs/__init__.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-''' Helpers for interacting with OpenvSwitch '''
-import hashlib
-import subprocess
-import os
-import six
-
-from charmhelpers.fetch import apt_install
-
-
-from charmhelpers.core.hookenv import (
- log, WARNING, INFO, DEBUG
-)
-from charmhelpers.core.host import (
- service
-)
-
-BRIDGE_TEMPLATE = """\
-# This veth pair is required when neutron data-port is mapped to an existing linux bridge. lp:1635067
-
-auto {linuxbridge_port}
-iface {linuxbridge_port} inet manual
- pre-up ip link add name {linuxbridge_port} type veth peer name {ovsbridge_port}
- pre-up ip link set {ovsbridge_port} master {bridge}
- pre-up ip link set {ovsbridge_port} up
- up ip link set {linuxbridge_port} up
- down ip link del {linuxbridge_port}
-"""
-
-MAX_KERNEL_INTERFACE_NAME_LEN = 15
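-
-
-# Rendering sketch for BRIDGE_TEMPLATE with illustrative names (each under
-# MAX_KERNEL_INTERFACE_NAME_LEN); the result is the interfaces.d stanza
-# that creates the veth pair and attaches one end to the Linux bridge:
-_example_stanza = BRIDGE_TEMPLATE.format(linuxbridge_port='veth-br-ex',
-                                         ovsbridge_port='veth-br-int',
-                                         bridge='br-ex')
-assert 'ip link add name veth-br-ex' in _example_stanza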
-
-
-def add_bridge(name, datapath_type=None):
- ''' Add the named bridge to openvswitch '''
- log('Creating bridge {}'.format(name))
- cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name]
- if datapath_type is not None:
- cmd += ['--', 'set', 'bridge', name,
- 'datapath_type={}'.format(datapath_type)]
- subprocess.check_call(cmd)
-
-
-def del_bridge(name):
- ''' Delete the named bridge from openvswitch '''
- log('Deleting bridge {}'.format(name))
- subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
-
-
-def add_bridge_port(name, port, promisc=False):
- ''' Add a port to the named openvswitch bridge '''
- log('Adding port {} to bridge {}'.format(port, name))
- subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
- name, port])
- subprocess.check_call(["ip", "link", "set", port, "up"])
- if promisc:
- subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
- else:
- subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
-
-
-def del_bridge_port(name, port):
- ''' Delete a port from the named openvswitch bridge '''
- log('Deleting port {} from bridge {}'.format(port, name))
- subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
- name, port])
- subprocess.check_call(["ip", "link", "set", port, "down"])
- subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
-
-
-def add_ovsbridge_linuxbridge(name, bridge):
-    ''' Connect a Linux bridge to the named openvswitch bridge via a veth pair
-    :param name: Name of the ovs bridge the Linux bridge will be attached to
-    :param bridge: Name of the Linux bridge to attach to the ovs bridge
-    :returns: None; logs and returns early if the bridges are already
-    connected or if the veth interfaces already exist'''
- try:
- import netifaces
- except ImportError:
- if six.PY2:
- apt_install('python-netifaces', fatal=True)
- else:
- apt_install('python3-netifaces', fatal=True)
- import netifaces
-
- # NOTE(jamespage):
- # Older code supported addition of a linuxbridge directly
- # to an OVS bridge; ensure we don't break uses on upgrade
- existing_ovs_bridge = port_to_br(bridge)
- if existing_ovs_bridge is not None:
- log('Linuxbridge {} is already directly in use'
- ' by OVS bridge {}'.format(bridge, existing_ovs_bridge),
- level=INFO)
- return
-
- # NOTE(jamespage):
- # preserve existing naming because interfaces may already exist.
- ovsbridge_port = "veth-" + name
- linuxbridge_port = "veth-" + bridge
- if (len(ovsbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN or
- len(linuxbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN):
- # NOTE(jamespage):
- # use parts of hashed bridgename (openstack style) when
- # a bridge name exceeds 15 chars
- hashed_bridge = hashlib.sha256(bridge.encode('UTF-8')).hexdigest()
- base = '{}-{}'.format(hashed_bridge[:8], hashed_bridge[-2:])
- ovsbridge_port = "cvo{}".format(base)
- linuxbridge_port = "cvb{}".format(base)
-
- interfaces = netifaces.interfaces()
- for interface in interfaces:
- if interface == ovsbridge_port or interface == linuxbridge_port:
- log('Interface {} already exists'.format(interface), level=INFO)
- return
-
- log('Adding linuxbridge {} to ovsbridge {}'.format(bridge, name),
- level=INFO)
-
- check_for_eni_source()
-
- with open('/etc/network/interfaces.d/{}.cfg'.format(
- linuxbridge_port), 'w') as config:
- config.write(BRIDGE_TEMPLATE.format(linuxbridge_port=linuxbridge_port,
- ovsbridge_port=ovsbridge_port,
- bridge=bridge))
-
- subprocess.check_call(["ifup", linuxbridge_port])
- add_bridge_port(name, linuxbridge_port)
-
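-# Name-length sketch for the hashing above: an over-long Linux bridge name
-# is reduced to cvo<8-hex>-<2-hex> / cvb<8-hex>-<2-hex> veth names, which
-# stay within the 15-character kernel limit (illustrative bridge name):
-_example_hash = hashlib.sha256('very-long-bridge'.encode('UTF-8')).hexdigest()
-_example_base = '{}-{}'.format(_example_hash[:8], _example_hash[-2:])
-assert len('cvo{}'.format(_example_base)) <= MAX_KERNEL_INTERFACE_NAME_LEN
-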
-
-def is_linuxbridge_interface(port):
- ''' Check if the interface is a linuxbridge bridge
- :param port: Name of an interface to check whether it is a Linux bridge
- :returns: True if port is a Linux bridge'''
-
- if os.path.exists('/sys/class/net/' + port + '/bridge'):
- log('Interface {} is a Linux bridge'.format(port), level=DEBUG)
- return True
- else:
- log('Interface {} is not a Linux bridge'.format(port), level=DEBUG)
- return False
-
-
-def set_manager(manager):
-    ''' Set the manager for the local openvswitch '''
- log('Setting manager for local ovs to {}'.format(manager))
- subprocess.check_call(['ovs-vsctl', 'set-manager',
- 'ssl:{}'.format(manager)])
-
-
-def set_Open_vSwitch_column_value(column_value):
- """
- Calls ovs-vsctl and sets the 'column_value' in the Open_vSwitch table.
-
- :param column_value:
- See http://www.openvswitch.org//ovs-vswitchd.conf.db.5.pdf for
- details of the relevant values.
-    :type column_value: str
-    :raises subprocess.CalledProcessError: possibly ovsdb-server is not running
- """
- log('Setting {} in the Open_vSwitch table'.format(column_value))
- subprocess.check_call(['ovs-vsctl', 'set', 'Open_vSwitch', '.', column_value])
-
-
-CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
-
-
-def get_certificate():
- ''' Read openvswitch certificate from disk '''
- if os.path.exists(CERT_PATH):
- log('Reading ovs certificate from {}'.format(CERT_PATH))
- with open(CERT_PATH, 'r') as cert:
- full_cert = cert.read()
- begin_marker = "-----BEGIN CERTIFICATE-----"
- end_marker = "-----END CERTIFICATE-----"
- begin_index = full_cert.find(begin_marker)
- end_index = full_cert.rfind(end_marker)
- if end_index == -1 or begin_index == -1:
- raise RuntimeError("Certificate does not contain valid begin"
- " and end markers.")
- full_cert = full_cert[begin_index:(end_index + len(end_marker))]
- return full_cert
- else:
- log('Certificate not found', level=WARNING)
- return None
-
-
-def check_for_eni_source():
- ''' Juju removes the source line when setting up interfaces,
- replace if missing '''
-
- with open('/etc/network/interfaces', 'r') as eni:
- for line in eni:
- if line == 'source /etc/network/interfaces.d/*':
- return
- with open('/etc/network/interfaces', 'a') as eni:
- eni.write('\nsource /etc/network/interfaces.d/*')
-
-
-def full_restart():
- ''' Full restart and reload of openvswitch '''
- if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
- service('start', 'openvswitch-force-reload-kmod')
- else:
- service('force-reload-kmod', 'openvswitch-switch')
-
-
-def enable_ipfix(bridge, target):
-    '''Enable IPFIX on the bridge, exporting to the target.
- :param bridge: Bridge to monitor
- :param target: IPfix remote endpoint
- '''
- cmd = ['ovs-vsctl', 'set', 'Bridge', bridge, 'ipfix=@i', '--',
- '--id=@i', 'create', 'IPFIX', 'targets="{}"'.format(target)]
- log('Enabling IPfix on {}.'.format(bridge))
- subprocess.check_call(cmd)
-
-
-def disable_ipfix(bridge):
-    '''Disable IPFIX on the target bridge.
- :param bridge: Bridge to modify
- '''
- cmd = ['ovs-vsctl', 'clear', 'Bridge', bridge, 'ipfix']
- subprocess.check_call(cmd)
-
-
-def port_to_br(port):
- '''Determine the bridge that contains a port
- :param port: Name of port to check for
- :returns str: OVS bridge containing port or None if not found
- '''
- try:
- return subprocess.check_output(
- ['ovs-vsctl', 'port-to-br', port]
- ).decode('UTF-8').strip()
- except subprocess.CalledProcessError:
- return None
diff --git a/hooks/charmhelpers/contrib/network/ufw.py b/hooks/charmhelpers/contrib/network/ufw.py
deleted file mode 100644
index 5db622f..0000000
--- a/hooks/charmhelpers/contrib/network/ufw.py
+++ /dev/null
@@ -1,339 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-This module contains helpers to add and remove ufw rules.
-
-Examples:
-
-- open SSH port for subnet 10.0.3.0/24:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
-
-- open service by name as defined in /etc/services:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.service('ssh', 'open')
-
-- close service by port number:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.service('4949', 'close') # munin
-"""
-import re
-import os
-import subprocess
-
-from charmhelpers.core import hookenv
-from charmhelpers.core.kernel import modprobe, is_module_loaded
-
-__author__ = "Felipe Reyes "
-
-
-class UFWError(Exception):
- pass
-
-
-class UFWIPv6Error(UFWError):
- pass
-
-
-def is_enabled():
- """
- Check if `ufw` is enabled
-
- :returns: True if ufw is enabled
- """
- output = subprocess.check_output(['ufw', 'status'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall(r'^Status: active\n', output, re.M)
-
- return len(m) >= 1
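-
-
-# Match sketch against canned `ufw status` output (illustrative string):
-_example_status = 'Status: active\n\nTo    Action    From\n'
-assert len(re.findall(r'^Status: active\n', _example_status, re.M)) == 1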
-
-
-def is_ipv6_ok(soft_fail=False):
- """
- Check if IPv6 support is present and ip6tables functional
-
- :param soft_fail: If set to True and IPv6 support is broken, then reports
- that the host doesn't have IPv6 support, otherwise a
- UFWIPv6Error exception is raised.
- :returns: True if IPv6 is working, False otherwise
- """
-
- # do we have IPv6 in the machine?
- if os.path.isdir('/proc/sys/net/ipv6'):
- # is ip6tables kernel module loaded?
- if not is_module_loaded('ip6_tables'):
- # ip6tables support isn't complete, let's try to load it
- try:
- modprobe('ip6_tables')
- # great, we can load the module
- return True
- except subprocess.CalledProcessError as ex:
- hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
- level="WARN")
- # we are in a world where ip6tables isn't working
- if soft_fail:
- # so we inform that the machine doesn't have IPv6
- return False
- else:
- raise UFWIPv6Error("IPv6 firewall support broken")
- else:
- # the module is present :)
- return True
-
- else:
- # the system doesn't have IPv6
- return False
-
-
-def disable_ipv6():
- """
- Disable ufw IPv6 support in /etc/default/ufw
- """
- exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
- '/etc/default/ufw'])
- if exit_code == 0:
- hookenv.log('IPv6 support in ufw disabled', level='INFO')
- else:
- hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
- raise UFWError("Couldn't disable IPv6 support in ufw")
-
-
-def enable(soft_fail=False):
- """
- Enable ufw
-
- :param soft_fail: If set to True silently disables IPv6 support in ufw,
-                      otherwise a UFWIPv6Error exception is raised when IPv6
- support is broken.
- :returns: True if ufw is successfully enabled
- """
- if is_enabled():
- return True
-
- if not is_ipv6_ok(soft_fail):
- disable_ipv6()
-
- output = subprocess.check_output(['ufw', 'enable'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall('^Firewall is active and enabled on system startup\n',
- output, re.M)
- hookenv.log(output, level='DEBUG')
-
- if len(m) == 0:
- hookenv.log("ufw couldn't be enabled", level='WARN')
- return False
- else:
- hookenv.log("ufw enabled", level='INFO')
- return True
-
-
-def reload():
- """
- Reload ufw
-
- :returns: True if ufw is successfully enabled
- """
- output = subprocess.check_output(['ufw', 'reload'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall('^Firewall reloaded\n',
- output, re.M)
- hookenv.log(output, level='DEBUG')
-
- if len(m) == 0:
- hookenv.log("ufw couldn't be reloaded", level='WARN')
- return False
- else:
- hookenv.log("ufw reloaded", level='INFO')
- return True
-
-
-def disable():
- """
- Disable ufw
-
- :returns: True if ufw is successfully disabled
- """
- if not is_enabled():
- return True
-
- output = subprocess.check_output(['ufw', 'disable'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall(r'^Firewall stopped and disabled on system startup\n',
- output, re.M)
- hookenv.log(output, level='DEBUG')
-
- if len(m) == 0:
- hookenv.log("ufw couldn't be disabled", level='WARN')
- return False
- else:
- hookenv.log("ufw disabled", level='INFO')
- return True
-
-
-def default_policy(policy='deny', direction='incoming'):
- """
- Changes the default policy for traffic `direction`
-
- :param policy: allow, deny or reject
- :param direction: traffic direction, possible values: incoming, outgoing,
- routed
- """
- if policy not in ['allow', 'deny', 'reject']:
- raise UFWError(('Unknown policy %s, valid values: '
- 'allow, deny, reject') % policy)
-
- if direction not in ['incoming', 'outgoing', 'routed']:
- raise UFWError(('Unknown direction %s, valid values: '
- 'incoming, outgoing, routed') % direction)
-
- output = subprocess.check_output(['ufw', 'default', policy, direction],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
- hookenv.log(output, level='DEBUG')
-
- m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
- policy),
- output, re.M)
- if len(m) == 0:
- hookenv.log("ufw couldn't change the default policy to %s for %s"
- % (policy, direction), level='WARN')
- return False
- else:
- hookenv.log("ufw default policy for %s changed to %s"
- % (direction, policy), level='INFO')
- return True
-
-
-def modify_access(src, dst='any', port=None, proto=None, action='allow',
- index=None):
- """
-    Grant or revoke access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
-    :param dst: destination of the connection; if the machine has multiple
-                IPs and connections to only one of them should be accepted,
-                this field has to be set.
-    :param port: destination port
- :param proto: protocol (tcp or udp)
- :param action: `allow` or `delete`
- :param index: if different from None the rule is inserted at the given
- `index`.
- """
- if not is_enabled():
- hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
- return
-
- if action == 'delete':
- cmd = ['ufw', 'delete', 'allow']
- elif index is not None:
- cmd = ['ufw', 'insert', str(index), action]
- else:
- cmd = ['ufw', action]
-
- if src is not None:
- cmd += ['from', src]
-
- if dst is not None:
- cmd += ['to', dst]
-
- if port is not None:
- cmd += ['port', str(port)]
-
- if proto is not None:
- cmd += ['proto', proto]
-
- hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- (stdout, stderr) = p.communicate()
-
- hookenv.log(stdout, level='INFO')
-
- if p.returncode != 0:
- hookenv.log(stderr, level='ERROR')
- hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
- p.returncode),
- level='ERROR')
-
-
-def grant_access(src, dst='any', port=None, proto=None, index=None):
- """
- Grant access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
-    :param dst: destination of the connection; if the machine has multiple
-                IPs and connections to only one of them should be accepted,
-                this field has to be set.
-    :param port: destination port
- :param proto: protocol (tcp or udp)
- :param index: if different from None the rule is inserted at the given
- `index`.
- """
- return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
- index=index)
-
-
-def revoke_access(src, dst='any', port=None, proto=None):
- """
- Revoke access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
-    :param dst: destination of the connection; if the machine has multiple
-                IPs and connections to only one of them should be accepted,
-                this field has to be set.
-    :param port: destination port
- :param proto: protocol (tcp or udp)
- """
- return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
-
-
-def service(name, action):
- """
- Open/close access to a service
-
- :param name: could be a service name defined in `/etc/services` or a port
- number.
- :param action: `open` or `close`
- """
- if action == 'open':
- subprocess.check_output(['ufw', 'allow', str(name)],
- universal_newlines=True)
- elif action == 'close':
- subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
- universal_newlines=True)
- else:
- raise UFWError(("'{}' not supported, use 'allow' "
- "or 'delete'").format(action))
diff --git a/hooks/charmhelpers/contrib/openstack/__init__.py b/hooks/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/openstack/alternatives.py b/hooks/charmhelpers/contrib/openstack/alternatives.py
deleted file mode 100644
index 547de09..0000000
--- a/hooks/charmhelpers/contrib/openstack/alternatives.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-''' Helper for managing alternatives for file conflict resolution '''
-
-import subprocess
-import shutil
-import os
-
-
-def install_alternative(name, target, source, priority=50):
- ''' Install alternative configuration '''
- if (os.path.exists(target) and not os.path.islink(target)):
- # Move existing file/directory away before installing
- shutil.move(target, '{}.bak'.format(target))
- cmd = [
- 'update-alternatives', '--force', '--install',
- target, name, source, str(priority)
- ]
- subprocess.check_call(cmd)
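-
-
-# Command sketch with hypothetical paths: install_alternative('nova-conf',
-# '/etc/nova/nova.conf', '/var/lib/charm/nova/nova.conf') runs
-#
-#     update-alternatives --force --install /etc/nova/nova.conf nova-conf \
-#         /var/lib/charm/nova/nova.conf 50
-#
-# i.e. target is the generic link, name the alternatives group, source the
-# charm-managed file and 50 the default priority.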
-
-
-def remove_alternative(name, source):
- """Remove an installed alternative configuration file
-
- :param name: string name of the alternative to remove
- :param source: string full path to alternative to remove
- """
- cmd = [
- 'update-alternatives', '--remove',
- name, source
- ]
- subprocess.check_call(cmd)
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index 8e57467..0000000
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,361 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import os
-import re
-import sys
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-from charmhelpers.contrib.openstack.amulet.utils import (
- OPENSTACK_RELEASES_PAIRS
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None,
- stable=True, log_level=DEBUG):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.log = self.get_logger(level=log_level)
- self.log.info('OpenStackAmuletDeployment: init')
- self.openstack = openstack
- self.source = source
- self.stable = stable
-
- def get_logger(self, name="deployment-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
-        stable or next (dev) branch, and based on this, use the corresponding
- stable or next branches for the other_services."""
-
- self.log.info('OpenStackAmuletDeployment: determine branch locations')
-
- # Charms outside the ~openstack-charmers
- base_charms = {
- 'mysql': ['trusty'],
- 'mongodb': ['trusty'],
- 'nrpe': ['trusty', 'xenial'],
- }
-
- for svc in other_services:
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if svc['name'] in base_charms:
- # NOTE: not all charms have support for all series we
- # want/need to test against, so fix to most recent
- # that each base charm supports
- target_series = self.series
- if self.series not in base_charms[svc['name']]:
- target_series = base_charms[svc['name']][-1]
- svc['location'] = 'cs:{}/{}'.format(target_series,
- svc['name'])
- elif self.stable:
- svc['location'] = 'cs:{}/{}'.format(self.series,
- svc['name'])
- else:
- svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
- self.series,
- svc['name']
- )
-
- return other_services
-
- def _add_services(self, this_service, other_services, use_source=None,
- no_origin=None):
- """Add services to the deployment and optionally set
- openstack-origin/source.
-
- :param this_service dict: Service dictionary describing the service
- whose amulet tests are being run
- :param other_services dict: List of service dictionaries describing
- the services needed to support the target
- service
- :param use_source list: List of services which use the 'source' config
- option rather than 'openstack-origin'
- :param no_origin list: List of services which do not support setting
- the Cloud Archive.
- Service Dict:
- {
- 'name': str charm-name,
- 'units': int number of units,
- 'constraints': dict of juju constraints,
- 'location': str location of charm,
- }
- eg
- this_service = {
- 'name': 'openvswitch-odl',
- 'constraints': {'mem': '8G'},
- }
- other_services = [
- {
- 'name': 'nova-compute',
- 'units': 2,
- 'constraints': {'mem': '4G'},
-                   'location': 'cs:~bob/xenial/nova-compute',
- },
- {
- 'name': 'mysql',
- 'constraints': {'mem': '2G'},
- },
- {'neutron-api-odl'}]
- use_source = ['mysql']
- no_origin = ['neutron-api-odl']
- """
- self.log.info('OpenStackAmuletDeployment: adding services')
-
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- use_source = use_source or []
- no_origin = no_origin or []
-
- # Charms which should use the source config option
- use_source = list(set(
- use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw', 'ceph-mon',
- 'ceph-proxy', 'percona-cluster', 'lxd']))
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = list(set(
- no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
- 'nrpe', 'openvswitch-odl', 'neutron-api-odl',
- 'odl-controller', 'cinder-backup', 'nexentaedge-data',
- 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
- 'cinder-nexentaedge', 'nexentaedge-mgmt',
- 'ceilometer-agent']))
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- self.log.info('OpenStackAmuletDeployment: configure services')
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=None):
- """Wait for all units to have a specific extended status, except
- for any defined as excluded. Unless specified via message, any
- status containing any case of 'ready' will be considered a match.
-
- Examples of message usage:
-
- Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
- message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
-
- Wait for all units to reach this status (exact match):
- message = re.compile('^Unit is ready and clustered$')
-
- Wait for all units to reach any one of these (exact match):
- message = re.compile('Unit is ready|OK|Ready')
-
- Wait for at least one unit to reach this status (exact match):
- message = {'ready'}
-
- See Amulet's sentry.wait_for_messages() for message usage detail.
- https://github.com/juju/amulet/blob/master/amulet/sentry.py
-
- :param message: Expected status match
- :param exclude_services: List of juju service names to ignore,
-               not to be used in conjunction with include_only.
-        :param include_only: List of juju service names to exclusively check,
-               not to be used in conjunction with exclude_services.
- :param timeout: Maximum time in seconds to wait for status match
- :returns: None. Raises if timeout is hit.
- """
- if not timeout:
- timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
- self.log.info('Waiting for extended status on units for {}s...'
- ''.format(timeout))
-
- all_services = self.d.services.keys()
-
- if exclude_services and include_only:
- raise ValueError('exclude_services can not be used '
- 'with include_only')
-
- if message:
- if isinstance(message, re._pattern_type):
- match = message.pattern
- else:
- match = message
-
- self.log.debug('Custom extended status wait match: '
- '{}'.format(match))
- else:
- self.log.debug('Default extended status wait match: contains '
- 'READY (case-insensitive)')
- message = re.compile('.*ready.*', re.IGNORECASE)
-
- if exclude_services:
- self.log.debug('Excluding services from extended status match: '
- '{}'.format(exclude_services))
- else:
- exclude_services = []
-
- if include_only:
- services = include_only
- else:
- services = list(set(all_services) - set(exclude_services))
-
- self.log.debug('Waiting up to {}s for extended status on services: '
- '{}'.format(timeout, services))
- service_messages = {service: message for service in services}
-
- # Check for idleness
- self.d.sentry.wait(timeout=timeout)
- # Check for error states and bail early
- self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
- # Check for ready messages
- self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
-
- self.log.info('OK')
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
- setattr(self, os_pair, i)
-
- releases = {
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
- ('xenial', None): self.xenial_mitaka,
- ('xenial', 'cloud:xenial-newton'): self.xenial_newton,
- ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
- ('xenial', 'cloud:xenial-pike'): self.xenial_pike,
- ('xenial', 'cloud:xenial-queens'): self.xenial_queens,
- ('yakkety', None): self.yakkety_newton,
- ('zesty', None): self.zesty_ocata,
- ('artful', None): self.artful_pike,
- ('bionic', None): self.bionic_queens,
- ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
- ('bionic', 'cloud:bionic-stein'): self.bionic_stein,
- ('cosmic', None): self.cosmic_rocky,
- ('disco', None): self.disco_stein,
- }
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('trusty', 'icehouse'),
- ('xenial', 'mitaka'),
- ('yakkety', 'newton'),
- ('zesty', 'ocata'),
- ('artful', 'pike'),
- ('bionic', 'queens'),
- ('cosmic', 'rocky'),
- ('disco', 'stein'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
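-
-    # Parsing sketch for the branch above (illustrative origin): an
-    # openstack-origin of 'cloud:xenial-ocata' on a xenial series splits
-    # as 'cloud:xenial-ocata'.split(':')[1] -> 'xenial-ocata', then
-    # .split('xenial-')[1].split('/')[0] -> 'ocata'; with no origin the
-    # series lookup applies (e.g. 'bionic' -> 'queens').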
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() == self.trusty_icehouse:
- # Icehouse
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder-ceph',
- 'glance'
- ]
- elif (self.trusty_kilo <= self._get_openstack_release() <=
- self.zesty_ocata):
- # Kilo through Ocata
- pools = [
- 'rbd',
- 'cinder-ceph',
- 'glance'
- ]
- else:
- # Pike and later
- pools = [
- 'cinder-ceph',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index 53fa650..0000000
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,1588 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import amulet
-import json
-import logging
-import os
-import re
-import six
-import time
-import urllib
-import urlparse
-
-import cinderclient.v1.client as cinder_client
-import cinderclient.v2.client as cinder_clientv2
-import glanceclient.v1 as glance_client
-import glanceclient.v2 as glance_clientv2
-import heatclient.v1.client as heat_client
-from keystoneclient.v2_0 import client as keystone_client
-from keystoneauth1.identity import (
- v3,
- v2,
-)
-from keystoneauth1 import session as keystone_session
-from keystoneclient.v3 import client as keystone_client_v3
-from novaclient import exceptions
-
-import novaclient.client as nova_client
-import novaclient
-import pika
-import swiftclient
-
-from charmhelpers.core.decorators import retry_on_exception
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-from charmhelpers.core.host import CompareHostReleases
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-NOVA_CLIENT_VERSION = "2"
-
-OPENSTACK_RELEASES_PAIRS = [
- 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
- 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
- 'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
- 'xenial_pike', 'artful_pike', 'xenial_queens',
- 'bionic_queens', 'bionic_rocky', 'cosmic_rocky',
- 'bionic_stein', 'disco_stein']
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected, openstack_release=None):
- """Validate endpoint data. Pick the correct validator based on
- OpenStack release. Expected data should be in the v2 format:
- {
- 'id': id,
- 'region': region,
- 'adminurl': adminurl,
- 'internalurl': internalurl,
- 'publicurl': publicurl,
- 'service_id': service_id}
-
- """
- validation_function = self.validate_v2_endpoint_data
- xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
- if openstack_release and openstack_release >= xenial_queens:
- validation_function = self.validate_v3_endpoint_data
- expected = {
- 'id': expected['id'],
- 'region': expected['region'],
- 'region_id': 'RegionOne',
- 'url': self.valid_url,
- 'interface': self.not_null,
- 'service_id': expected['service_id']}
- return validation_function(endpoints, admin_port, internal_port,
- public_port, expected)
-
- def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected, expected_num_eps=3):
- """Validate keystone v3 endpoint data.
-
- Validate the v3 endpoint data which has changed from v2. The
- ports are used to find the matching endpoint.
-
- The new v3 endpoint data looks like:
-
-        [<Endpoint ...,
-                   links={u'self': u'<RESTful URL>'},
-                   region=RegionOne,
-                   region_id=RegionOne,
-                   service_id=17f842a0dc084b928e476fafe67e4095,
-                   url=http://10.5.6.5:9312>,
-         <Endpoint ...,
-                   links={u'self': u'<RESTful URL>'},
-                   region=RegionOne,
-                   region_id=RegionOne,
-                   service_id=72fc8736fb41435e8b3584205bb2cfa3,
-                   url=http://10.5.6.6:35357/v3>,
-         ... ]
- """
- self.log.debug('Validating v3 endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = []
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if ((admin_port in ep.url and ep.interface == 'admin') or
- (internal_port in ep.url and ep.interface == 'internal') or
- (public_port in ep.url and ep.interface == 'public')):
- found.append(ep.interface)
- # note we ignore the links member.
- actual = {'id': ep.id,
- 'region': ep.region,
- 'region_id': ep.region_id,
- 'interface': self.not_null,
- 'url': ep.url,
- 'service_id': ep.service_id, }
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if len(found) != expected_num_eps:
- return 'Unexpected number of endpoints found'
-
- def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
- """Convert v2 endpoint data into v3.
-
- {
- 'service_name1': [
- {
- 'adminURL': adminURL,
- 'id': id,
-                    'region': region,
- 'publicURL': publicURL,
- 'internalURL': internalURL
- }],
- 'service_name2': [
- {
- 'adminURL': adminURL,
- 'id': id,
- 'region': region,
- 'publicURL': publicURL,
- 'internalURL': internalURL
- }],
- }
- """
- self.log.warn("Endpoint ID and Region ID validation is limited to not "
- "null checks after v2 to v3 conversion")
- for svc in ep_data.keys():
- assert len(ep_data[svc]) == 1, "Unknown data format"
- svc_ep_data = ep_data[svc][0]
- ep_data[svc] = [
- {
- 'url': svc_ep_data['adminURL'],
- 'interface': 'admin',
- 'region': svc_ep_data['region'],
- 'region_id': self.not_null,
- 'id': self.not_null},
- {
- 'url': svc_ep_data['publicURL'],
- 'interface': 'public',
- 'region': svc_ep_data['region'],
- 'region_id': self.not_null,
- 'id': self.not_null},
- {
- 'url': svc_ep_data['internalURL'],
- 'interface': 'internal',
- 'region': svc_ep_data['region'],
- 'region_id': self.not_null,
- 'id': self.not_null}]
- return ep_data
-
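The expansion of one v2 record into three v3 records is easy to show standalone; the service name and URLs below are made up, and the not_null sentinels for id/region_id are omitted for brevity:

    def v2_to_v3(ep_data):
        # Each v2 record carries three URLs; emit one v3 record per interface.
        for svc, records in ep_data.items():
            assert len(records) == 1, "Unknown data format"
            rec = records[0]
            ep_data[svc] = [
                {'url': rec[key], 'interface': iface, 'region': rec['region']}
                for key, iface in (('adminURL', 'admin'),
                                   ('publicURL', 'public'),
                                   ('internalURL', 'internal'))]
        return ep_data

    sample = {'image': [{'adminURL': 'http://10.0.0.1:9292',
                         'publicURL': 'http://10.0.0.1:9292',
                         'internalURL': 'http://10.0.0.1:9292',
                         'id': 'abc123', 'region': 'RegionOne'}]}
    assert len(v2_to_v3(sample)['image']) == 3
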
- def validate_svc_catalog_endpoint_data(self, expected, actual,
- openstack_release=None):
- """Validate service catalog endpoint data. Pick the correct validator
- for the OpenStack version. Expected data should be in the v2 format:
- {
- 'service_name1': [
- {
- 'adminURL': adminURL,
- 'id': id,
- 'region': region,
- 'publicURL': publicURL,
- 'internalURL': internalURL
- }],
- 'service_name2': [
- {
- 'adminURL': adminURL,
- 'id': id,
- 'region': region,
- 'publicURL': publicURL,
- 'internalURL': internalURL
- }],
- }
-
- """
- validation_function = self.validate_v2_svc_catalog_endpoint_data
- xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
- if openstack_release and openstack_release >= xenial_queens:
- validation_function = self.validate_v3_svc_catalog_endpoint_data
- expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
- return validation_function(expected, actual)
-
- def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
- """Validate the keystone v3 catalog endpoint data.
-
- Validate a list of dictinaries that make up the keystone v3 service
- catalogue.
-
- It is in the form of:
-
-
- {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
- u'interface': u'admin',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.224:35357/v3'},
- {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
- u'interface': u'public',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.224:5000/v3'},
- {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
- u'interface': u'internal',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.224:5000/v3'}],
- u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
- u'interface': u'public',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.223:9311'},
- {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
- u'interface': u'internal',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.223:9311'},
- {u'id': u'f629388955bc407f8b11d8b7ca168086',
- u'interface': u'admin',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.223:9312'}]}
-
- Note that an added complication is that the order of the admin,
- public and internal endpoints (the 'interface' key) within each
- region is not guaranteed.
-
- Thus, the function sorts the expected and actual lists using the
- interface key as a sort key, prior to the comparison.
- """
- self.log.debug('Validating v3 service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- l_expected = sorted(v, key=lambda x: x['interface'])
- l_actual = sorted(actual[k], key=lambda x: x['interface'])
- if len(l_actual) != len(l_expected):
- return ("endpoint {} has differing number of interfaces "
- " - expected({}), actual({})"
- .format(k, len(l_expected), len(l_actual)))
- for i_expected, i_actual in zip(l_expected, l_actual):
- self.log.debug("checking interface {}"
- .format(i_expected['interface']))
- ret = self._validate_dict_data(i_expected, i_actual)
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
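Because the catalog may list interfaces in any order, both sides are sorted on the 'interface' key before the pairwise comparison; a standalone illustration with fabricated endpoints:

    expected = [{'interface': 'admin', 'url': 'http://10.0.0.1:35357/v3'},
                {'interface': 'internal', 'url': 'http://10.0.0.1:5000/v3'},
                {'interface': 'public', 'url': 'http://10.0.0.1:5000/v3'}]
    # The actual catalog arrives shuffled; sorting makes zip() line up.
    actual = [expected[2], expected[0], expected[1]]
    l_expected = sorted(expected, key=lambda x: x['interface'])
    l_actual = sorted(actual, key=lambda x: x['interface'])
    assert all(e == a for e, a in zip(l_expected, l_actual))
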
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual, api_version=None):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- if e['name'] == act.name:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'id': act.id}
- if api_version == 3:
- a['default_project_id'] = getattr(act,
- 'default_project_id',
- 'none')
- else:
- a['tenantId'] = act.tenantId
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- @retry_on_exception(num_retries=5, base_delay=1)
- def keystone_wait_for_propagation(self, sentry_relation_pairs,
- api_version):
- """Iterate over list of sentry and relation tuples and verify that
- api_version has the expected value.
-
- :param sentry_relation_pairs: list of sentry, relation name tuples used
- for monitoring propagation of relation
- data
- :param api_version: api_version to expect in relation data
- :returns: None if successful. Raise on error.
- """
- for (sentry, relation_name) in sentry_relation_pairs:
- rel = sentry.relation('identity-service',
- relation_name)
- self.log.debug('keystone relation data: {}'.format(rel))
- if rel.get('api_version') != str(api_version):
- raise Exception("api_version not propagated through relation"
- " data yet ('{}' != '{}')."
- "".format(rel.get('api_version'), api_version))
-
- def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
- api_version):
- """Configure preferred-api-version of keystone in deployment and
- monitor provided list of relation objects for propagation
- before returning to caller.
-
- :param sentry_relation_pairs: list of sentry, relation tuples used for
- monitoring propagation of relation data
- :param deployment: deployment to configure
- :param api_version: value preferred-api-version will be set to
- :returns: None if successful. Raise on error.
- """
- self.log.debug("Setting keystone preferred-api-version: '{}'"
- "".format(api_version))
-
- config = {'preferred-api-version': api_version}
- deployment.d.configure('keystone', config)
- deployment._auto_wait_for_status()
- self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
-
- def authenticate_cinder_admin(self, keystone, api_version=2):
- """Authenticates admin user with cinder."""
- self.log.debug('Authenticating cinder admin...')
- _clients = {
- 1: cinder_client.Client,
- 2: cinder_clientv2.Client}
- return _clients[api_version](session=keystone.session)
-
- def authenticate_keystone(self, keystone_ip, username, password,
- api_version=False, admin_port=False,
- user_domain_name=None, domain_name=None,
- project_domain_name=None, project_name=None):
- """Authenticate with Keystone"""
- self.log.debug('Authenticating with keystone...')
- if not api_version:
- api_version = 2
- sess, auth = self.get_keystone_session(
- keystone_ip=keystone_ip,
- username=username,
- password=password,
- api_version=api_version,
- admin_port=admin_port,
- user_domain_name=user_domain_name,
- domain_name=domain_name,
- project_domain_name=project_domain_name,
- project_name=project_name
- )
- if api_version == 2:
- client = keystone_client.Client(session=sess)
- else:
- client = keystone_client_v3.Client(session=sess)
- # This populates the client.service_catalog
- client.auth_ref = auth.get_access(sess)
- return client
-
- def get_keystone_session(self, keystone_ip, username, password,
- api_version=False, admin_port=False,
- user_domain_name=None, domain_name=None,
- project_domain_name=None, project_name=None):
- """Return a keystone session object"""
- ep = self.get_keystone_endpoint(keystone_ip,
- api_version=api_version,
- admin_port=admin_port)
- if api_version == 2:
- auth = v2.Password(
- username=username,
- password=password,
- tenant_name=project_name,
- auth_url=ep
- )
- sess = keystone_session.Session(auth=auth)
- else:
- auth = v3.Password(
- user_domain_name=user_domain_name,
- username=username,
- password=password,
- domain_name=domain_name,
- project_domain_name=project_domain_name,
- project_name=project_name,
- auth_url=ep
- )
- sess = keystone_session.Session(auth=auth)
- return (sess, auth)
-
- def get_keystone_endpoint(self, keystone_ip, api_version=None,
- admin_port=False):
- """Return keystone endpoint"""
- port = 5000
- if admin_port:
- port = 35357
- base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
- port)
- if api_version == 2:
- ep = base_ep + "/v2.0"
- else:
- ep = base_ep + "/v3"
- return ep
-
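The construction reduces to two constants and one suffix rule: 5000 is the public port, 35357 the traditional admin port, and only api_version == 2 selects /v2.0. A sketch:

    def keystone_url(ip, api_version=None, admin_port=False):
        port = 35357 if admin_port else 5000
        suffix = '/v2.0' if api_version == 2 else '/v3'
        return 'http://{}:{}{}'.format(ip, port, suffix)

    assert keystone_url('10.0.0.1', api_version=2) == 'http://10.0.0.1:5000/v2.0'
    assert keystone_url('10.0.0.1', admin_port=True) == 'http://10.0.0.1:35357/v3'
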
- def get_default_keystone_session(self, keystone_sentry,
- openstack_release=None, api_version=2):
- """Return a keystone session object and client object assuming standard
- default settings
-
- Example call in amulet tests:
- self.keystone_session, self.keystone = u.get_default_keystone_session(
- self.keystone_sentry,
- openstack_release=self._get_openstack_release())
-
- The session can then be used to auth other clients:
- neutronclient.Client(session=session)
- aodh_client.Client(session=session)
- etc.
- """
- self.log.debug('Authenticating keystone admin...')
- # 11 => xenial_queens
- if api_version == 3 or (openstack_release and openstack_release >= 11):
- client_class = keystone_client_v3.Client
- api_version = 3
- else:
- client_class = keystone_client.Client
- keystone_ip = keystone_sentry.info['public-address']
- session, auth = self.get_keystone_session(
- keystone_ip,
- api_version=api_version,
- username='admin',
- password='openstack',
- project_name='admin',
- user_domain_name='admin_domain',
- project_domain_name='admin_domain')
- client = client_class(session=session)
- # This populates the client.service_catalog
- client.auth_ref = auth.get_access(session)
- return session, client
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant=None, api_version=None,
- keystone_ip=None, user_domain_name=None,
- project_domain_name=None,
- project_name=None):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- if not keystone_ip:
- keystone_ip = keystone_sentry.info['public-address']
-
- # To support backward compatibility usage of this function
- if not project_name:
- project_name = tenant
- if api_version == 3 and not user_domain_name:
- user_domain_name = 'admin_domain'
- if api_version == 3 and not project_domain_name:
- project_domain_name = 'admin_domain'
- if api_version == 3 and not project_name:
- project_name = 'admin'
-
- return self.authenticate_keystone(
- keystone_ip, user, password,
- api_version=api_version,
- user_domain_name=user_domain_name,
- project_domain_name=project_domain_name,
- project_name=project_name,
- admin_port=True)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- interface='publicURL')
- keystone_ip = urlparse.urlparse(ep).hostname
-
- return self.authenticate_keystone(keystone_ip, user, password,
- project_name=tenant)
-
- def authenticate_glance_admin(self, keystone, force_v1_client=False):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- interface='adminURL')
- if not force_v1_client and keystone.session:
- return glance_clientv2.Client("2", session=keystone.session)
- else:
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- interface='publicURL')
- if keystone.session:
- return heat_client.Client(endpoint=ep, session=keystone.session)
- else:
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- interface='publicURL')
- if keystone.session:
- return nova_client.Client(NOVA_CLIENT_VERSION,
- session=keystone.session,
- auth_url=ep)
- elif novaclient.__version__[0] >= "7":
- return nova_client.Client(NOVA_CLIENT_VERSION,
- username=user, password=password,
- project_name=tenant, auth_url=ep)
- else:
- return nova_client.Client(NOVA_CLIENT_VERSION,
- username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- interface='publicURL')
- if keystone.session:
- return swiftclient.Connection(session=keystone.session)
- else:
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto",
- ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
- """Create the specified flavor."""
- try:
- nova.flavors.find(name=name)
- except (exceptions.NotFound, exceptions.NoUniqueMatch):
- self.log.debug('Creating flavor ({})'.format(name))
- nova.flavors.create(name, ram, vcpus, disk, flavorid,
- ephemeral, swap, rxtx_factor, is_public)
-
- def glance_create_image(self, glance, image_name, image_url,
- download_dir='tests',
- hypervisor_type=None,
- disk_format='qcow2',
- architecture='x86_64',
- container_format='bare'):
- """Download an image and upload it to glance, validate its status
- and return an image object pointer. KVM defaults, can override for
- LXD.
-
- :param glance: pointer to authenticated glance api connection
- :param image_name: display name for new image
- :param image_url: url to retrieve
- :param download_dir: directory to store downloaded image file
- :param hypervisor_type: glance image hypervisor property
- :param disk_format: glance image disk format
- :param architecture: glance image architecture property
- :param container_format: glance image container format
- :returns: glance image pointer
- """
- self.log.debug('Creating glance image ({}) from '
- '{}...'.format(image_name, image_url))
-
- # Download image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- abs_file_name = os.path.join(download_dir, image_name)
- if not os.path.exists(abs_file_name):
- opener.retrieve(image_url, abs_file_name)
-
- # Create glance image
- glance_properties = {
- 'architecture': architecture,
- }
- if hypervisor_type:
- glance_properties['hypervisor_type'] = hypervisor_type
- # Create glance image
- if float(glance.version) < 2.0:
- with open(abs_file_name) as f:
- image = glance.images.create(
- name=image_name,
- is_public=True,
- disk_format=disk_format,
- container_format=container_format,
- properties=glance_properties,
- data=f)
- else:
- image = glance.images.create(
- name=image_name,
- visibility="public",
- disk_format=disk_format,
- container_format=container_format)
- glance.images.upload(image.id, open(abs_file_name, 'rb'))
- glance.images.update(image.id, **glance_properties)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
-
- if float(glance.version) < 2.0:
- val_img_pub = glance.images.get(img_id).is_public
- else:
- val_img_pub = glance.images.get(img_id).visibility == "public"
-
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == container_format \
- and val_img_dfmt == disk_format:
- self.log.debug(msg_attr)
- else:
- msg = ('Image validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def create_cirros_image(self, glance, image_name, hypervisor_type=None):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :param hypervisor_type: glance image hypervisor property
- :returns: glance image pointer
- """
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'glance_create_image instead of '
- 'create_cirros_image.')
-
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Get cirros image URL
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- f.close()
-
- return self.glance_create_image(
- glance,
- image_name,
- cirros_url,
- hypervisor_type=hypervisor_type)
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.glance.find_image(image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except Exception:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def _get_cinder_obj_name(self, cinder_object):
- """Retrieve name of cinder object.
-
- :param cinder_object: cinder snapshot or volume object
- :returns: str cinder object name
- """
- # v1 objects store name in 'display_name' attr but v2+ use 'name'
- try:
- return cinder_object.display_name
- except AttributeError:
- return cinder_object.name
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except TypeError:
- vol_new = cinder.volumes.create(name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
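Typical calls, showing that the image/clone/snapshot sources are mutually exclusive (variable names here are hypothetical):

    # vol = u.create_cinder_volume(cinder)                     # blank volume
    # vol = u.create_cinder_volume(cinder, img_id=image.id)    # from image
    # vol = u.create_cinder_volume(cinder, src_vol_id=vol.id)  # clone a volume
    # vol = u.create_cinder_volume(cinder, snap_id=snap.id)    # from snapshot
    # Passing more than one source raises amulet.FAIL.
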
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
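The count-based deletion check generalises to a small helper; a sketch, assuming list_fn and delete_fn mirror the resource pointer's list() and delete() calls:

    import time

    def wait_for_deletion(list_fn, delete_fn, resource_id, max_wait=120):
        # Record the collection size, delete, then poll until it shrinks by one.
        before = len(list(list_fn()))
        delete_fn(resource_id)
        deadline = time.time() + max_wait
        while time.time() < deadline:
            if len(list(list_fn())) == before - 1:
                return True
            time.sleep(4)
        return False
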
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # From Ceph Mimic onward, ceph osd lspools output is newline-separated
- output = output.replace("\n", ",")
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
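The replace-then-split covers both output styles; a standalone check against sample Mimic-style (newline-separated) output:

    # Luminous and earlier emit '0 data,1 metadata,...'; Mimic emits one
    # pool per line. Normalising newlines to commas handles both.
    output = "0 data\n1 metadata\n2 rbd\n3 cinder-ceph"
    pools = {}
    for pool in output.replace("\n", ",").split(','):
        fields = pool.split(' ')
        if len(fields) == 2:
            pools[fields[1]] = int(fields[0])
    assert pools == {'data': 0, 'metadata': 1, 'rbd': 2, 'cinder-ceph': 3}
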
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- for pool in df['pools']:
- if pool['id'] == pool_id:
- pool_name = pool['name']
- obj_count = pool['stats']['objects']
- kb_used = pool['stats']['kb_used']
-
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
- # rabbitmq/amqp specific helpers:
-
- def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
- """Wait for rmq units extended status to show cluster readiness,
- after an optional initial sleep period. Initial sleep is likely
- necessary to be effective following a config change, as status
- message may not instantly update to non-ready."""
-
- if init_sleep:
- time.sleep(init_sleep)
-
- message = re.compile('^Unit is ready and clustered$')
- deployment._auto_wait_for_status(message=message,
- timeout=timeout,
- include_only=['rabbitmq-server'])
-
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param sentry_unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param sentry_unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
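The string-chop arithmetic is easiest to follow against a fabricated, but representative, slice of cluster_status output:

    import json

    str_stat = ("[{nodes,[{disc,['rabbit@juju-1','rabbit@juju-2']}]},"
                "{running_nodes,['rabbit@juju-1','rabbit@juju-2']},"
                "{partitions,[]}]")
    pos_start = str_stat.find("{running_nodes,") + 15  # skip past the tag
    pos_end = str_stat.find("]},", pos_start) + 1      # include closing ']'
    run_nodes = json.loads(str_stat[pos_start:pos_end].replace("'", '"'))
    assert run_nodes == ['rabbit@juju-1', 'rabbit@juju-2']
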
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.is_open is True
- assert connection.is_closing is False
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_memcache(self, sentry_unit, conf, os_release,
- earliest_release=5, section='keystone_authtoken',
- check_kvs=None):
- """Check Memcache is running and is configured to be used
-
- Example call from Amulet test:
-
- def test_110_memcache(self):
- u.validate_memcache(self.neutron_api_sentry,
- '/etc/neutron/neutron.conf',
- self._get_openstack_release())
-
- :param sentry_unit: sentry unit
- :param conf: OpenStack config file to check memcache settings
- :param os_release: Current OpenStack release int code
- :param earliest_release: Earliest Openstack release to check int code
- :param section: OpenStack config file section to check
- :param check_kvs: Dict of settings to check in config file
- :returns: None
- """
- if os_release < earliest_release:
- self.log.debug('Skipping memcache checks for deployment. {} < '
- 'mitaka'.format(os_release))
- return
- _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
- self.log.debug('Checking memcached is running')
- ret = self.validate_services_by_name({sentry_unit: ['memcached']})
- if ret:
- amulet.raise_status(amulet.FAIL, msg='Memcache running check '
- 'failed {}'.format(ret))
- else:
- self.log.debug('OK')
- self.log.debug('Checking memcache url is configured in {}'.format(
- conf))
- if self.validate_config_data(sentry_unit, conf, section, _kvs):
- message = "Memcache config error in: {}".format(conf)
- amulet.raise_status(amulet.FAIL, msg=message)
- else:
- self.log.debug('OK')
- self.log.debug('Checking memcache configuration in '
- '/etc/memcached.conf')
- contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
- fatal=True)
- ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
- if CompareHostReleases(ubuntu_release) <= 'trusty':
- memcache_listen_addr = 'ip6-localhost'
- else:
- memcache_listen_addr = '::1'
- expected = {
- '-p': '11211',
- '-l': memcache_listen_addr}
- found = []
- for key, value in expected.items():
- for line in contents.split('\n'):
- if line.startswith(key):
- self.log.debug('Checking {} is set to {}'.format(
- key,
- value))
- assert value == line.split()[-1]
- self.log.debug(line.split()[-1])
- found.append(key)
- if sorted(found) == sorted(expected.keys()):
- self.log.debug('OK')
- else:
- message = "Memcache config error in: /etc/memcached.conf"
- amulet.raise_status(amulet.FAIL, msg=message)
diff --git a/hooks/charmhelpers/contrib/openstack/audits/__init__.py b/hooks/charmhelpers/contrib/openstack/audits/__init__.py
deleted file mode 100644
index 7f7e5f7..0000000
--- a/hooks/charmhelpers/contrib/openstack/audits/__init__.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright 2019 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""OpenStack Security Audit code"""
-
-import collections
-from enum import Enum
-import traceback
-
-from charmhelpers.core.host import cmp_pkgrevno
-import charmhelpers.contrib.openstack.utils as openstack_utils
-import charmhelpers.core.hookenv as hookenv
-
-
-class AuditType(Enum):
- OpenStackSecurityGuide = 1
-
-
-_audits = {}
-
-Audit = collections.namedtuple('Audit', 'func filters')
-
-
-def audit(*args):
- """Decorator to register an audit.
-
- These are used to generate audits that can be run on a
- deployed system that matches the given configuration
-
- :param args: List of functions to filter tests against
- :type args: List[Callable[Dict]]
- """
- def wrapper(f):
- test_name = f.__name__
- if _audits.get(test_name):
- raise RuntimeError(
- "Test name '{}' used more than once"
- .format(test_name))
- non_callables = [fn for fn in args if not callable(fn)]
- if non_callables:
- raise RuntimeError(
- "Configuration includes non-callable filters: {}"
- .format(non_callables))
- _audits[test_name] = Audit(func=f, filters=args)
- return f
- return wrapper
-
-
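A hedged usage sketch of the registration pattern; the audit name and its config key are made up, and it relies on the filter helpers defined below:

    @audit(is_audit_type(AuditType.OpenStackSecurityGuide),
           it_has_config('files'))
    def example_files_are_configured(audit_options):
        # Audits signal failure by raising AssertionError; returning
        # normally counts as a pass when run() invokes them.
        assert audit_options.get('files'), "no files configured"
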
-def is_audit_type(*args):
- """This audit is included in the specified kinds of audits.
-
- :param *args: List of AuditTypes to include this audit in
- :type args: List[AuditType]
- :rtype: Callable[Dict]
- """
- def _is_audit_type(audit_options):
- if audit_options.get('audit_type') in args:
- return True
- else:
- return False
- return _is_audit_type
-
-
-def since_package(pkg, pkg_version):
- """This audit should be run after the specified package version (incl).
-
- :param pkg: Package name to compare
- :type pkg: str
- :param pkg_version: The package version
- :type pkg_version: str
- :rtype: Callable[Dict]
- """
- def _since_package(audit_options=None):
- return cmp_pkgrevno(pkg, pkg_version) >= 0
-
- return _since_package
-
-
-def before_package(pkg, pkg_version):
- """This audit should be run before the specified package version (excl).
-
- :param pkg: Package name to compare
- :type pkg: str
- :param pkg_version: The package version
- :type pkg_version: str
- :rtype: Callable[Dict]
- """
- def _before_package(audit_options=None):
- return not since_package(pkg, pkg_version)()
-
- return _before_package
-
-
-def since_openstack_release(pkg, release):
- """This audit should run after the specified OpenStack version (incl).
-
- :param pkg: Package name to compare
- :type pkg: str
- :param release: The OpenStack release codename
- :type release: str
- :rtype: Callable[Dict]
- """
- def _since_openstack_release(audit_options=None):
- _release = openstack_utils.get_os_codename_package(pkg)
- return openstack_utils.CompareOpenStackReleases(_release) >= release
-
- return _since_openstack_release
-
-
-def before_openstack_release(pkg, release):
- """This audit should run before the specified OpenStack version (excl).
-
- :param pkg: Package name to compare
- :type pkg: str
- :param release: The OpenStack release codename
- :type release: str
- :rtype: Callable[Dict]
- """
- def _before_openstack_release(audit_options=None):
- return not since_openstack_release(pkg, release)()
-
- return _before_openstack_release
-
-
-def it_has_config(config_key):
- """This audit should be run based on specified config keys.
-
- :param config_key: Config key to look for
- :type config_key: str
- :rtype: Callable[Dict]
- """
- def _it_has_config(audit_options):
- return audit_options.get(config_key) is not None
-
- return _it_has_config
-
-
-def run(audit_options):
- """Run the configured audits with the specified audit_options.
-
- :param audit_options: Configuration for the audit
- :type audit_options: Config
-
- :rtype: Dict[str, str]
- """
- errors = {}
- results = {}
- for name, audit in sorted(_audits.items()):
- result_name = name.replace('_', '-')
- if result_name in audit_options.get('excludes', []):
- print(
- "Skipping {} because it is"
- "excluded in audit config"
- .format(result_name))
- continue
- if all(p(audit_options) for p in audit.filters):
- try:
- audit.func(audit_options)
- print("{}: PASS".format(name))
- results[result_name] = {
- 'success': True,
- }
- except AssertionError as e:
- print("{}: FAIL ({})".format(name, e))
- results[result_name] = {
- 'success': False,
- 'message': e,
- }
- except Exception as e:
- print("{}: ERROR ({})".format(name, e))
- errors[name] = e
- results[result_name] = {
- 'success': False,
- 'message': e,
- }
- for name, error in errors.items():
- print("=" * 20)
- print("Error in {}: ".format(name))
- traceback.print_tb(error.__traceback__)
- print()
- return results
-
-
-def action_parse_results(result):
- """Parse the result of `run` in the context of an action.
-
- :param result: The result of running the security-checklist
- action on a unit
- :type result: Dict[str, Dict[str, str]]
- :rtype: int
- """
- passed = True
- for test, result in result.items():
- if result['success']:
- hookenv.action_set({test: 'PASS'})
- else:
- hookenv.action_set({test: 'FAIL - {}'.format(result['message'])})
- passed = False
- if not passed:
- hookenv.action_fail("One or more tests failed")
- return 0 if passed else 1
diff --git a/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
deleted file mode 100644
index e5b7ac1..0000000
--- a/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
+++ /dev/null
@@ -1,266 +0,0 @@
-# Copyright 2019 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import collections
-import configparser
-import glob
-import os.path
-import subprocess
-
-from charmhelpers.contrib.openstack.audits import (
- audit,
- AuditType,
- # filters
- is_audit_type,
- it_has_config,
-)
-
-from charmhelpers.core.hookenv import (
- cached,
-)
-
-"""
-The Security Guide suggests that specific files inside the config
-directory for each service have mode 640. By instead ensuring the
-containing directory is 750, only the owner can write, and only the
-group can read, files within the directory.
-
-By restricting access to the containing directory, we can more
-effectively ensure that there is no accidental leakage if a new
-file is added to the service without being added to the security
-guide, and to this check.
-"""
-FILE_ASSERTIONS = {
- 'barbican': {
- '/etc/barbican': {'group': 'barbican', 'mode': '750'},
- },
- 'ceph-mon': {
- '/var/lib/charm/ceph-mon/ceph.conf':
- {'owner': 'root', 'group': 'root', 'mode': '644'},
- '/etc/ceph/ceph.client.admin.keyring':
- {'owner': 'ceph', 'group': 'ceph'},
- '/etc/ceph/rbdmap': {'mode': '644'},
- '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'},
- '/var/lib/ceph/bootstrap-*/ceph.keyring':
- {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}
- },
- 'ceph-osd': {
- '/var/lib/charm/ceph-osd/ceph.conf':
- {'owner': 'ceph', 'group': 'ceph', 'mode': '644'},
- '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'},
- '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
- '/var/lib/ceph/bootstrap-*/ceph.keyring':
- {'owner': 'ceph', 'group': 'ceph', 'mode': '600'},
- '/var/lib/ceph/radosgw':
- {'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
- },
- 'cinder': {
- '/etc/cinder': {'group': 'cinder', 'mode': '750'},
- },
- 'glance': {
- '/etc/glance': {'group': 'glance', 'mode': '750'},
- },
- 'keystone': {
- '/etc/keystone':
- {'owner': 'keystone', 'group': 'keystone', 'mode': '750'},
- },
- 'manilla': {
- '/etc/manila': {'group': 'manilla', 'mode': '750'},
- },
- 'neutron-gateway': {
- '/etc/neutron': {'group': 'neutron', 'mode': '750'},
- },
- 'neutron-api': {
- '/etc/neutron/': {'group': 'neutron', 'mode': '750'},
- },
- 'nova-cloud-controller': {
- '/etc/nova': {'group': 'nova', 'mode': '750'},
- },
- 'nova-compute': {
- '/etc/nova/': {'group': 'nova', 'mode': '750'},
- },
- 'openstack-dashboard': {
- # From security guide
- '/etc/openstack-dashboard/local_settings.py':
- {'group': 'horizon', 'mode': '640'},
- },
-}
-
-Ownership = collections.namedtuple('Ownership', 'owner group mode')
-
-
-@cached
-def _stat(file):
- """
- Get the Ownership information from a file.
-
- :param file: The path to a file to stat
- :type file: str
- :returns: owner, group, and mode of the specified file
- :rtype: Ownership
- :raises subprocess.CalledProcessError: If the underlying stat fails
- """
- out = subprocess.check_output(
- ['stat', '-c', '%U %G %a', file]).decode('utf-8')
- return Ownership(*out.strip().split(' '))
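-
-# Illustrative sketch (not part of the module API): for a file owned by
-# nova:nova with mode 640, `stat -c '%U %G %a'` prints "nova nova 640",
-# which _stat() splits into the named tuple:
-#
-# >>> _stat('/etc/nova/nova.conf') # doctest: +SKIP
-# Ownership(owner='nova', group='nova', mode='640')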
-
-
-@cached
-def _config_ini(path):
- """
- Parse an ini file.
-
- :param path: The path to a file to parse
- :type path: str
- :returns: Configuration contained in path
- :rtype: Dict
- """
- conf = configparser.ConfigParser()
- conf.read(path)
- return dict(conf)
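-
-# Minimal usage sketch (hypothetical path): the returned dict maps section
-# names to configparser section proxies, so values are read with .get():
-#
-# >>> conf = _config_ini('/etc/nova/nova.conf') # doctest: +SKIP
-# >>> conf['DEFAULT'].get('auth_strategy')
-# 'keystone'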
-
-
-def _validate_file_ownership(owner, group, file_name, optional=False):
- """
- Validate that a specified file is owned by `owner:group`.
-
- :param owner: Name of the owner
- :type owner: str
- :param group: Name of the group
- :type group: str
- :param file_name: Path to the file to verify
- :type file_name: str
- :param optional: Is this file optional,
- i.e. should a missing file be tolerated rather than fail the audit
- :type optional: bool
- """
- try:
- ownership = _stat(file_name)
- except subprocess.CalledProcessError as e:
- print("Error reading file: {}".format(e))
- if not optional:
- assert False, "Specified file does not exist: {}".format(file_name)
- # A missing optional file leaves nothing further to validate.
- return
- assert owner == ownership.owner, \
- "{} has an incorrect owner: {} should be {}".format(
- file_name, ownership.owner, owner)
- assert group == ownership.group, \
- "{} has an incorrect group: {} should be {}".format(
- file_name, ownership.group, group)
- print("Validate ownership of {}: PASS".format(file_name))
-
-
-def _validate_file_mode(mode, file_name, optional=False):
- """
- Validate that a specified file has the specified permissions.
-
- :param mode: The desired file mode
- :type mode: str
- :param file_name: Path to the file to verify
- :type file_name: str
- :param optional: Is this file optional,
- i.e. should a missing file be tolerated rather than fail the audit
- :type optional: bool
- """
- try:
- ownership = _stat(file_name)
- except subprocess.CalledProcessError as e:
- print("Error reading file: {}".format(e))
- if not optional:
- assert False, "Specified file does not exist: {}".format(file_name)
- # A missing optional file leaves nothing further to validate.
- return
- assert mode == ownership.mode, \
- "{} has an incorrect mode: {} should be {}".format(
- file_name, ownership.mode, mode)
- print("Validate mode of {}: PASS".format(file_name))
-
-
-@cached
-def _config_section(config, section):
- """Read the configuration file and return a section."""
- path = os.path.join(config.get('config_path'), config.get('config_file'))
- conf = _config_ini(path)
- return conf.get(section)
-
-
-@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
- it_has_config('files'))
-def validate_file_ownership(config):
- """Verify that configuration files are owned by the correct user/group."""
- files = config.get('files', {})
- for file_name, options in files.items():
- for key in options.keys():
- if key not in ["owner", "group", "mode"]:
- raise RuntimeError(
- "Invalid ownership configuration: {}".format(key))
- owner = options.get('owner', config.get('owner', 'root'))
- group = options.get('group', config.get('group', 'root'))
- optional = options.get('optional', config.get('optional', False))
- if '*' in file_name:
- for file in glob.glob(file_name):
- if file not in files.keys():
- if os.path.isfile(file):
- _validate_file_ownership(owner, group, file, optional)
- else:
- if os.path.isfile(file_name):
- _validate_file_ownership(owner, group, file_name, optional)
-
-
-@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
- it_has_config('files'))
-def validate_file_permissions(config):
- """Verify that permissions on configuration files are secure enough."""
- files = config.get('files', {})
- for file_name, options in files.items():
- for key in options.keys():
- if key not in ["owner", "group", "mode"]:
- raise RuntimeError(
- "Invalid ownership configuration: {}".format(key))
- mode = options.get('mode', config.get('permissions', '600'))
- optional = options.get('optional', config.get('optional', False))
- if '*' in file_name:
- for file in glob.glob(file_name):
- if file not in files.keys():
- if os.path.isfile(file):
- _validate_file_mode(mode, file, optional)
- else:
- if os.path.isfile(file_name):
- _validate_file_mode(mode, file_name, optional)
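-
-# Illustrative shape of the audit config consumed by the two validators
-# above (hypothetical values, not a charm default). Per-file options may
-# only contain 'owner', 'group' and 'mode'; anything else falls back to
-# the top-level defaults:
-#
-# config = {
-# 'files': {
-# '/etc/nova': {'group': 'nova', 'mode': '750'},
-# '/etc/nova/nova.conf': {'owner': 'nova', 'mode': '640'},
-# },
-# 'owner': 'root', # fallback owner
-# 'permissions': '600', # fallback mode
-# }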
-
-
-@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
-def validate_uses_keystone(audit_options):
- """Validate that the service uses Keystone for authentication."""
- section = _config_section(audit_options, 'DEFAULT')
- assert section is not None, "Missing section 'DEFAULT'"
- assert section.get('auth_strategy') == "keystone", \
- "Application is not using Keystone"
-
-
-@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
-def validate_uses_tls_for_keystone(audit_options):
- """Verify that TLS is used to communicate with Keystone."""
- section = _config_section(audit_options, 'keystone_authtoken')
- assert section is not None, "Missing section 'keystone_authtoken'"
- assert not section.get('insecure') and \
- "https://" in section.get("auth_uri"), \
- "TLS is not used for Keystone"
-
-
-@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
-def validate_uses_tls_for_glance(audit_options):
- """Verify that TLS is used to communicate with Glance."""
- section = _config_section(audit_options, 'glance')
- assert section is not None, "Missing section 'glance'"
- assert not section.get('insecure') and \
- "https://" in section.get("api_servers"), \
- "TLS is not used for Glance"
diff --git a/hooks/charmhelpers/contrib/openstack/cert_utils.py b/hooks/charmhelpers/contrib/openstack/cert_utils.py
deleted file mode 100644
index 47b8603..0000000
--- a/hooks/charmhelpers/contrib/openstack/cert_utils.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# Copyright 2014-2018 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Common python helper functions used for OpenStack charm certificates.
-
-import os
-import json
-
-from charmhelpers.contrib.network.ip import (
- get_hostname,
- resolve_network_cidr,
-)
-from charmhelpers.core.hookenv import (
- local_unit,
- network_get_primary_address,
- config,
- related_units,
- relation_get,
- relation_ids,
- unit_get,
- NoNetworkBinding,
- log,
- WARNING,
-)
-from charmhelpers.contrib.openstack.ip import (
- ADMIN,
- resolve_address,
- get_vip_in_network,
- INTERNAL,
- PUBLIC,
- ADDRESS_MAP)
-
-from charmhelpers.core.host import (
- mkdir,
- write_file,
-)
-
-from charmhelpers.contrib.hahelpers.apache import (
- install_ca_cert
-)
-
-
-class CertRequest(object):
-
- """Create a request for certificates to be generated
- """
-
- def __init__(self, json_encode=True):
- self.entries = []
- self.hostname_entry = None
- self.json_encode = json_encode
-
- def add_entry(self, net_type, cn, addresses):
- """Add a request to the batch
-
- :param net_type: str network space name the request is for
- :param cn: str Canonical Name for certificate
- :param addresses: [] List of addresses to be used as SANs
- """
- self.entries.append({
- 'cn': cn,
- 'addresses': addresses})
-
- def add_hostname_cn(self):
- """Add a request for the hostname of the machine"""
- ip = unit_get('private-address')
- addresses = [ip]
- # If a vip is being used without os-hostname config or
- # network spaces then we need to ensure the local unit's
- # cert has the appropriate vip in the SAN list
- vip = get_vip_in_network(resolve_network_cidr(ip))
- if vip:
- addresses.append(vip)
- self.hostname_entry = {
- 'cn': get_hostname(ip),
- 'addresses': addresses}
-
- def add_hostname_cn_ip(self, addresses):
- """Add an address to the SAN list for the hostname request
-
- :param addresses: [] List of addresses to be added
- """
- for addr in addresses:
- if addr not in self.hostname_entry['addresses']:
- self.hostname_entry['addresses'].append(addr)
-
- def get_request(self):
- """Generate request from the batched up entries
-
- """
- if self.hostname_entry:
- self.entries.append(self.hostname_entry)
- request = {}
- for entry in self.entries:
- sans = sorted(list(set(entry['addresses'])))
- request[entry['cn']] = {'sans': sans}
- if self.json_encode:
- return {'cert_requests': json.dumps(request, sort_keys=True)}
- else:
- return {'cert_requests': request}
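-
-# Illustrative payload (hypothetical CN and addresses): with
-# json_encode=True, get_request() returns a single relation key whose
-# value is a JSON map of CN -> SAN list, e.g.
-#
-# {'cert_requests':
-# '{"juju-unit-0.maas": {"sans": ["10.0.0.10", "10.0.0.100"]}}'}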
-
-
-def get_certificate_request(json_encode=True):
- """Generate a certificatee requests based on the network confioguration
-
- """
- req = CertRequest(json_encode=json_encode)
- req.add_hostname_cn()
- # Add os-hostname entries
- for net_type in [INTERNAL, ADMIN, PUBLIC]:
- net_config = config(ADDRESS_MAP[net_type]['override'])
- try:
- net_addr = resolve_address(endpoint_type=net_type)
- ip = network_get_primary_address(
- ADDRESS_MAP[net_type]['binding'])
- addresses = [net_addr, ip]
- vip = get_vip_in_network(resolve_network_cidr(ip))
- if vip:
- addresses.append(vip)
- if net_config:
- req.add_entry(
- net_type,
- net_config,
- addresses)
- else:
- # There is a network address with no corresponding hostname.
- # Add the ip to the hostname cert to allow for this.
- req.add_hostname_cn_ip(addresses)
- except NoNetworkBinding:
- log("Skipping request for certificate for ip in {} space, no "
- "local address found".format(net_type), WARNING)
- return req.get_request()
-
-
-def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
- """Create symlinks for SAN records
-
- :param ssl_dir: str Directory to create symlinks in
- :param custom_hostname_link: str Additional link to be created
- """
- hostname = get_hostname(unit_get('private-address'))
- hostname_cert = os.path.join(
- ssl_dir,
- 'cert_{}'.format(hostname))
- hostname_key = os.path.join(
- ssl_dir,
- 'key_{}'.format(hostname))
- # Add links to hostname cert, used if os-hostname vars not set
- for net_type in [INTERNAL, ADMIN, PUBLIC]:
- try:
- addr = resolve_address(endpoint_type=net_type)
- cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
- key = os.path.join(ssl_dir, 'key_{}'.format(addr))
- if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
- os.symlink(hostname_cert, cert)
- os.symlink(hostname_key, key)
- except NoNetworkBinding:
- log("Skipping creating cert symlink for ip in {} space, no "
- "local address found".format(net_type), WARNING)
- if custom_hostname_link:
- custom_cert = os.path.join(
- ssl_dir,
- 'cert_{}'.format(custom_hostname_link))
- custom_key = os.path.join(
- ssl_dir,
- 'key_{}'.format(custom_hostname_link))
- if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert):
- os.symlink(hostname_cert, custom_cert)
- os.symlink(hostname_key, custom_key)
-
-
-def install_certs(ssl_dir, certs, chain=None, user='root', group='root'):
- """Install the certs passed into the ssl dir and append the chain if
- provided.
-
- :param ssl_dir: str Directory to write the certificates into
- :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}}
- :param chain: str Chain to be appended to certs
- :param user: (Optional) Owner of certificate files. Defaults to 'root'
- :type user: str
- :param group: (Optional) Group of certificate files. Defaults to 'root'
- :type group: str
- """
- for cn, bundle in certs.items():
- cert_filename = 'cert_{}'.format(cn)
- key_filename = 'key_{}'.format(cn)
- cert_data = bundle['cert']
- if chain:
- # Append chain file so that clients that trust the root CA will
- # trust certs signed by an intermediate in the chain
- cert_data = cert_data + os.linesep + chain
- write_file(
- path=os.path.join(ssl_dir, cert_filename), owner=user, group=group,
- content=cert_data, perms=0o640)
- write_file(
- path=os.path.join(ssl_dir, key_filename), owner=user, group=group,
- content=bundle['key'], perms=0o640)
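-
-# Sketch of the expected `certs` mapping (hypothetical CN and PEM bodies).
-# When a chain is supplied it is appended to every certificate, so clients
-# that only trust the root CA also accept leaf certs signed by an
-# intermediate:
-#
-# install_certs('/etc/apache2/ssl/nova', {
-# 'juju-unit-0.maas': {'cert': '-----BEGIN CERTIFICATE-----...',
-# 'key': '-----BEGIN PRIVATE KEY-----...'},
-# })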
-
-
-def process_certificates(service_name, relation_id, unit,
- custom_hostname_link=None, user='root', group='root'):
- """Process the certificates supplied down the relation
-
- :param service_name: str Name of service the certificates are for.
- :param relation_id: str Relation id providing the certs
- :param unit: str Unit providing the certs
- :param custom_hostname_link: str Name of custom link to create
- :param user: (Optional) Owner of certificate files. Defaults to 'root'
- :type user: str
- :param group: (Optional) Group of certificate files. Defaults to 'root'
- :type group: str
- """
- data = relation_get(rid=relation_id, unit=unit)
- ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
- mkdir(path=ssl_dir)
- name = local_unit().replace('/', '_')
- certs = data.get('{}.processed_requests'.format(name))
- chain = data.get('chain')
- ca = data.get('ca')
- if certs:
- certs = json.loads(certs)
- install_ca_cert(ca.encode())
- install_certs(ssl_dir, certs, chain, user=user, group=group)
- create_ip_cert_links(
- ssl_dir,
- custom_hostname_link=custom_hostname_link)
-
-
-def get_requests_for_local_unit(relation_name=None):
- """Extract any certificates data targeted at this unit down relation_name.
-
- :param relation_name: str Name of relation to check for data.
- :returns: List of bundles of certificates.
- :rtype: List of dicts
- """
- local_name = local_unit().replace('/', '_')
- raw_certs_key = '{}.processed_requests'.format(local_name)
- relation_name = relation_name or 'certificates'
- bundles = []
- for rid in relation_ids(relation_name):
- for unit in related_units(rid):
- data = relation_get(rid=rid, unit=unit)
- if data.get(raw_certs_key):
- bundles.append({
- 'ca': data['ca'],
- 'chain': data.get('chain'),
- 'certs': json.loads(data[raw_certs_key])})
- return bundles
-
-
-def get_bundle_for_cn(cn, relation_name=None):
- """Extract certificates for the given cn.
-
- :param cn: str Canonical Name on certificate.
- :param relation_name: str Relation to check for certificates down.
- :returns: Dictionary of certificate data.
- :rtype: dict.
- """
- entries = get_requests_for_local_unit(relation_name)
- cert_bundle = {}
- for entry in entries:
- for _cn, bundle in entry['certs'].items():
- if _cn == cn:
- cert_bundle = {
- 'cert': bundle['cert'],
- 'key': bundle['key'],
- 'chain': entry['chain'],
- 'ca': entry['ca']}
- break
- if cert_bundle:
- break
- return cert_bundle
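-
-# Minimal usage sketch (hypothetical CN):
-#
-# >>> get_bundle_for_cn('juju-unit-0.maas') # doctest: +SKIP
-# {'cert': '...', 'key': '...', 'chain': '...', 'ca': '...'}
-#
-# An empty dict is returned when no bundle for the CN has arrived yet.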
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
deleted file mode 100644
index d513371..0000000
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ /dev/null
@@ -1,1964 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import collections
-import glob
-import json
-import math
-import os
-import re
-import time
-from base64 import b64decode
-from subprocess import check_call, CalledProcessError
-
-import six
-
-from charmhelpers.fetch import (
- apt_install,
- filter_installed_packages,
-)
-from charmhelpers.core.hookenv import (
- NoNetworkBinding,
- config,
- is_relation_made,
- local_unit,
- log,
- relation_get,
- relation_ids,
- related_units,
- relation_set,
- unit_get,
- unit_private_ip,
- charm_name,
- DEBUG,
- INFO,
- ERROR,
- status_set,
- network_get_primary_address
-)
-
-from charmhelpers.core.sysctl import create as sysctl_create
-from charmhelpers.core.strutils import bool_from_string
-from charmhelpers.contrib.openstack.exceptions import OSContextError
-
-from charmhelpers.core.host import (
- get_bond_master,
- is_phy_iface,
- list_nics,
- get_nic_hwaddr,
- mkdir,
- write_file,
- pwgen,
- lsb_release,
- CompareHostReleases,
- is_container,
-)
-from charmhelpers.contrib.hahelpers.cluster import (
- determine_apache_port,
- determine_api_port,
- https,
- is_clustered,
-)
-from charmhelpers.contrib.hahelpers.apache import (
- get_cert,
- get_ca_cert,
- install_ca_cert,
-)
-from charmhelpers.contrib.openstack.neutron import (
- neutron_plugin_attribute,
- parse_data_port_mappings,
-)
-from charmhelpers.contrib.openstack.ip import (
- resolve_address,
- INTERNAL,
- ADMIN,
- PUBLIC,
- ADDRESS_MAP,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- get_ipv4_addr,
- get_ipv6_addr,
- get_netmask_for_address,
- format_ipv6_addr,
- is_bridge_member,
- is_ipv6_disabled,
- get_relation_ip,
-)
-from charmhelpers.contrib.openstack.utils import (
- config_flags_parser,
- get_os_codename_install_source,
- enable_memcache,
- CompareOpenStackReleases,
- os_release,
-)
-from charmhelpers.core.unitdata import kv
-
-try:
- import psutil
-except ImportError:
- if six.PY2:
- apt_install('python-psutil', fatal=True)
- else:
- apt_install('python3-psutil', fatal=True)
- import psutil
-
-CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
-ADDRESS_TYPES = ['admin', 'internal', 'public']
-HAPROXY_RUN_DIR = '/var/run/haproxy/'
-
-
-def ensure_packages(packages):
- """Install but do not upgrade required plugin packages."""
- required = filter_installed_packages(packages)
- if required:
- apt_install(required, fatal=True)
-
-
-def context_complete(ctxt):
- _missing = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- _missing.append(k)
-
- if _missing:
- log('Missing required data: %s' % ' '.join(_missing), level=INFO)
- return False
-
- return True
-
-
-class OSContextGenerator(object):
- """Base class for all context generators."""
- interfaces = []
- related = False
- complete = False
- missing_data = []
-
- def __call__(self):
- raise NotImplementedError
-
- def context_complete(self, ctxt):
- """Check for missing data for the required context data.
- Set self.missing_data if it exists and return False.
- Set self.complete if no missing data and return True.
- """
- # Fresh start
- self.complete = False
- self.missing_data = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- if k not in self.missing_data:
- self.missing_data.append(k)
-
- if self.missing_data:
- self.complete = False
- log('Missing required data: %s' % ' '.join(self.missing_data),
- level=INFO)
- else:
- self.complete = True
- return self.complete
-
- def get_related(self):
- """Check if any of the context interfaces have relation ids.
- Set self.related and return True if one of the interfaces
- has relation ids.
- """
- # Fresh start
- self.related = False
- try:
- for interface in self.interfaces:
- if relation_ids(interface):
- self.related = True
- return self.related
- except AttributeError as e:
- log("{} {}"
- "".format(self, e), 'INFO')
- return self.related
-
-
-class SharedDBContext(OSContextGenerator):
- interfaces = ['shared-db']
-
- def __init__(self, database=None, user=None, relation_prefix=None,
- ssl_dir=None, relation_id=None):
- """Allows inspecting relation for settings prefixed with
- relation_prefix. This is useful for parsing access for multiple
- databases returned via the shared-db interface (e.g. nova_password,
- quantum_password)
- """
- self.relation_prefix = relation_prefix
- self.database = database
- self.user = user
- self.ssl_dir = ssl_dir
- self.rel_name = self.interfaces[0]
- self.relation_id = relation_id
-
- def __call__(self):
- self.database = self.database or config('database')
- self.user = self.user or config('database-user')
- if None in [self.database, self.user]:
- log("Could not generate shared_db context. Missing required charm "
- "config options. (database name and user)", level=ERROR)
- raise OSContextError
-
- ctxt = {}
-
- # NOTE(jamespage) if mysql charm provides a network upon which
- # access to the database should be made, reconfigure relation
- # with the service unit's local address and defer execution
- access_network = relation_get('access-network')
- if access_network is not None:
- if self.relation_prefix is not None:
- hostname_key = "{}_hostname".format(self.relation_prefix)
- else:
- hostname_key = "hostname"
- access_hostname = get_address_in_network(
- access_network,
- unit_get('private-address'))
- set_hostname = relation_get(attribute=hostname_key,
- unit=local_unit())
- if set_hostname != access_hostname:
- relation_set(relation_settings={hostname_key: access_hostname})
- return None # Defer any further hook execution for now....
-
- password_setting = 'password'
- if self.relation_prefix:
- password_setting = self.relation_prefix + '_password'
-
- if self.relation_id:
- rids = [self.relation_id]
- else:
- rids = relation_ids(self.interfaces[0])
-
- rel = (get_os_codename_install_source(config('openstack-origin')) or
- 'icehouse')
- for rid in rids:
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- host = rdata.get('db_host')
- host = format_ipv6_addr(host) or host
- ctxt = {
- 'database_host': host,
- 'database': self.database,
- 'database_user': self.user,
- 'database_password': rdata.get(password_setting),
- 'database_type': 'mysql+pymysql'
- }
- if CompareOpenStackReleases(rel) < 'stein':
- ctxt['database_type'] = 'mysql'
- if self.context_complete(ctxt):
- db_ssl(rdata, ctxt, self.ssl_dir)
- return ctxt
- return {}
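-
-# Illustrative context produced for a complete shared-db relation
-# (hypothetical values); templates typically interpolate these into an
-# oslo.db / SQLAlchemy connection string:
-#
-# {'database_host': '10.0.0.5', 'database': 'nova',
-# 'database_user': 'nova', 'database_password': 'secret',
-# 'database_type': 'mysql+pymysql'}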
-
-
-class PostgresqlDBContext(OSContextGenerator):
- interfaces = ['pgsql-db']
-
- def __init__(self, database=None):
- self.database = database
-
- def __call__(self):
- self.database = self.database or config('database')
- if self.database is None:
- log('Could not generate postgresql_db context. Missing required '
- 'charm config options. (database name)', level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rel_host = relation_get('host', rid=rid, unit=unit)
- rel_user = relation_get('user', rid=rid, unit=unit)
- rel_passwd = relation_get('password', rid=rid, unit=unit)
- ctxt = {'database_host': rel_host,
- 'database': self.database,
- 'database_user': rel_user,
- 'database_password': rel_passwd,
- 'database_type': 'postgresql'}
- if self.context_complete(ctxt):
- return ctxt
-
- return {}
-
-
-def db_ssl(rdata, ctxt, ssl_dir):
- if 'ssl_ca' in rdata and ssl_dir:
- ca_path = os.path.join(ssl_dir, 'db-client.ca')
- with open(ca_path, 'wb') as fh:
- fh.write(b64decode(rdata['ssl_ca']))
-
- ctxt['database_ssl_ca'] = ca_path
- elif 'ssl_ca' in rdata:
- log("Charm not setup for ssl support but ssl ca found", level=INFO)
- return ctxt
-
- if 'ssl_cert' in rdata:
- cert_path = os.path.join(
- ssl_dir, 'db-client.cert')
- if not os.path.exists(cert_path):
- log("Waiting 1m for ssl client cert validity", level=INFO)
- time.sleep(60)
-
- with open(cert_path, 'wb') as fh:
- fh.write(b64decode(rdata['ssl_cert']))
-
- ctxt['database_ssl_cert'] = cert_path
- key_path = os.path.join(ssl_dir, 'db-client.key')
- with open(key_path, 'wb') as fh:
- fh.write(b64decode(rdata['ssl_key']))
-
- ctxt['database_ssl_key'] = key_path
-
- return ctxt
-
-
-class IdentityServiceContext(OSContextGenerator):
-
- def __init__(self,
- service=None,
- service_user=None,
- rel_name='identity-service'):
- self.service = service
- self.service_user = service_user
- self.rel_name = rel_name
- self.interfaces = [self.rel_name]
-
- def _setup_pki_cache(self):
- if self.service and self.service_user:
- # This is required for pki token signing if we don't want /tmp to
- # be used.
- cachedir = '/var/cache/%s' % (self.service)
- if not os.path.isdir(cachedir):
- log("Creating service cache dir %s" % (cachedir), level=DEBUG)
- mkdir(path=cachedir, owner=self.service_user,
- group=self.service_user, perms=0o700)
-
- return cachedir
- return None
-
- def __call__(self):
- log('Generating template context for ' + self.rel_name, level=DEBUG)
- ctxt = {}
-
- cachedir = self._setup_pki_cache()
- if cachedir:
- ctxt['signing_dir'] = cachedir
-
- for rid in relation_ids(self.rel_name):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- serv_host = rdata.get('service_host')
- serv_host = format_ipv6_addr(serv_host) or serv_host
- auth_host = rdata.get('auth_host')
- auth_host = format_ipv6_addr(auth_host) or auth_host
- svc_protocol = rdata.get('service_protocol') or 'http'
- auth_protocol = rdata.get('auth_protocol') or 'http'
- api_version = rdata.get('api_version') or '2.0'
- ctxt.update({'service_port': rdata.get('service_port'),
- 'service_host': serv_host,
- 'auth_host': auth_host,
- 'auth_port': rdata.get('auth_port'),
- 'admin_tenant_name': rdata.get('service_tenant'),
- 'admin_user': rdata.get('service_username'),
- 'admin_password': rdata.get('service_password'),
- 'service_protocol': svc_protocol,
- 'auth_protocol': auth_protocol,
- 'api_version': api_version})
-
- if float(api_version) > 2:
- ctxt.update({'admin_domain_name':
- rdata.get('service_domain')})
-
- if self.context_complete(ctxt):
- # NOTE(jamespage) this is required for >= icehouse
- # so a missing value just indicates keystone needs
- # upgrading
- ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
- ctxt['admin_domain_id'] = rdata.get('service_domain_id')
- return ctxt
-
- return {}
-
-
-class IdentityCredentialsContext(IdentityServiceContext):
- '''Context for identity-credentials interface type'''
-
- def __init__(self,
- service=None,
- service_user=None,
- rel_name='identity-credentials'):
- super(IdentityCredentialsContext, self).__init__(service,
- service_user,
- rel_name)
-
- def __call__(self):
- log('Generating template context for ' + self.rel_name, level=DEBUG)
- ctxt = {}
-
- cachedir = self._setup_pki_cache()
- if cachedir:
- ctxt['signing_dir'] = cachedir
-
- for rid in relation_ids(self.rel_name):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- credentials_host = rdata.get('credentials_host')
- credentials_host = (
- format_ipv6_addr(credentials_host) or credentials_host
- )
- auth_host = rdata.get('auth_host')
- auth_host = format_ipv6_addr(auth_host) or auth_host
- svc_protocol = rdata.get('credentials_protocol') or 'http'
- auth_protocol = rdata.get('auth_protocol') or 'http'
- api_version = rdata.get('api_version') or '2.0'
- ctxt.update({
- 'service_port': rdata.get('credentials_port'),
- 'service_host': credentials_host,
- 'auth_host': auth_host,
- 'auth_port': rdata.get('auth_port'),
- 'admin_tenant_name': rdata.get('credentials_project'),
- 'admin_tenant_id': rdata.get('credentials_project_id'),
- 'admin_user': rdata.get('credentials_username'),
- 'admin_password': rdata.get('credentials_password'),
- 'service_protocol': svc_protocol,
- 'auth_protocol': auth_protocol,
- 'api_version': api_version
- })
-
- if float(api_version) > 2:
- ctxt.update({'admin_domain_name':
- rdata.get('domain')})
-
- if self.context_complete(ctxt):
- return ctxt
-
- return {}
-
-
-class AMQPContext(OSContextGenerator):
-
- def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None,
- relation_id=None):
- self.ssl_dir = ssl_dir
- self.rel_name = rel_name
- self.relation_prefix = relation_prefix
- self.interfaces = [rel_name]
- self.relation_id = relation_id
-
- def __call__(self):
- log('Generating template context for amqp', level=DEBUG)
- conf = config()
- if self.relation_prefix:
- user_setting = '%s-rabbit-user' % (self.relation_prefix)
- vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
- else:
- user_setting = 'rabbit-user'
- vhost_setting = 'rabbit-vhost'
-
- try:
- username = conf[user_setting]
- vhost = conf[vhost_setting]
- except KeyError as e:
- log('Could not generate amqp context. Missing required charm '
- 'config options: %s.' % e, level=ERROR)
- raise OSContextError
-
- ctxt = {}
- if self.relation_id:
- rids = [self.relation_id]
- else:
- rids = relation_ids(self.rel_name)
- for rid in rids:
- ha_vip_only = False
- self.related = True
- transport_hosts = None
- rabbitmq_port = '5672'
- for unit in related_units(rid):
- if relation_get('clustered', rid=rid, unit=unit):
- ctxt['clustered'] = True
- vip = relation_get('vip', rid=rid, unit=unit)
- vip = format_ipv6_addr(vip) or vip
- ctxt['rabbitmq_host'] = vip
- transport_hosts = [vip]
- else:
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- ctxt['rabbitmq_host'] = host
- transport_hosts = [host]
-
- ctxt.update({
- 'rabbitmq_user': username,
- 'rabbitmq_password': relation_get('password', rid=rid,
- unit=unit),
- 'rabbitmq_virtual_host': vhost,
- })
-
- ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
- if ssl_port:
- ctxt['rabbit_ssl_port'] = ssl_port
- rabbitmq_port = ssl_port
-
- ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
- if ssl_ca:
- ctxt['rabbit_ssl_ca'] = ssl_ca
-
- if relation_get('ha_queues', rid=rid, unit=unit) is not None:
- ctxt['rabbitmq_ha_queues'] = True
-
- ha_vip_only = relation_get('ha-vip-only',
- rid=rid, unit=unit) is not None
-
- if self.context_complete(ctxt):
- if 'rabbit_ssl_ca' in ctxt:
- if not self.ssl_dir:
- log("Charm not setup for ssl support but ssl ca "
- "found", level=INFO)
- break
-
- ca_path = os.path.join(
- self.ssl_dir, 'rabbit-client-ca.pem')
- with open(ca_path, 'wb') as fh:
- fh.write(b64decode(ctxt['rabbit_ssl_ca']))
- ctxt['rabbit_ssl_ca'] = ca_path
-
- # Sufficient information found = break out!
- break
-
- # Used for active/active rabbitmq >= grizzly
- if (('clustered' not in ctxt or ha_vip_only) and
- len(related_units(rid)) > 1):
- rabbitmq_hosts = []
- for unit in related_units(rid):
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- rabbitmq_hosts.append(host)
-
- rabbitmq_hosts = sorted(rabbitmq_hosts)
- ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
- transport_hosts = rabbitmq_hosts
-
- if transport_hosts:
- transport_url_hosts = ','.join([
- "{}:{}@{}:{}".format(ctxt['rabbitmq_user'],
- ctxt['rabbitmq_password'],
- host_,
- rabbitmq_port)
- for host_ in transport_hosts])
- ctxt['transport_url'] = "rabbit://{}/{}".format(
- transport_url_hosts, vhost)
-
- oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
- if oslo_messaging_flags:
- ctxt['oslo_messaging_flags'] = config_flags_parser(
- oslo_messaging_flags)
-
- if not self.complete:
- return {}
-
- return ctxt
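-
-# Illustrative transport_url assembled above for two rabbitmq units
-# (hypothetical credentials and hosts):
-#
-# rabbit://user:pass@10.0.0.21:5672,user:pass@10.0.0.22:5672/openstack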
-
-
-class CephContext(OSContextGenerator):
- """Generates context for /etc/ceph/ceph.conf templates."""
- interfaces = ['ceph']
-
- def __call__(self):
- if not relation_ids('ceph'):
- return {}
-
- log('Generating template context for ceph', level=DEBUG)
- mon_hosts = []
- ctxt = {
- 'use_syslog': str(config('use-syslog')).lower()
- }
- for rid in relation_ids('ceph'):
- for unit in related_units(rid):
- if not ctxt.get('auth'):
- ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
- if not ctxt.get('key'):
- ctxt['key'] = relation_get('key', rid=rid, unit=unit)
- if not ctxt.get('rbd_features'):
- default_features = relation_get('rbd-features', rid=rid, unit=unit)
- if default_features is not None:
- ctxt['rbd_features'] = default_features
-
- ceph_addrs = relation_get('ceph-public-address', rid=rid,
- unit=unit)
- if ceph_addrs:
- for addr in ceph_addrs.split(' '):
- mon_hosts.append(format_ipv6_addr(addr) or addr)
- else:
- priv_addr = relation_get('private-address', rid=rid,
- unit=unit)
- mon_hosts.append(format_ipv6_addr(priv_addr) or priv_addr)
-
- ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
-
- if not os.path.isdir('/etc/ceph'):
- os.mkdir('/etc/ceph')
-
- if not self.context_complete(ctxt):
- return {}
-
- ensure_packages(['ceph-common'])
- return ctxt
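-
-# Illustrative ceph context (hypothetical values) as consumed by the
-# ceph.conf template:
-#
-# {'use_syslog': 'false', 'auth': 'cephx', 'key': 'AQB...',
-# 'mon_hosts': '10.0.0.31 10.0.0.32 10.0.0.33'}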
-
-
-class HAProxyContext(OSContextGenerator):
- """Provides half a context for the haproxy template, which describes
- all peers to be included in the cluster. Each charm needs to include
- its own context generator that describes the port mapping.
-
- :side effect: mkdir is called on HAPROXY_RUN_DIR
- """
- interfaces = ['cluster']
-
- def __init__(self, singlenode_mode=False,
- address_types=ADDRESS_TYPES):
- self.address_types = address_types
- self.singlenode_mode = singlenode_mode
-
- def __call__(self):
- if not os.path.isdir(HAPROXY_RUN_DIR):
- mkdir(path=HAPROXY_RUN_DIR)
- if not relation_ids('cluster') and not self.singlenode_mode:
- return {}
-
- l_unit = local_unit().replace('/', '-')
- cluster_hosts = collections.OrderedDict()
-
- # NOTE(jamespage): build out map of configured network endpoints
- # and associated backends
- for addr_type in self.address_types:
- cfg_opt = 'os-{}-network'.format(addr_type)
- # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather
- # than 'internal'
- if addr_type == 'internal':
- _addr_map_type = INTERNAL
- else:
- _addr_map_type = addr_type
- # Network spaces aware
- laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'],
- config(cfg_opt))
- if laddr:
- netmask = get_netmask_for_address(laddr)
- cluster_hosts[laddr] = {
- 'network': "{}/{}".format(laddr,
- netmask),
- 'backends': collections.OrderedDict([(l_unit,
- laddr)])
- }
- for rid in relation_ids('cluster'):
- for unit in sorted(related_units(rid)):
- # API Charms will need to set {addr_type}-address with
- # get_relation_ip(addr_type)
- _laddr = relation_get('{}-address'.format(addr_type),
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[laddr]['backends'][_unit] = _laddr
-
- # NOTE(jamespage) add backend based on get_relation_ip - this
- # will either be the only backend or the fallback if no acls
- # match in the frontend
- # Network spaces aware
- addr = get_relation_ip('cluster')
- cluster_hosts[addr] = {}
- netmask = get_netmask_for_address(addr)
- cluster_hosts[addr] = {
- 'network': "{}/{}".format(addr, netmask),
- 'backends': collections.OrderedDict([(l_unit,
- addr)])
- }
- for rid in relation_ids('cluster'):
- for unit in sorted(related_units(rid)):
- # API Charms will need to set their private-address with
- # get_relation_ip('cluster')
- _laddr = relation_get('private-address',
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[addr]['backends'][_unit] = _laddr
-
- ctxt = {
- 'frontends': cluster_hosts,
- 'default_backend': addr
- }
-
- if config('haproxy-server-timeout'):
- ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
-
- if config('haproxy-client-timeout'):
- ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
-
- if config('haproxy-queue-timeout'):
- ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
-
- if config('haproxy-connect-timeout'):
- ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
-
- if config('prefer-ipv6'):
- ctxt['local_host'] = 'ip6-localhost'
- ctxt['haproxy_host'] = '::'
- else:
- ctxt['local_host'] = '127.0.0.1'
- ctxt['haproxy_host'] = '0.0.0.0'
-
- ctxt['ipv6_enabled'] = not is_ipv6_disabled()
-
- ctxt['stat_port'] = '8888'
-
- db = kv()
- ctxt['stat_password'] = db.get('stat-password')
- if not ctxt['stat_password']:
- ctxt['stat_password'] = db.set('stat-password',
- pwgen(32))
- db.flush()
-
- for frontend in cluster_hosts:
- if (len(cluster_hosts[frontend]['backends']) > 1 or
- self.singlenode_mode):
- # Enable haproxy when we have enough peers.
- log('Ensuring haproxy enabled in /etc/default/haproxy.',
- level=DEBUG)
- with open('/etc/default/haproxy', 'w') as out:
- out.write('ENABLED=1\n')
-
- return ctxt
-
- log('HAProxy context is incomplete, this unit has no peers.',
- level=INFO)
- return {}
-
-
-class ImageServiceContext(OSContextGenerator):
- interfaces = ['image-service']
-
- def __call__(self):
- """Obtains the glance API server from the image-service relation.
- Useful in nova and cinder (currently).
- """
- log('Generating template context for image-service.', level=DEBUG)
- rids = relation_ids('image-service')
- if not rids:
- return {}
-
- for rid in rids:
- for unit in related_units(rid):
- api_server = relation_get('glance-api-server',
- rid=rid, unit=unit)
- if api_server:
- return {'glance_api_servers': api_server}
-
- log("ImageService context is incomplete. Missing required relation "
- "data.", level=INFO)
- return {}
-
-
-class ApacheSSLContext(OSContextGenerator):
- """Generates a context for an apache vhost configuration that configures
- HTTPS reverse proxying for one or many endpoints. Generated context
- looks something like::
-
- {
- 'namespace': 'cinder',
- 'private_address': 'iscsi.mycinderhost.com',
- 'endpoints': [(8776, 8766), (8777, 8767)]
- }
-
- The endpoints list consists of tuples mapping external ports
- to internal ports.
- """
- interfaces = ['https']
-
- # charms should inherit this context and set external ports
- # and service namespace accordingly.
- external_ports = []
- service_namespace = None
- user = group = 'root'
-
- def enable_modules(self):
- cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers']
- check_call(cmd)
-
- def configure_cert(self, cn=None):
- ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
- mkdir(path=ssl_dir)
- cert, key = get_cert(cn)
- if cert and key:
- if cn:
- cert_filename = 'cert_{}'.format(cn)
- key_filename = 'key_{}'.format(cn)
- else:
- cert_filename = 'cert'
- key_filename = 'key'
-
- write_file(path=os.path.join(ssl_dir, cert_filename),
- content=b64decode(cert), owner=self.user,
- group=self.group, perms=0o640)
- write_file(path=os.path.join(ssl_dir, key_filename),
- content=b64decode(key), owner=self.user,
- group=self.group, perms=0o640)
-
- def configure_ca(self):
- ca_cert = get_ca_cert()
- if ca_cert:
- install_ca_cert(b64decode(ca_cert))
-
- def canonical_names(self):
- """Figure out which canonical names clients will access this service.
- """
- cns = []
- for r_id in relation_ids('identity-service'):
- for unit in related_units(r_id):
- rdata = relation_get(rid=r_id, unit=unit)
- for k in rdata:
- if k.startswith('ssl_key_'):
- # Strip the 'ssl_key_' prefix; lstrip() strips characters, not
- # a prefix.
- cns.append(k[len('ssl_key_'):])
-
- return sorted(list(set(cns)))
-
- def get_network_addresses(self):
- """For each network configured, return corresponding address and
- hostnamr or vip (if available).
-
- Returns a list of tuples of the form:
-
- [(address_in_net_a, hostname_in_net_a),
- (address_in_net_b, hostname_in_net_b),
- ...]
-
- or, if no hostname(s) available:
-
- [(address_in_net_a, vip_in_net_a),
- (address_in_net_b, vip_in_net_b),
- ...]
-
- or, if no vip(s) available:
-
- [(address_in_net_a, address_in_net_a),
- (address_in_net_b, address_in_net_b),
- ...]
- """
- addresses = []
- for net_type in [INTERNAL, ADMIN, PUBLIC]:
- net_config = config(ADDRESS_MAP[net_type]['config'])
- # NOTE(jamespage): Fallback must always be private address
- # as this is used to bind services on the
- # local unit.
- fallback = unit_get("private-address")
- if net_config:
- addr = get_address_in_network(net_config,
- fallback)
- else:
- try:
- addr = network_get_primary_address(
- ADDRESS_MAP[net_type]['binding']
- )
- except (NotImplementedError, NoNetworkBinding):
- addr = fallback
-
- endpoint = resolve_address(net_type)
- addresses.append((addr, endpoint))
-
- return sorted(set(addresses))
-
- def __call__(self):
- if isinstance(self.external_ports, six.string_types):
- self.external_ports = [self.external_ports]
-
- if not self.external_ports or not https():
- return {}
-
- use_keystone_ca = True
- for rid in relation_ids('certificates'):
- if related_units(rid):
- use_keystone_ca = False
-
- if use_keystone_ca:
- self.configure_ca()
-
- self.enable_modules()
-
- ctxt = {'namespace': self.service_namespace,
- 'endpoints': [],
- 'ext_ports': []}
-
- if use_keystone_ca:
- cns = self.canonical_names()
- if cns:
- for cn in cns:
- self.configure_cert(cn)
- else:
- # Expect cert/key provided in config (currently assumed that ca
- # uses ip for cn)
- for net_type in (INTERNAL, ADMIN, PUBLIC):
- cn = resolve_address(endpoint_type=net_type)
- self.configure_cert(cn)
-
- addresses = self.get_network_addresses()
- for address, endpoint in addresses:
- for api_port in self.external_ports:
- ext_port = determine_apache_port(api_port,
- singlenode_mode=True)
- int_port = determine_api_port(api_port, singlenode_mode=True)
- portmap = (address, endpoint, int(ext_port), int(int_port))
- ctxt['endpoints'].append(portmap)
- ctxt['ext_ports'].append(int(ext_port))
-
- ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
- return ctxt
-
-
-class NeutronContext(OSContextGenerator):
- interfaces = []
-
- @property
- def plugin(self):
- return None
-
- @property
- def network_manager(self):
- return None
-
- @property
- def packages(self):
- return neutron_plugin_attribute(self.plugin, 'packages',
- self.network_manager)
-
- @property
- def neutron_security_groups(self):
- return None
-
- def _ensure_packages(self):
- for pkgs in self.packages:
- ensure_packages(pkgs)
-
- def ovs_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'ovs',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return ovs_ctxt
-
- def nuage_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nuage_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'vsp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nuage_ctxt
-
- def nvp_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nvp_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'nvp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nvp_ctxt
-
- def n1kv_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- n1kv_user_config_flags = config('n1kv-config-flags')
- restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
- n1kv_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'n1kv',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': n1kv_config,
- 'vsm_ip': config('n1kv-vsm-ip'),
- 'vsm_username': config('n1kv-vsm-username'),
- 'vsm_password': config('n1kv-vsm-password'),
- 'restrict_policy_profiles': restrict_policy_profiles}
-
- if n1kv_user_config_flags:
- flags = config_flags_parser(n1kv_user_config_flags)
- n1kv_ctxt['user_config_flags'] = flags
-
- return n1kv_ctxt
-
- def calico_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- calico_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'Calico',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return calico_ctxt
-
- def neutron_ctxt(self):
- if https():
- proto = 'https'
- else:
- proto = 'http'
-
- if is_clustered():
- host = config('vip')
- else:
- host = unit_get('private-address')
-
- ctxt = {'network_manager': self.network_manager,
- 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
- return ctxt
-
- def pg_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'plumgrid',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
- return ovs_ctxt
-
- def midonet_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- midonet_config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- mido_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'midonet',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': midonet_config}
-
- return mido_ctxt
-
- def __call__(self):
- if self.network_manager not in ['quantum', 'neutron']:
- return {}
-
- if not self.plugin:
- return {}
-
- ctxt = self.neutron_ctxt()
-
- if self.plugin == 'ovs':
- ctxt.update(self.ovs_ctxt())
- elif self.plugin in ['nvp', 'nsx']:
- ctxt.update(self.nvp_ctxt())
- elif self.plugin == 'n1kv':
- ctxt.update(self.n1kv_ctxt())
- elif self.plugin == 'Calico':
- ctxt.update(self.calico_ctxt())
- elif self.plugin == 'vsp':
- ctxt.update(self.nuage_ctxt())
- elif self.plugin == 'plumgrid':
- ctxt.update(self.pg_ctxt())
- elif self.plugin == 'midonet':
- ctxt.update(self.midonet_ctxt())
-
- alchemy_flags = config('neutron-alchemy-flags')
- if alchemy_flags:
- flags = config_flags_parser(alchemy_flags)
- ctxt['neutron_alchemy_flags'] = flags
-
- return ctxt
-
-
-class NeutronPortContext(OSContextGenerator):
-
- def resolve_ports(self, ports):
- """Resolve NICs not yet bound to bridge(s)
-
- If hwaddress provided then returns resolved hwaddress otherwise NIC.
- """
- if not ports:
- return None
-
- hwaddr_to_nic = {}
- hwaddr_to_ip = {}
- for nic in list_nics():
- # Ignore virtual interfaces (bond masters will be identified from
- # their slaves)
- if not is_phy_iface(nic):
- continue
-
- _nic = get_bond_master(nic)
- if _nic:
- log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
- level=DEBUG)
- nic = _nic
-
- hwaddr = get_nic_hwaddr(nic)
- hwaddr_to_nic[hwaddr] = nic
- addresses = get_ipv4_addr(nic, fatal=False)
- addresses += get_ipv6_addr(iface=nic, fatal=False)
- hwaddr_to_ip[hwaddr] = addresses
-
- resolved = []
- mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
- for entry in ports:
- if re.match(mac_regex, entry):
- # NIC is in known NICs and does NOT have an IP address
- if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
- # If the nic is part of a bridge then don't use it
- if is_bridge_member(hwaddr_to_nic[entry]):
- continue
-
- # Entry is a MAC address for a valid interface that doesn't
- # have an IP address assigned yet.
- resolved.append(hwaddr_to_nic[entry])
- else:
- # If the passed entry is not a MAC address, assume it's a valid
- # interface, and that the user put it there on purpose (we can
- # trust it to be the real external network).
- resolved.append(entry)
-
- # Ensure no duplicates
- return list(set(resolved))
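-
-# Minimal sketch of the resolution rules (hypothetical devices): a MAC
-# entry matching an unconfigured physical NIC resolves to the NIC name,
-# while plain interface names pass through untouched:
-#
-# >>> resolve_ports(['aa:bb:cc:dd:ee:ff', 'eth2']) # doctest: +SKIP
-# ['eth1', 'eth2']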
-
-
-class OSConfigFlagContext(OSContextGenerator):
- """Provides support for user-defined config flags.
-
- Users can define a comma-separated list of key=value pairs
- in the charm configuration and apply them at any point in
- any file by using a template flag.
-
- Sometimes users might want config flags inserted within a
- specific section so this class allows users to specify the
- template flag name, allowing for multiple template flags
- (sections) within the same context.
-
- NOTE: the value of config-flags may be a comma-separated list of
- key=value pairs and some OpenStack config files support
- comma-separated lists as values.
- """
-
- def __init__(self, charm_flag='config-flags',
- template_flag='user_config_flags'):
- """
- :param charm_flag: config flags in charm configuration.
- :param template_flag: insert point for user-defined flags in template
- file.
- """
- super(OSConfigFlagContext, self).__init__()
- self._charm_flag = charm_flag
- self._template_flag = template_flag
-
- def __call__(self):
- config_flags = config(self._charm_flag)
- if not config_flags:
- return {}
-
- return {self._template_flag:
- config_flags_parser(config_flags)}
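-
-# Illustrative round trip (hypothetical charm config): with config-flags
-# set to 'debug=True,osapi_compute_workers=4', the context renders as
-# something like
-#
-# {'user_config_flags': {'debug': 'True', 'osapi_compute_workers': '4'}}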
-
-
-class LibvirtConfigFlagsContext(OSContextGenerator):
- """
- This context provides support for extending
- the libvirt section through user-defined flags.
- """
- def __call__(self):
- ctxt = {}
- libvirt_flags = config('libvirt-flags')
- if libvirt_flags:
- ctxt['libvirt_flags'] = config_flags_parser(
- libvirt_flags)
- return ctxt
-
-
-class SubordinateConfigContext(OSContextGenerator):
-
- """
- Responsible for inspecting relations to subordinates that
- may be exporting required config via a json blob.
-
- The subordinate interface allows subordinates to export their
- configuration requirements to the principal for multiple config
- files and multiple services. I.e., a subordinate that has interfaces
- to both glance and nova may export the following yaml blob as json::
-
- glance:
- /etc/glance/glance-api.conf:
- sections:
- DEFAULT:
- - [key1, value1]
- /etc/glance/glance-registry.conf:
- MYSECTION:
- - [key2, value2]
- nova:
- /etc/nova/nova.conf:
- sections:
- DEFAULT:
- - [key3, value3]
-
-
- It is then up to the principal charms to subscribe this context to
- the service+config file it is interested in. Configuration data will
- be available in the template context, in glance's case, as::
-
- ctxt = {
- ... other context ...
- 'subordinate_configuration': {
- 'DEFAULT': {
- 'key1': 'value1',
- },
- 'MYSECTION': {
- 'key2': 'value2',
- },
- }
- }
- """
-
- def __init__(self, service, config_file, interface):
- """
- :param service : Service name key to query in any subordinate
- data found
- :param config_file : Service's config file to query sections
- :param interface : Subordinate interface to inspect
- """
- self.config_file = config_file
- if isinstance(service, list):
- self.services = service
- else:
- self.services = [service]
- if isinstance(interface, list):
- self.interfaces = interface
- else:
- self.interfaces = [interface]
-
- def __call__(self):
- ctxt = {'sections': {}}
- rids = []
- for interface in self.interfaces:
- rids.extend(relation_ids(interface))
- for rid in rids:
- for unit in related_units(rid):
- sub_config = relation_get('subordinate_configuration',
- rid=rid, unit=unit)
- if sub_config and sub_config != '':
- try:
- sub_config = json.loads(sub_config)
- except Exception:
- log('Could not parse JSON from '
- 'subordinate_configuration setting from %s'
- % rid, level=ERROR)
- continue
-
- for service in self.services:
- if service not in sub_config:
- log('Found subordinate_configuration on %s but it '
- 'contained nothing for %s service'
- % (rid, service), level=INFO)
- continue
-
- sub_config = sub_config[service]
- if self.config_file not in sub_config:
- log('Found subordinate_configuration on %s but it '
- 'contained nothing for %s'
- % (rid, self.config_file), level=INFO)
- continue
-
- sub_config = sub_config[self.config_file]
- for k, v in six.iteritems(sub_config):
- if k == 'sections':
- for section, config_list in six.iteritems(v):
- log("adding section '%s'" % (section),
- level=DEBUG)
- if ctxt[k].get(section):
- ctxt[k][section].extend(config_list)
- else:
- ctxt[k][section] = config_list
- else:
- ctxt[k] = v
- log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
- return ctxt
-
-
-class LogLevelContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {}
- ctxt['debug'] = \
- False if config('debug') is None else config('debug')
- ctxt['verbose'] = \
- False if config('verbose') is None else config('verbose')
-
- return ctxt
-
-
-class SyslogContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {'use_syslog': config('use-syslog')}
- return ctxt
-
-
-class BindHostContext(OSContextGenerator):
-
- def __call__(self):
- if config('prefer-ipv6'):
- return {'bind_host': '::'}
- else:
- return {'bind_host': '0.0.0.0'}
-
-
-MAX_DEFAULT_WORKERS = 4
-DEFAULT_MULTIPLIER = 2
-
-
-def _calculate_workers():
- '''
- Determine the number of worker processes based on the CPU
- count of the unit containing the application.
-
- Workers will be limited to MAX_DEFAULT_WORKERS in
- container environments where no worker-multiplier configuration
- option has been set.
-
- @returns int: number of worker processes to use
- '''
- multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER
- count = int(_num_cpus() * multiplier)
- if multiplier > 0 and count == 0:
- count = 1
-
- if config('worker-multiplier') is None and is_container():
- # NOTE(jamespage): Limit unconfigured worker-multiplier
- # to MAX_DEFAULT_WORKERS to avoid insane
- # worker configuration in LXD containers
- # on large servers
- # Reference: https://pad.lv/1665270
- count = min(count, MAX_DEFAULT_WORKERS)
-
- return count
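-
-# Worked example (illustrative): on an 8-core bare-metal unit with the
-# default multiplier of 2, count = 8 * 2 = 16 workers. In a LXD container
-# with no worker-multiplier set, the same math is capped at
-# MAX_DEFAULT_WORKERS, i.e. min(16, 4) = 4.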
-
-
-def _num_cpus():
- '''
- Compatibility wrapper for calculating the number of CPUs
- a unit has.
-
- @returns: int: number of CPU cores detected
- '''
- try:
- return psutil.cpu_count()
- except AttributeError:
- return psutil.NUM_CPUS
-
-
-class WorkerConfigContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {"workers": _calculate_workers()}
- return ctxt
-
-
-class WSGIWorkerConfigContext(WorkerConfigContext):
-
- def __init__(self, name=None, script=None, admin_script=None,
- public_script=None, user=None, group=None,
- process_weight=1.00,
- admin_process_weight=0.25, public_process_weight=0.75):
- self.service_name = name
- self.user = user or name
- self.group = group or name
- self.script = script
- self.admin_script = admin_script
- self.public_script = public_script
- self.process_weight = process_weight
- self.admin_process_weight = admin_process_weight
- self.public_process_weight = public_process_weight
-
- def __call__(self):
- total_processes = _calculate_workers()
- ctxt = {
- "service_name": self.service_name,
- "user": self.user,
- "group": self.group,
- "script": self.script,
- "admin_script": self.admin_script,
- "public_script": self.public_script,
- "processes": int(math.ceil(self.process_weight * total_processes)),
- "admin_processes": int(math.ceil(self.admin_process_weight *
- total_processes)),
- "public_processes": int(math.ceil(self.public_process_weight *
- total_processes)),
- "threads": 1,
- }
- return ctxt
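-
-# Worked example (illustrative): with _calculate_workers() == 4 and the
-# default weights, ceil(1.00 * 4) = 4 main processes,
-# ceil(0.25 * 4) = 1 admin process and ceil(0.75 * 4) = 3 public processes.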
-
-
-class ZeroMQContext(OSContextGenerator):
- interfaces = ['zeromq-configuration']
-
- def __call__(self):
- ctxt = {}
- if is_relation_made('zeromq-configuration', 'host'):
- for rid in relation_ids('zeromq-configuration'):
- for unit in related_units(rid):
- ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
- ctxt['zmq_host'] = relation_get('host', unit, rid)
- ctxt['zmq_redis_address'] = relation_get(
- 'zmq_redis_address', unit, rid)
-
- return ctxt
-
-
-class NotificationDriverContext(OSContextGenerator):
-
- def __init__(self, zmq_relation='zeromq-configuration',
- amqp_relation='amqp'):
- """
- :param zmq_relation: Name of Zeromq relation to check
- """
- self.zmq_relation = zmq_relation
- self.amqp_relation = amqp_relation
-
- def __call__(self):
- ctxt = {'notifications': 'False'}
- if is_relation_made(self.amqp_relation):
- ctxt['notifications'] = "True"
-
- return ctxt
-
-
-class SysctlContext(OSContextGenerator):
- """This context check if the 'sysctl' option exists on configuration
- then creates a file with the loaded contents"""
- def __call__(self):
- sysctl_dict = config('sysctl')
- if sysctl_dict:
- sysctl_create(sysctl_dict,
- '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
- return {'sysctl': sysctl_dict}
-
-
-class NeutronAPIContext(OSContextGenerator):
- '''
- Inspects the current neutron-plugin-api relation for neutron settings.
- Returns defaults if it is not present.
- '''
- interfaces = ['neutron-plugin-api']
-
- def __call__(self):
- self.neutron_defaults = {
- 'l2_population': {
- 'rel_key': 'l2-population',
- 'default': False,
- },
- 'overlay_network_type': {
- 'rel_key': 'overlay-network-type',
- 'default': 'gre',
- },
- 'neutron_security_groups': {
- 'rel_key': 'neutron-security-groups',
- 'default': False,
- },
- 'network_device_mtu': {
- 'rel_key': 'network-device-mtu',
- 'default': None,
- },
- 'enable_dvr': {
- 'rel_key': 'enable-dvr',
- 'default': False,
- },
- 'enable_l3ha': {
- 'rel_key': 'enable-l3ha',
- 'default': False,
- },
- 'dns_domain': {
- 'rel_key': 'dns-domain',
- 'default': None,
- },
- 'polling_interval': {
- 'rel_key': 'polling-interval',
- 'default': 2,
- },
- 'rpc_response_timeout': {
- 'rel_key': 'rpc-response-timeout',
- 'default': 60,
- },
- 'report_interval': {
- 'rel_key': 'report-interval',
- 'default': 30,
- },
- 'enable_qos': {
- 'rel_key': 'enable-qos',
- 'default': False,
- },
- 'enable_nsg_logging': {
- 'rel_key': 'enable-nsg-logging',
- 'default': False,
- },
- }
- ctxt = self.get_neutron_options({})
- for rid in relation_ids('neutron-plugin-api'):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- # The l2-population key is used by the context as a way of
- # checking if the api service on the other end is sending data
- # in a recent format.
- if 'l2-population' in rdata:
- ctxt.update(self.get_neutron_options(rdata))
-
- extension_drivers = []
-
- if ctxt['enable_qos']:
- extension_drivers.append('qos')
-
- if ctxt['enable_nsg_logging']:
- extension_drivers.append('log')
-
- ctxt['extension_drivers'] = ','.join(extension_drivers)
-
- return ctxt
-
- def get_neutron_options(self, rdata):
- settings = {}
- for nkey in self.neutron_defaults.keys():
- defv = self.neutron_defaults[nkey]['default']
- rkey = self.neutron_defaults[nkey]['rel_key']
- if rkey in rdata.keys():
- if type(defv) is bool:
- settings[nkey] = bool_from_string(rdata[rkey])
- else:
- settings[nkey] = rdata[rkey]
- else:
- settings[nkey] = defv
- return settings
-
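The per-key resolution in get_neutron_options() is: take the relation value when its rel_key is present (coercing strings to booleans where the default is boolean), otherwise use the default. A minimal sketch of the same logic, with a simple string-to-bool coercion standing in for charmhelpers' bool_from_string():

    def resolve_options(defaults, rdata):
        # Resolve settings from relation data, falling back to defaults.
        settings = {}
        for nkey, spec in defaults.items():
            rkey, defv = spec['rel_key'], spec['default']
            if rkey in rdata:
                if isinstance(defv, bool):
                    # Relation data arrives as strings.
                    settings[nkey] = rdata[rkey].lower() in ('true', 'yes', '1')
                else:
                    settings[nkey] = rdata[rkey]
            else:
                settings[nkey] = defv
        return settings

    defaults = {
        'l2_population': {'rel_key': 'l2-population', 'default': False},
        'polling_interval': {'rel_key': 'polling-interval', 'default': 2},
    }
    print(resolve_options(defaults, {'l2-population': 'True'}))
    # {'l2_population': True, 'polling_interval': 2}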
-
-class ExternalPortContext(NeutronPortContext):
-
- def __call__(self):
- ctxt = {}
- ports = config('ext-port')
- if ports:
- ports = [p.strip() for p in ports.split()]
- ports = self.resolve_ports(ports)
- if ports:
- ctxt = {"ext_port": ports[0]}
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- if mtu:
- ctxt['ext_port_mtu'] = mtu
-
- return ctxt
-
-
-class DataPortContext(NeutronPortContext):
-
- def __call__(self):
- ports = config('data-port')
- if ports:
- # Map of {port/mac:bridge}
- portmap = parse_data_port_mappings(ports)
- ports = portmap.keys()
- # Resolve provided ports or mac addresses and filter out those
- # already attached to a bridge.
- resolved = self.resolve_ports(ports)
- # FIXME: is this necessary?
- normalized = {get_nic_hwaddr(port): port for port in resolved
- if port not in ports}
- normalized.update({port: port for port in resolved
- if port in ports})
- if resolved:
- return {normalized[port]: bridge for port, bridge in
- six.iteritems(portmap) if port in normalized.keys()}
-
- return None
-
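The normalization step above re-keys resolved interface names back to whichever form the operator configured (name or MAC), so the final map is {resolved-port: bridge}. A self-contained sketch with get_nic_hwaddr() replaced by a stand-in lookup:

    def normalize_portmap(portmap, resolved, hwaddr_of):
        # portmap: {configured name-or-mac: bridge}; resolved: interface
        # names; hwaddr_of stands in for get_nic_hwaddr().
        normalized = {hwaddr_of(p): p for p in resolved if p not in portmap}
        normalized.update({p: p for p in resolved if p in portmap})
        return {normalized[key]: bridge for key, bridge in portmap.items()
                if key in normalized}

    portmap = {'aa:bb:cc:dd:ee:ff': 'br-data', 'eth2': 'br-ex'}
    resolved = ['eth1', 'eth2']  # eth1 carries the MAC above
    print(normalize_portmap(
        portmap, resolved,
        lambda p: 'aa:bb:cc:dd:ee:ff' if p == 'eth1' else '00:00:00:00:00:00'))
    # {'eth1': 'br-data', 'eth2': 'br-ex'}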
-
-class PhyNICMTUContext(DataPortContext):
-
- def __call__(self):
- ctxt = {}
- mappings = super(PhyNICMTUContext, self).__call__()
- if mappings and mappings.keys():
- ports = sorted(mappings.keys())
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- all_ports = set()
-            # If any of the ports is a vlan device, its underlying device
-            # must have the mtu applied first.
- for port in ports:
- for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
- lport = os.path.basename(lport)
- all_ports.add(lport.split('_')[1])
-
- all_ports = list(all_ports)
- all_ports.extend(ports)
- if mtu:
- ctxt["devs"] = '\\n'.join(all_ports)
- ctxt['mtu'] = mtu
-
- return ctxt
-
-
-class NetworkServiceContext(OSContextGenerator):
-
- def __init__(self, rel_name='quantum-network-service'):
- self.rel_name = rel_name
- self.interfaces = [rel_name]
-
- def __call__(self):
- for rid in relation_ids(self.rel_name):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- ctxt = {
- 'keystone_host': rdata.get('keystone_host'),
- 'service_port': rdata.get('service_port'),
- 'auth_port': rdata.get('auth_port'),
- 'service_tenant': rdata.get('service_tenant'),
- 'service_username': rdata.get('service_username'),
- 'service_password': rdata.get('service_password'),
- 'quantum_host': rdata.get('quantum_host'),
- 'quantum_port': rdata.get('quantum_port'),
- 'quantum_url': rdata.get('quantum_url'),
- 'region': rdata.get('region'),
- 'service_protocol':
- rdata.get('service_protocol') or 'http',
- 'auth_protocol':
- rdata.get('auth_protocol') or 'http',
- 'api_version':
- rdata.get('api_version') or '2.0',
- }
- if self.context_complete(ctxt):
- return ctxt
- return {}
-
-
-class InternalEndpointContext(OSContextGenerator):
- """Internal endpoint context.
-
-    This context provides the endpoint type used for communication between
-    services, e.g. between Nova and Cinder internally. OpenStack uses public
-    endpoints by default, so this allows admins to optionally use internal
-    endpoints.
- """
- def __call__(self):
- return {'use_internal_endpoints': config('use-internal-endpoints')}
-
-
-class VolumeAPIContext(InternalEndpointContext):
- """Volume API context.
-
- This context provides information regarding the volume endpoint to use
- when communicating between services. It determines which version of the
- API is appropriate for use.
-
- This value will be determined in the resulting context dictionary
- returned from calling the VolumeAPIContext object. Information provided
- by this context is as follows:
-
- volume_api_version: the volume api version to use, currently
- 'v2' or 'v3'
- volume_catalog_info: the information to use for a cinder client
- configuration that consumes API endpoints from the keystone
- catalog. This is defined as the type:name:endpoint_type string.
- """
- # FIXME(wolsen) This implementation is based on the provider being able
- # to specify the package version to check but does not guarantee that the
- # volume service api version selected is available. In practice, it is
- # quite likely the volume service *is* providing the v3 volume service.
- # This should be resolved when the service-discovery spec is implemented.
- def __init__(self, pkg):
- """
- Creates a new VolumeAPIContext for use in determining which version
- of the Volume API should be used for communication. A package codename
- should be supplied for determining the currently installed OpenStack
- version.
-
- :param pkg: the package codename to use in order to determine the
- component version (e.g. nova-common). See
- charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more.
- """
- super(VolumeAPIContext, self).__init__()
- self._ctxt = None
- if not pkg:
- raise ValueError('package name must be provided in order to '
- 'determine current OpenStack version.')
- self.pkg = pkg
-
- @property
- def ctxt(self):
- if self._ctxt is not None:
- return self._ctxt
- self._ctxt = self._determine_ctxt()
- return self._ctxt
-
- def _determine_ctxt(self):
- """Determines the Volume API endpoint information.
-
- Determines the appropriate version of the API that should be used
- as well as the catalog_info string that would be supplied. Returns
- a dict containing the volume_api_version and the volume_catalog_info.
- """
- rel = os_release(self.pkg, base='icehouse')
- version = '2'
- if CompareOpenStackReleases(rel) >= 'pike':
- version = '3'
-
- service_type = 'volumev{version}'.format(version=version)
- service_name = 'cinderv{version}'.format(version=version)
- endpoint_type = 'publicURL'
- if config('use-internal-endpoints'):
- endpoint_type = 'internalURL'
- catalog_info = '{type}:{name}:{endpoint}'.format(
- type=service_type, name=service_name, endpoint=endpoint_type)
-
- return {
- 'volume_api_version': version,
- 'volume_catalog_info': catalog_info,
- }
-
- def __call__(self):
- return self.ctxt
-
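For example, on a pike-or-later cloud with use-internal-endpoints set, the context resolves to the v3 API and an internal catalog entry. A minimal sketch of just the string assembly performed by _determine_ctxt():

    def volume_api_ctxt(pike_or_later, use_internal_endpoints):
        # Assemble the cinder catalog_info string (type:name:endpoint_type).
        version = '3' if pike_or_later else '2'
        endpoint = 'internalURL' if use_internal_endpoints else 'publicURL'
        return {
            'volume_api_version': version,
            'volume_catalog_info': 'volumev{v}:cinderv{v}:{e}'.format(
                v=version, e=endpoint),
        }

    print(volume_api_ctxt(True, True))
    # {'volume_api_version': '3',
    #  'volume_catalog_info': 'volumev3:cinderv3:internalURL'}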
-
-class AppArmorContext(OSContextGenerator):
- """Base class for apparmor contexts."""
-
- def __init__(self, profile_name=None):
- self._ctxt = None
- self.aa_profile = profile_name
- self.aa_utils_packages = ['apparmor-utils']
-
- @property
- def ctxt(self):
- if self._ctxt is not None:
- return self._ctxt
- self._ctxt = self._determine_ctxt()
- return self._ctxt
-
- def _determine_ctxt(self):
- """
-        Validate that aa-profile-mode is set to disable, enforce, or complain.
-
- :return ctxt: Dictionary of the apparmor profile or None
- """
- if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
- ctxt = {'aa_profile_mode': config('aa-profile-mode'),
- 'ubuntu_release': lsb_release()['DISTRIB_RELEASE']}
- if self.aa_profile:
- ctxt['aa_profile'] = self.aa_profile
- else:
- ctxt = None
- return ctxt
-
- def __call__(self):
- return self.ctxt
-
- def install_aa_utils(self):
- """
- Install packages required for apparmor configuration.
- """
- log("Installing apparmor utils.")
- ensure_packages(self.aa_utils_packages)
-
- def manually_disable_aa_profile(self):
- """
- Manually disable an apparmor profile.
-
-        If aa-profile-mode is set to disable (the default) this is required,
-        as the template has been written but apparmor is not yet aware of the
-        profile, so running aa-disable against it fails. Without this the
-        profile would kick into enforce mode on the next service restart.
-
- """
- profile_path = '/etc/apparmor.d'
- disable_path = '/etc/apparmor.d/disable'
- if not os.path.lexists(os.path.join(disable_path, self.aa_profile)):
- os.symlink(os.path.join(profile_path, self.aa_profile),
- os.path.join(disable_path, self.aa_profile))
-
- def setup_aa_profile(self):
- """
-        Set up an apparmor profile.
-
-        The ctxt dictionary will contain the apparmor profile mode and
-        the apparmor profile name. Makes calls out to aa-disable,
-        aa-complain, or aa-enforce to set up the apparmor profile.
- """
- self()
- if not self.ctxt:
- log("Not enabling apparmor Profile")
- return
- self.install_aa_utils()
- cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])]
- cmd.append(self.ctxt['aa_profile'])
- log("Setting up the apparmor profile for {} in {} mode."
- "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode']))
- try:
- check_call(cmd)
- except CalledProcessError as e:
-            # If aa-profile-mode is set to disable (the default), manual
-            # disabling is required, as the template has been written but
-            # apparmor is not yet aware of the profile, so aa-disable fails.
-            # If aa-disable learns to read profile files first this can be
-            # removed.
- if self.ctxt['aa_profile_mode'] == 'disable':
- log("Manually disabling the apparmor profile for {}."
- "".format(self.ctxt['aa_profile']))
- self.manually_disable_aa_profile()
- return
- status_set('blocked', "Apparmor profile {} failed to be set to {}."
- "".format(self.ctxt['aa_profile'],
- self.ctxt['aa_profile_mode']))
- raise e
-
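Charms typically consume this base class by subclassing with a concrete profile name and calling setup_aa_profile() after the profile template has been rendered. A hypothetical sketch (the profile name and hook wiring are illustrative, not taken from this module):

    class NovaComputeAppArmorContext(AppArmorContext):
        # Hypothetical subclass pinning the profile name for one service.
        def __init__(self):
            super(NovaComputeAppArmorContext, self).__init__(
                profile_name='usr.bin.nova-compute')

    # In a config-changed hook, after rendering the profile template:
    #     NovaComputeAppArmorContext().setup_aa_profile()
    # With aa-profile-mode=complain this runs
    # 'aa-complain usr.bin.nova-compute'; with 'disable' it falls back to
    # manually_disable_aa_profile() when aa-disable fails.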
-
-class MemcacheContext(OSContextGenerator):
- """Memcache context
-
- This context provides options for configuring a local memcache client and
- server for both IPv4 and IPv6
- """
-
- def __init__(self, package=None):
- """
- @param package: Package to examine to extrapolate OpenStack release.
- Used when charms have no openstack-origin config
-                        option (i.e. subordinates)
- """
- self.package = package
-
- def __call__(self):
- ctxt = {}
- ctxt['use_memcache'] = enable_memcache(package=self.package)
- if ctxt['use_memcache']:
- # Trusty version of memcached does not support ::1 as a listen
- # address so use host file entry instead
- release = lsb_release()['DISTRIB_CODENAME'].lower()
- if is_ipv6_disabled():
- if CompareHostReleases(release) > 'trusty':
- ctxt['memcache_server'] = '127.0.0.1'
- else:
- ctxt['memcache_server'] = 'localhost'
- ctxt['memcache_server_formatted'] = '127.0.0.1'
- ctxt['memcache_port'] = '11211'
- ctxt['memcache_url'] = '{}:{}'.format(
- ctxt['memcache_server_formatted'],
- ctxt['memcache_port'])
- else:
- if CompareHostReleases(release) > 'trusty':
- ctxt['memcache_server'] = '::1'
- else:
- ctxt['memcache_server'] = 'ip6-localhost'
- ctxt['memcache_server_formatted'] = '[::1]'
- ctxt['memcache_port'] = '11211'
- ctxt['memcache_url'] = 'inet6:{}:{}'.format(
- ctxt['memcache_server_formatted'],
- ctxt['memcache_port'])
- return ctxt
-
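The branching above yields four possible listener configurations. A condensed sketch of the decision table, assuming the release and IPv6 checks have already been collapsed to booleans:

    def memcache_listener(ipv6_disabled, newer_than_trusty):
        # Pick the local memcached listen address and client URL.
        if ipv6_disabled:
            server = '127.0.0.1' if newer_than_trusty else 'localhost'
            formatted = '127.0.0.1'
            url = '{}:11211'.format(formatted)
        else:
            # trusty's memcached cannot listen on ::1, so the hosts-file
            # alias is used there instead.
            server = '::1' if newer_than_trusty else 'ip6-localhost'
            formatted = '[::1]'
            url = 'inet6:{}:11211'.format(formatted)
        return {'memcache_server': server,
                'memcache_server_formatted': formatted,
                'memcache_url': url}

    print(memcache_listener(ipv6_disabled=False, newer_than_trusty=True))
    # {'memcache_server': '::1', 'memcache_server_formatted': '[::1]',
    #  'memcache_url': 'inet6:[::1]:11211'}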
-
-class EnsureDirContext(OSContextGenerator):
- '''
- Serves as a generic context to create a directory as a side-effect.
-
- Useful for software that supports drop-in files (.d) in conjunction
- with config option-based templates. Examples include:
- * OpenStack oslo.policy drop-in files;
- * systemd drop-in config files;
- * other software that supports overriding defaults with .d files
-
-    Another use-case is when a subordinate generates a configuration for
-    the primary to render in a separate directory.
-
-    Some software requires the target directory to exist before it can be
-    scanned for drop-in files of a specific format, which is why this
-    context creates the directory before a template is rendered into it.
- '''
-
- def __init__(self, dirname, **kwargs):
- '''Used merely to ensure that a given directory exists.'''
- self.dirname = dirname
- self.kwargs = kwargs
-
- def __call__(self):
- mkdir(self.dirname, **self.kwargs)
- return {}
-
-
-class VersionsContext(OSContextGenerator):
- """Context to return the openstack and operating system versions.
-
- """
- def __init__(self, pkg='python-keystone'):
- """Initialise context.
-
- :param pkg: Package to extrapolate openstack version from.
- :type pkg: str
- """
- self.pkg = pkg
-
- def __call__(self):
- ostack = os_release(self.pkg, base='icehouse')
- osystem = lsb_release()['DISTRIB_CODENAME'].lower()
- return {
- 'openstack_release': ostack,
- 'operating_system_release': osystem}
-
-
-class LogrotateContext(OSContextGenerator):
- """Common context generator for logrotate."""
-
- def __init__(self, location, interval, count):
- """
- :param location: Absolute path for the logrotate config file
- :type location: str
- :param interval: The interval for the rotations. Valid values are
- 'daily', 'weekly', 'monthly', 'yearly'
- :type interval: str
-        :param count: The logrotate count option: the number of times the
-                      log files are rotated before being removed or mailed
- :type count: int
- """
- self.location = location
- self.interval = interval
- self.count = 'rotate {}'.format(count)
-
- def __call__(self):
- ctxt = {
- 'logrotate_logs_location': self.location,
- 'logrotate_interval': self.interval,
- 'logrotate_count': self.count,
- }
- return ctxt
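A short usage sketch for the class above; the log location is illustrative:

    # Keep four weekly rotations of a hypothetical service's logs.
    ctxt = LogrotateContext(location='/var/log/my-service/*.log',
                            interval='weekly', count=4)()
    # ctxt == {'logrotate_logs_location': '/var/log/my-service/*.log',
    #          'logrotate_interval': 'weekly',
    #          'logrotate_count': 'rotate 4'}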
diff --git a/hooks/charmhelpers/contrib/openstack/exceptions.py b/hooks/charmhelpers/contrib/openstack/exceptions.py
deleted file mode 100644
index f85ae4f..0000000
--- a/hooks/charmhelpers/contrib/openstack/exceptions.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class OSContextError(Exception):
- """Raised when an error occurs during context generation.
-
- This exception is principally used in contrib.openstack.context
- """
- pass
diff --git a/hooks/charmhelpers/contrib/openstack/files/__init__.py b/hooks/charmhelpers/contrib/openstack/files/__init__.py
deleted file mode 100644
index 9df5f74..0000000
--- a/hooks/charmhelpers/contrib/openstack/files/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
deleted file mode 100755
index 1df55db..0000000
--- a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-#--------------------------------------------
-# This file is managed by Juju
-#--------------------------------------------
-#
-# Copyright 2009,2012 Canonical Ltd.
-# Author: Tom Haddon
-
-CRITICAL=0
-NOTACTIVE=''
-LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR==1{print $3}')
-
-typeset -i N_INSTANCES=0
-for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
-do
- N_INSTANCES=N_INSTANCES+1
- output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
- if [ $? != 0 ]; then
- date >> $LOGFILE
- echo $output >> $LOGFILE
- /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1
- CRITICAL=1
- NOTACTIVE="${NOTACTIVE} $appserver"
- fi
-done
-
-if [ $CRITICAL = 1 ]; then
- echo "CRITICAL:${NOTACTIVE}"
- exit 2
-fi
-
-echo "OK: All haproxy instances ($N_INSTANCES) looking good"
-exit 0
diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
deleted file mode 100755
index 91ce024..0000000
--- a/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-#--------------------------------------------
-# This file is managed by Juju
-#--------------------------------------------
-#
-# Copyright 2009,2012 Canonical Ltd.
-# Author: Tom Haddon
-
-# These should be config options at some stage
-CURRQthrsh=0
-MAXQthrsh=100
-
-AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR==1{print $3}')
-
-HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
-
-for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
-do
- CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
- MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
-
- if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
- echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
- exit 2
- fi
-done
-
-echo "OK: All haproxy queue depths looking good"
-exit 0
-
diff --git a/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/hooks/charmhelpers/contrib/openstack/ha/__init__.py
deleted file mode 100644
index 9b088de..0000000
--- a/hooks/charmhelpers/contrib/openstack/ha/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/openstack/ha/utils.py b/hooks/charmhelpers/contrib/openstack/ha/utils.py
deleted file mode 100644
index 718c6d6..0000000
--- a/hooks/charmhelpers/contrib/openstack/ha/utils.py
+++ /dev/null
@@ -1,348 +0,0 @@
-# Copyright 2014-2016 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2016 Canonical Ltd.
-#
-# Authors:
-# Openstack Charmers <
-#
-
-"""
-Helpers for high availability.
-"""
-
-import hashlib
-import json
-
-import re
-
-from charmhelpers.core.hookenv import (
- expected_related_units,
- log,
- relation_set,
- charm_name,
- config,
- status_set,
- DEBUG,
-)
-
-from charmhelpers.core.host import (
- lsb_release
-)
-
-from charmhelpers.contrib.openstack.ip import (
- resolve_address,
- is_ipv6,
-)
-
-from charmhelpers.contrib.network.ip import (
- get_iface_for_address,
- get_netmask_for_address,
-)
-
-from charmhelpers.contrib.hahelpers.cluster import (
- get_hacluster_config
-)
-
-JSON_ENCODE_OPTIONS = dict(
- sort_keys=True,
- allow_nan=False,
- indent=None,
- separators=(',', ':'),
-)
-
-VIP_GROUP_NAME = 'grp_{service}_vips'
-DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
-
-
-class DNSHAException(Exception):
- """Raised when an error occurs setting up DNS HA
- """
-
- pass
-
-
-def update_dns_ha_resource_params(resources, resource_params,
- relation_id=None,
- crm_ocf='ocf:maas:dns'):
- """ Configure DNS-HA resources based on provided configuration and
- update resource dictionaries for the HA relation.
-
- @param resources: Pointer to dictionary of resources.
- Usually instantiated in ha_joined().
- @param resource_params: Pointer to dictionary of resource parameters.
- Usually instantiated in ha_joined()
- @param relation_id: Relation ID of the ha relation
- @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
- DNS HA
- """
- _relation_data = {'resources': {}, 'resource_params': {}}
- update_hacluster_dns_ha(charm_name(),
- _relation_data,
- crm_ocf)
- resources.update(_relation_data['resources'])
- resource_params.update(_relation_data['resource_params'])
- relation_set(relation_id=relation_id, groups=_relation_data['groups'])
-
-
-def assert_charm_supports_dns_ha():
- """Validate prerequisites for DNS HA
- The MAAS client is only available on Xenial or greater
-
- :raises DNSHAException: if release is < 16.04
- """
- if lsb_release().get('DISTRIB_RELEASE') < '16.04':
- msg = ('DNS HA is only supported on 16.04 and greater '
- 'versions of Ubuntu.')
- status_set('blocked', msg)
- raise DNSHAException(msg)
- return True
-
-
-def expect_ha():
- """ Determine if the unit expects to be in HA
-
-    Check the juju goal-state to see whether an ha relation is expected, and
-    check for VIP or dns-ha settings which indicate the unit should expect to
-    be related to hacluster.
-
- @returns boolean
- """
- ha_related_units = []
- try:
- ha_related_units = list(expected_related_units(reltype='ha'))
- except (NotImplementedError, KeyError):
- pass
- return len(ha_related_units) > 0 or config('vip') or config('dns-ha')
-
-
-def generate_ha_relation_data(service, extra_settings=None):
- """ Generate relation data for ha relation
-
- Based on configuration options and unit interfaces, generate a json
- encoded dict of relation data items for the hacluster relation,
-    providing configuration for DNS HA or VIPs + haproxy clone sets.
-
- Example of supplying additional settings::
-
- COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
- AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
- AGENT_CA_PARAMS = 'op monitor interval="5s"'
-
- ha_console_settings = {
- 'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
- 'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
- 'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
-        'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
- generate_ha_relation_data('nova', extra_settings=ha_console_settings)
-
-
- @param service: Name of the service being configured
- @param extra_settings: Dict of additional resource data
- @returns dict: json encoded data for use with relation_set
- """
- _haproxy_res = 'res_{}_haproxy'.format(service)
- _relation_data = {
- 'resources': {
- _haproxy_res: 'lsb:haproxy',
- },
- 'resource_params': {
- _haproxy_res: 'op monitor interval="5s"'
- },
- 'init_services': {
- _haproxy_res: 'haproxy'
- },
- 'clones': {
- 'cl_{}_haproxy'.format(service): _haproxy_res
- },
- }
-
- if extra_settings:
- for k, v in extra_settings.items():
- if _relation_data.get(k):
- _relation_data[k].update(v)
- else:
- _relation_data[k] = v
-
- if config('dns-ha'):
- update_hacluster_dns_ha(service, _relation_data)
- else:
- update_hacluster_vip(service, _relation_data)
-
- return {
- 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
- for k, v in _relation_data.items() if v
- }
-
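Everything assembled above is flattened into json_-prefixed keys at the end, which is the shape the hacluster relation consumes; falsy members are dropped. A minimal sketch of that final step with a trimmed-down relation dict:

    import json

    JSON_ENCODE_OPTIONS = dict(sort_keys=True, allow_nan=False,
                               indent=None, separators=(',', ':'))

    _relation_data = {
        'resources': {'res_nova_haproxy': 'lsb:haproxy'},
        'delete_resources': [],  # falsy, so dropped from the output
    }
    print({'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
           for k, v in _relation_data.items() if v})
    # {'json_resources': '{"res_nova_haproxy":"lsb:haproxy"}'}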
-
-def update_hacluster_dns_ha(service, relation_data,
- crm_ocf='ocf:maas:dns'):
- """ Configure DNS-HA resources based on provided configuration
-
- @param service: Name of the service being configured
- @param relation_data: Pointer to dictionary of relation data.
- @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
- DNS HA
- """
- # Validate the charm environment for DNS HA
- assert_charm_supports_dns_ha()
-
- settings = ['os-admin-hostname', 'os-internal-hostname',
- 'os-public-hostname', 'os-access-hostname']
-
- # Check which DNS settings are set and update dictionaries
- hostname_group = []
- for setting in settings:
- hostname = config(setting)
- if hostname is None:
- log('DNS HA: Hostname setting {} is None. Ignoring.'
- ''.format(setting),
- DEBUG)
- continue
- m = re.search('os-(.+?)-hostname', setting)
- if m:
- endpoint_type = m.group(1)
- # resolve_address's ADDRESS_MAP uses 'int' not 'internal'
- if endpoint_type == 'internal':
- endpoint_type = 'int'
- else:
- msg = ('Unexpected DNS hostname setting: {}. '
- 'Cannot determine endpoint_type name'
- ''.format(setting))
- status_set('blocked', msg)
- raise DNSHAException(msg)
-
- hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
- if hostname_key in hostname_group:
- log('DNS HA: Resource {}: {} already exists in '
- 'hostname group - skipping'.format(hostname_key, hostname),
- DEBUG)
- continue
-
- hostname_group.append(hostname_key)
- relation_data['resources'][hostname_key] = crm_ocf
- relation_data['resource_params'][hostname_key] = (
- 'params fqdn="{}" ip_address="{}"'
- .format(hostname, resolve_address(endpoint_type=endpoint_type,
- override=False)))
-
- if len(hostname_group) >= 1:
- log('DNS HA: Hostname group is set with {} as members. '
- 'Informing the ha relation'.format(' '.join(hostname_group)),
- DEBUG)
- relation_data['groups'] = {
- DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group)
- }
- else:
- msg = 'DNS HA: Hostname group has no members.'
- status_set('blocked', msg)
- raise DNSHAException(msg)
-
-
-def get_vip_settings(vip):
- """Calculate which nic is on the correct network for the given vip.
-
-    If nic or netmask discovery fails then fall back to the charm supplied
-    config. If the fallback is used this is indicated via the fallback
-    variable.
-
- @param vip: VIP to lookup nic and cidr for.
- @returns (str, str, bool): eg (iface, netmask, fallback)
- """
- iface = get_iface_for_address(vip)
- netmask = get_netmask_for_address(vip)
- fallback = False
- if iface is None:
- iface = config('vip_iface')
- fallback = True
- if netmask is None:
- netmask = config('vip_cidr')
- fallback = True
- return iface, netmask, fallback
-
-
-def update_hacluster_vip(service, relation_data):
- """ Configure VIP resources based on provided configuration
-
- @param service: Name of the service being configured
- @param relation_data: Pointer to dictionary of relation data.
- """
- cluster_config = get_hacluster_config()
- vip_group = []
- vips_to_delete = []
- for vip in cluster_config['vip'].split():
- if is_ipv6(vip):
- res_vip = 'ocf:heartbeat:IPv6addr'
- vip_params = 'ipv6addr'
- else:
- res_vip = 'ocf:heartbeat:IPaddr2'
- vip_params = 'ip'
-
- iface, netmask, fallback = get_vip_settings(vip)
-
- vip_monitoring = 'op monitor depth="0" timeout="20s" interval="10s"'
- if iface is not None:
- # NOTE(jamespage): Delete old VIP resources
- # Old style naming encoding iface in name
- # does not work well in environments where
- # interface/subnet wiring is not consistent
- vip_key = 'res_{}_{}_vip'.format(service, iface)
- if vip_key in vips_to_delete:
- vip_key = '{}_{}'.format(vip_key, vip_params)
- vips_to_delete.append(vip_key)
-
- vip_key = 'res_{}_{}_vip'.format(
- service,
- hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
-
- relation_data['resources'][vip_key] = res_vip
- # NOTE(jamespage):
-            # Use option provided vip params if these were used
- # instead of auto-detected values
- if fallback:
- relation_data['resource_params'][vip_key] = (
- 'params {ip}="{vip}" cidr_netmask="{netmask}" '
- 'nic="{iface}" {vip_monitoring}'.format(
- ip=vip_params,
- vip=vip,
- iface=iface,
- netmask=netmask,
- vip_monitoring=vip_monitoring))
- else:
- # NOTE(jamespage):
- # let heartbeat figure out which interface and
- # netmask to configure, which works nicely
- # when network interface naming is not
- # consistent across units.
- relation_data['resource_params'][vip_key] = (
- 'params {ip}="{vip}" {vip_monitoring}'.format(
- ip=vip_params,
- vip=vip,
- vip_monitoring=vip_monitoring))
-
- vip_group.append(vip_key)
-
- if vips_to_delete:
- try:
- relation_data['delete_resources'].extend(vips_to_delete)
- except KeyError:
- relation_data['delete_resources'] = vips_to_delete
-
- if len(vip_group) >= 1:
- key = VIP_GROUP_NAME.format(service=service)
- try:
- relation_data['groups'][key] = ' '.join(vip_group)
- except KeyError:
- relation_data['groups'] = {
- key: ' '.join(vip_group)
- }
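The hashed resource names above keep a VIP's key stable regardless of interface naming. A tiny sketch of the naming scheme:

    import hashlib

    def vip_resource_key(service, vip):
        # Name the VIP resource by a short hash of the address itself.
        return 'res_{}_{}_vip'.format(
            service, hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])

    print(vip_resource_key('nova', '10.0.0.100'))
    # e.g. 'res_nova_<7 hex chars>_vip'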
diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py
deleted file mode 100644
index 723aebc..0000000
--- a/hooks/charmhelpers/contrib/openstack/ip.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from charmhelpers.core.hookenv import (
- NoNetworkBinding,
- config,
- unit_get,
- service_name,
- network_get_primary_address,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- is_address_in_network,
- is_ipv6,
- get_ipv6_addr,
- resolve_network_cidr,
-)
-from charmhelpers.contrib.hahelpers.cluster import is_clustered
-
-PUBLIC = 'public'
-INTERNAL = 'int'
-ADMIN = 'admin'
-ACCESS = 'access'
-
-ADDRESS_MAP = {
- PUBLIC: {
- 'binding': 'public',
- 'config': 'os-public-network',
- 'fallback': 'public-address',
- 'override': 'os-public-hostname',
- },
- INTERNAL: {
- 'binding': 'internal',
- 'config': 'os-internal-network',
- 'fallback': 'private-address',
- 'override': 'os-internal-hostname',
- },
- ADMIN: {
- 'binding': 'admin',
- 'config': 'os-admin-network',
- 'fallback': 'private-address',
- 'override': 'os-admin-hostname',
- },
- ACCESS: {
- 'binding': 'access',
- 'config': 'access-network',
- 'fallback': 'private-address',
- 'override': 'os-access-hostname',
- },
-}
-
-
-def canonical_url(configs, endpoint_type=PUBLIC):
- """Returns the correct HTTP URL to this host given the state of HTTPS
- configuration, hacluster and charm configuration.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :param endpoint_type: str endpoint type to resolve.
- :param returns: str base URL for services on the current service unit.
- """
- scheme = _get_scheme(configs)
-
- address = resolve_address(endpoint_type)
- if is_ipv6(address):
- address = "[{}]".format(address)
-
- return '%s://%s' % (scheme, address)
-
-
-def _get_scheme(configs):
- """Returns the scheme to use for the url (either http or https)
- depending upon whether https is in the configs value.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :returns: either 'http' or 'https' depending on whether https is
- configured within the configs context.
- """
- scheme = 'http'
- if configs and 'https' in configs.complete_contexts():
- scheme = 'https'
- return scheme
-
-
-def _get_address_override(endpoint_type=PUBLIC):
- """Returns any address overrides that the user has defined based on the
- endpoint type.
-
- Note: this function allows for the service name to be inserted into the
- address if the user specifies {service_name}.somehost.org.
-
- :param endpoint_type: the type of endpoint to retrieve the override
- value for.
- :returns: any endpoint address or hostname that the user has overridden
- or None if an override is not present.
- """
- override_key = ADDRESS_MAP[endpoint_type]['override']
- addr_override = config(override_key)
- if not addr_override:
- return None
- else:
- return addr_override.format(service_name=service_name())
-
-
-def resolve_address(endpoint_type=PUBLIC, override=True):
- """Return unit address depending on net config.
-
- If unit is clustered with vip(s) and has net splits defined, return vip on
- correct network. If clustered with no nets defined, return primary vip.
-
- If not clustered, return unit address ensuring address is on configured net
- split if one is configured, or a Juju 2.0 extra-binding has been used.
-
-    :param endpoint_type: Network endpoint type
- :param override: Accept hostname overrides or not
- """
- resolved_address = None
- if override:
- resolved_address = _get_address_override(endpoint_type)
- if resolved_address:
- return resolved_address
-
- vips = config('vip')
- if vips:
- vips = vips.split()
-
- net_type = ADDRESS_MAP[endpoint_type]['config']
- net_addr = config(net_type)
- net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
- binding = ADDRESS_MAP[endpoint_type]['binding']
- clustered = is_clustered()
-
- if clustered and vips:
- if net_addr:
- for vip in vips:
- if is_address_in_network(net_addr, vip):
- resolved_address = vip
- break
- else:
- # NOTE: endeavour to check vips against network space
- # bindings
- try:
- bound_cidr = resolve_network_cidr(
- network_get_primary_address(binding)
- )
- for vip in vips:
- if is_address_in_network(bound_cidr, vip):
- resolved_address = vip
- break
- except (NotImplementedError, NoNetworkBinding):
-                # If no net-splits are configured and there is no support
-                # for extra bindings/network spaces, we expect a single vip
- resolved_address = vips[0]
- else:
- if config('prefer-ipv6'):
- fallback_addr = get_ipv6_addr(exc_list=vips)[0]
- else:
- fallback_addr = unit_get(net_fallback)
-
- if net_addr:
- resolved_address = get_address_in_network(net_addr, fallback_addr)
- else:
- # NOTE: only try to use extra bindings if legacy network
- # configuration is not in use
- try:
- resolved_address = network_get_primary_address(binding)
- except (NotImplementedError, NoNetworkBinding):
- resolved_address = fallback_addr
-
- if resolved_address is None:
- raise ValueError("Unable to resolve a suitable IP address based on "
- "charm state and configuration. (net_type=%s, "
- "clustered=%s)" % (net_type, clustered))
-
- return resolved_address
-
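The precedence implemented above is easier to read flattened out. A condensed sketch of the resolution order, with each charmhelpers lookup abstracted into an argument:

    def resolve(override_addr, clustered, matching_vips,
                net_addr, binding_addr, fallback_addr):
        # Condensed precedence of resolve_address(); every argument
        # stands in for the corresponding charmhelpers lookup.
        if override_addr:                 # 1. os-*-hostname override
            return override_addr
        if clustered and matching_vips:   # 2. VIP on the right network
            return matching_vips[0]
        if net_addr:                      # 3. legacy net-split config
            return net_addr
        return binding_addr or fallback_addr  # 4. binding, then fallback

    print(resolve(None, False, [], None, '10.5.0.20', '192.168.1.5'))
    # '10.5.0.20'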
-
-def get_vip_in_network(network):
- matching_vip = None
- vips = config('vip')
- if vips:
- for vip in vips.split():
- if is_address_in_network(network, vip):
- matching_vip = vip
- return matching_vip
diff --git a/hooks/charmhelpers/contrib/openstack/keystone.py b/hooks/charmhelpers/contrib/openstack/keystone.py
deleted file mode 100644
index d7e02cc..0000000
--- a/hooks/charmhelpers/contrib/openstack/keystone.py
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2017 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import six
-from charmhelpers.fetch import apt_install
-from charmhelpers.contrib.openstack.context import IdentityServiceContext
-from charmhelpers.core.hookenv import (
- log,
- ERROR,
-)
-
-
-def get_api_suffix(api_version):
- """Return the formatted api suffix for the given version
- @param api_version: version of the keystone endpoint
- @returns the api suffix formatted according to the given api
- version
- """
- return 'v2.0' if api_version in (2, "2", "2.0") else 'v3'
-
-
-def format_endpoint(schema, addr, port, api_version):
- """Return a formatted keystone endpoint
- @param schema: http or https
- @param addr: ipv4/ipv6 host of the keystone service
- @param port: port of the keystone service
- @param api_version: 2 or 3
- @returns a fully formatted keystone endpoint
- """
- return '{}://{}:{}/{}/'.format(schema, addr, port,
- get_api_suffix(api_version))
-
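A couple of illustrative invocations of the helpers above (hosts and ports are made up):

    assert format_endpoint('https', '10.5.0.10', 5000, 3) == \
        'https://10.5.0.10:5000/v3/'
    # v2-family version spellings all map to the 'v2.0' suffix.
    assert format_endpoint('http', 'keystone.example.com', 5000, '2.0') == \
        'http://keystone.example.com:5000/v2.0/'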
-
-def get_keystone_manager(endpoint, api_version, **kwargs):
-    """Return a KeystoneManager for the correct API version
-
- @param endpoint: the keystone endpoint to point client at
- @param api_version: version of the keystone api the client should use
- @param kwargs: token or username/tenant/password information
-    @returns KeystoneManager instance used for interrogating keystone
- """
- if api_version == 2:
- return KeystoneManager2(endpoint, **kwargs)
- if api_version == 3:
- return KeystoneManager3(endpoint, **kwargs)
- raise ValueError('No manager found for api version {}'.format(api_version))
-
-
-def get_keystone_manager_from_identity_service_context():
-    """Return a KeystoneManager generated from an
-    instance of charmhelpers.contrib.openstack.context.IdentityServiceContext
-    @returns KeystoneManager instance
- """
- context = IdentityServiceContext()()
- if not context:
- msg = "Identity service context cannot be generated"
- log(msg, level=ERROR)
- raise ValueError(msg)
-
- endpoint = format_endpoint(context['service_protocol'],
- context['service_host'],
- context['service_port'],
- context['api_version'])
-
- if context['api_version'] in (2, "2.0"):
- api_version = 2
- else:
- api_version = 3
-
- return get_keystone_manager(endpoint, api_version,
- username=context['admin_user'],
- password=context['admin_password'],
- tenant_name=context['admin_tenant_name'])
-
-
-class KeystoneManager(object):
-
- def resolve_service_id(self, service_name=None, service_type=None):
- """Find the service_id of a given service"""
- services = [s._info for s in self.api.services.list()]
-
-        if service_name:
-            service_name = service_name.lower()
- for s in services:
- name = s['name'].lower()
- if service_type and service_name:
- if (service_name == name and service_type == s['type']):
- return s['id']
- elif service_name and service_name == name:
- return s['id']
- elif service_type and service_type == s['type']:
- return s['id']
- return None
-
- def service_exists(self, service_name=None, service_type=None):
- """Determine if the given service exists on the service list"""
- return self.resolve_service_id(service_name, service_type) is not None
-
-
-class KeystoneManager2(KeystoneManager):
-
- def __init__(self, endpoint, **kwargs):
- try:
- from keystoneclient.v2_0 import client
- from keystoneclient.auth.identity import v2
- from keystoneclient import session
- except ImportError:
- if six.PY2:
- apt_install(["python-keystoneclient"], fatal=True)
- else:
- apt_install(["python3-keystoneclient"], fatal=True)
-
- from keystoneclient.v2_0 import client
- from keystoneclient.auth.identity import v2
- from keystoneclient import session
-
- self.api_version = 2
-
- token = kwargs.get("token", None)
- if token:
- api = client.Client(endpoint=endpoint, token=token)
- else:
- auth = v2.Password(username=kwargs.get("username"),
- password=kwargs.get("password"),
- tenant_name=kwargs.get("tenant_name"),
- auth_url=endpoint)
- sess = session.Session(auth=auth)
- api = client.Client(session=sess)
-
- self.api = api
-
-
-class KeystoneManager3(KeystoneManager):
-
- def __init__(self, endpoint, **kwargs):
- try:
- from keystoneclient.v3 import client
- from keystoneclient.auth import token_endpoint
- from keystoneclient import session
- from keystoneclient.auth.identity import v3
- except ImportError:
- if six.PY2:
- apt_install(["python-keystoneclient"], fatal=True)
- else:
- apt_install(["python3-keystoneclient"], fatal=True)
-
- from keystoneclient.v3 import client
- from keystoneclient.auth import token_endpoint
- from keystoneclient import session
- from keystoneclient.auth.identity import v3
-
- self.api_version = 3
-
- token = kwargs.get("token", None)
- if token:
- auth = token_endpoint.Token(endpoint=endpoint,
- token=token)
- sess = session.Session(auth=auth)
- else:
- auth = v3.Password(auth_url=endpoint,
- user_id=kwargs.get("username"),
- password=kwargs.get("password"),
- project_id=kwargs.get("tenant_name"))
- sess = session.Session(auth=auth)
-
- self.api = client.Client(session=sess)
diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py
deleted file mode 100644
index 0f847f5..0000000
--- a/hooks/charmhelpers/contrib/openstack/neutron.py
+++ /dev/null
@@ -1,354 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Various utilities for dealing with Neutron and the renaming from Quantum.
-
-import six
-from subprocess import check_output
-
-from charmhelpers.core.hookenv import (
- config,
- log,
- ERROR,
-)
-
-from charmhelpers.contrib.openstack.utils import (
- os_release,
- CompareOpenStackReleases,
-)
-
-
-def headers_package():
-    """Return the linux-headers package matching the running kernel,
-    needed for building the DKMS package"""
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- return 'linux-headers-%s' % kver
-
-
-QUANTUM_CONF_DIR = '/etc/quantum'
-
-
-def kernel_version():
- """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- kver = kver.split('.')
- return (int(kver[0]), int(kver[1]))
-
-
-def determine_dkms_package():
- """ Determine which DKMS package should be used based on kernel version """
- # NOTE: 3.13 kernels have support for GRE and VXLAN native
- if kernel_version() >= (3, 13):
- return []
- else:
- return [headers_package(), 'openvswitch-datapath-dkms']
-
-
-# legacy
-
-
-def quantum_plugins():
- return {
- 'ovs': {
- 'config': '/etc/quantum/plugins/openvswitch/'
- 'ovs_quantum_plugin.ini',
- 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
- 'OVSQuantumPluginV2',
- 'contexts': [],
- 'services': ['quantum-plugin-openvswitch-agent'],
- 'packages': [determine_dkms_package(),
- ['quantum-plugin-openvswitch-agent']],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-openvswitch'],
- 'server_services': ['quantum-server']
- },
- 'nvp': {
- 'config': '/etc/quantum/plugins/nicira/nvp.ini',
- 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
- 'QuantumPlugin.NvpPluginV2',
- 'contexts': [],
- 'services': [],
- 'packages': [],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-nicira'],
- 'server_services': ['quantum-server']
- }
- }
-
-
-NEUTRON_CONF_DIR = '/etc/neutron'
-
-
-def neutron_plugins():
- release = os_release('nova-common')
- plugins = {
- 'ovs': {
- 'config': '/etc/neutron/plugins/openvswitch/'
- 'ovs_neutron_plugin.ini',
- 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
- 'OVSNeutronPluginV2',
- 'contexts': [],
- 'services': ['neutron-plugin-openvswitch-agent'],
- 'packages': [determine_dkms_package(),
- ['neutron-plugin-openvswitch-agent']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-openvswitch'],
- 'server_services': ['neutron-server']
- },
- 'nvp': {
- 'config': '/etc/neutron/plugins/nicira/nvp.ini',
- 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
- 'NeutronPlugin.NvpPluginV2',
- 'contexts': [],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-nicira'],
- 'server_services': ['neutron-server']
- },
- 'nsx': {
- 'config': '/etc/neutron/plugins/vmware/nsx.ini',
- 'driver': 'vmware',
- 'contexts': [],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-vmware'],
- 'server_services': ['neutron-server']
- },
- 'n1kv': {
- 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
- 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
- 'contexts': [],
- 'services': [],
- 'packages': [determine_dkms_package(),
- ['neutron-plugin-cisco']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-cisco'],
- 'server_services': ['neutron-server']
- },
- 'Calico': {
- 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
- 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
- 'contexts': [],
- 'services': ['calico-felix',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd'],
- 'packages': [determine_dkms_package(),
- ['calico-compute',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd']],
- 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
- 'server_services': ['neutron-server', 'etcd']
- },
- 'vsp': {
- 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
- 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
- 'contexts': [],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
- 'server_services': ['neutron-server']
- },
- 'plumgrid': {
- 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
- 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin'
- '.plumgrid_plugin.NeutronPluginPLUMgridV2'),
- 'contexts': [],
- 'services': [],
- 'packages': ['plumgrid-lxc',
- 'iovisor-dkms'],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-plumgrid'],
- 'server_services': ['neutron-server']
- },
- 'midonet': {
- 'config': '/etc/neutron/plugins/midonet/midonet.ini',
- 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
- 'contexts': [],
- 'services': [],
- 'packages': [determine_dkms_package()],
- 'server_packages': ['neutron-server',
- 'python-neutron-plugin-midonet'],
- 'server_services': ['neutron-server']
- }
- }
- if CompareOpenStackReleases(release) >= 'icehouse':
- # NOTE: patch in ml2 plugin for icehouse onwards
- plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
- plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
- plugins['ovs']['server_packages'] = ['neutron-server',
- 'neutron-plugin-ml2']
- # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
- plugins['nvp'] = plugins['nsx']
- if CompareOpenStackReleases(release) >= 'kilo':
- plugins['midonet']['driver'] = (
- 'neutron.plugins.midonet.plugin.MidonetPluginV2')
- if CompareOpenStackReleases(release) >= 'liberty':
- plugins['midonet']['driver'] = (
- 'midonet.neutron.plugin_v1.MidonetPluginV2')
- plugins['midonet']['server_packages'].remove(
- 'python-neutron-plugin-midonet')
- plugins['midonet']['server_packages'].append(
- 'python-networking-midonet')
- plugins['plumgrid']['driver'] = (
- 'networking_plumgrid.neutron.plugins'
- '.plugin.NeutronPluginPLUMgridV2')
- plugins['plumgrid']['server_packages'].remove(
- 'neutron-plugin-plumgrid')
- if CompareOpenStackReleases(release) >= 'mitaka':
- plugins['nsx']['server_packages'].remove('neutron-plugin-vmware')
- plugins['nsx']['server_packages'].append('python-vmware-nsx')
- plugins['nsx']['config'] = '/etc/neutron/nsx.ini'
- plugins['vsp']['driver'] = (
- 'nuage_neutron.plugins.nuage.plugin.NuagePlugin')
- return plugins
-
-
-def neutron_plugin_attribute(plugin, attr, net_manager=None):
- manager = net_manager or network_manager()
- if manager == 'quantum':
- plugins = quantum_plugins()
- elif manager == 'neutron':
- plugins = neutron_plugins()
- else:
- log("Network manager '%s' does not support plugins." % (manager),
- level=ERROR)
- raise Exception
-
- try:
- _plugin = plugins[plugin]
- except KeyError:
- log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
- raise Exception
-
- try:
- return _plugin[attr]
- except KeyError:
- return None
-
-
-def network_manager():
- '''
- Deals with the renaming of Quantum to Neutron in H and any situations
-    that require compatibility (e.g. deploying H with network-manager=quantum,
- upgrading from G).
- '''
- release = os_release('nova-common')
- manager = config('network-manager').lower()
-
- if manager not in ['quantum', 'neutron']:
- return manager
-
- if release in ['essex']:
- # E does not support neutron
- log('Neutron networking not supported in Essex.', level=ERROR)
- raise Exception
- elif release in ['folsom', 'grizzly']:
- # neutron is named quantum in F and G
- return 'quantum'
- else:
- # ensure accurate naming for all releases post-H
- return 'neutron'
-
-
-def parse_mappings(mappings, key_rvalue=False):
- """By default mappings are lvalue keyed.
-
- If key_rvalue is True, the mapping will be reversed to allow multiple
- configs for the same lvalue.
- """
- parsed = {}
- if mappings:
- mappings = mappings.split()
- for m in mappings:
- p = m.partition(':')
-
- if key_rvalue:
- key_index = 2
- val_index = 0
- # if there is no rvalue skip to next
- if not p[1]:
- continue
- else:
- key_index = 0
- val_index = 2
-
- key = p[key_index].strip()
- parsed[key] = p[val_index].strip()
-
- return parsed
-
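The rvalue keying is what lets one lvalue (e.g. a bridge) carry several candidate ports. A short illustration of both modes of parse_mappings():

    assert parse_mappings('physnet1:br-ex physnet2:br-data') == \
        {'physnet1': 'br-ex', 'physnet2': 'br-data'}

    # key_rvalue=True reverses each pair, so two ports proposed for the
    # same bridge survive as separate keys.
    assert parse_mappings('br-data:eth1 br-data:eth2', key_rvalue=True) == \
        {'eth1': 'br-data', 'eth2': 'br-data'}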
-
-def parse_bridge_mappings(mappings):
- """Parse bridge mappings.
-
- Mappings must be a space-delimited list of provider:bridge mappings.
-
- Returns dict of the form {provider:bridge}.
- """
- return parse_mappings(mappings)
-
-
-def parse_data_port_mappings(mappings, default_bridge='br-data'):
- """Parse data port mappings.
-
- Mappings must be a space-delimited list of bridge:port.
-
- Returns dict of the form {port:bridge} where ports may be mac addresses or
- interface names.
- """
-
-    # NOTE(dosaboy): we use the rvalue as key to allow multiple values to be
-    # proposed for the same bridge, since a value may be a mac address that
-    # differs across units, thus allowing first-known-good to be chosen.
- _mappings = parse_mappings(mappings, key_rvalue=True)
- if not _mappings or list(_mappings.values()) == ['']:
- if not mappings:
- return {}
-
- # For backwards-compatibility we need to support port-only provided in
- # config.
- _mappings = {mappings.split()[0]: default_bridge}
-
- ports = _mappings.keys()
- if len(set(ports)) != len(ports):
- raise Exception("It is not allowed to have the same port configured "
- "on more than one bridge")
-
- return _mappings
-
-
-def parse_vlan_range_mappings(mappings):
- """Parse vlan range mappings.
-
- Mappings must be a space-delimited list of provider:start:end mappings.
-
- The start:end range is optional and may be omitted.
-
- Returns dict of the form {provider: (start, end)}.
- """
- _mappings = parse_mappings(mappings)
- if not _mappings:
- return {}
-
- mappings = {}
- for p, r in six.iteritems(_mappings):
- mappings[p] = tuple(r.split(':'))
-
- return mappings
diff --git a/hooks/charmhelpers/contrib/openstack/ssh_migrations.py b/hooks/charmhelpers/contrib/openstack/ssh_migrations.py
deleted file mode 100644
index 96b9f71..0000000
--- a/hooks/charmhelpers/contrib/openstack/ssh_migrations.py
+++ /dev/null
@@ -1,412 +0,0 @@
-# Copyright 2018 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import subprocess
-
-from charmhelpers.core.hookenv import (
- ERROR,
- log,
- relation_get,
-)
-from charmhelpers.contrib.network.ip import (
- is_ipv6,
- ns_query,
-)
-from charmhelpers.contrib.openstack.utils import (
- get_hostname,
- get_host_ip,
- is_ip,
-)
-
-NOVA_SSH_DIR = '/etc/nova/compute_ssh/'
-
-
-def ssh_directory_for_unit(application_name, user=None):
- """Return the directory used to store ssh assets for the application.
-
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
-    :param user: The user that the ssh assets are for.
- :type user: str
- :returns: Fully qualified directory path.
- :rtype: str
- """
- if user:
- application_name = "{}_{}".format(application_name, user)
- _dir = os.path.join(NOVA_SSH_DIR, application_name)
- for d in [NOVA_SSH_DIR, _dir]:
- if not os.path.isdir(d):
- os.mkdir(d)
- for f in ['authorized_keys', 'known_hosts']:
- f = os.path.join(_dir, f)
- if not os.path.isfile(f):
- open(f, 'w').close()
- return _dir
-
-
-def known_hosts(application_name, user=None):
- """Return the known hosts file for the application.
-
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
-    :param user: The user that the ssh assets are for.
- :type user: str
- :returns: Fully qualified path to file.
- :rtype: str
- """
- return os.path.join(
- ssh_directory_for_unit(application_name, user),
- 'known_hosts')
-
-
-def authorized_keys(application_name, user=None):
- """Return the authorized keys file for the application.
-
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
-    :param user: The user that the ssh assets are for.
- :type user: str
- :returns: Fully qualified path to file.
- :rtype: str
- """
- return os.path.join(
- ssh_directory_for_unit(application_name, user),
- 'authorized_keys')
-
-
-def ssh_known_host_key(host, application_name, user=None):
- """Return the first entry in known_hosts for host.
-
- :param host: hostname to lookup in file.
- :type host: str
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
-    :param user: The user that the ssh assets are for.
- :type user: str
- :returns: Host key
- :rtype: str or None
- """
- cmd = [
- 'ssh-keygen',
- '-f', known_hosts(application_name, user),
- '-H',
- '-F',
- host]
- try:
- # The first line of output is like '# Host xx found: line 1 type RSA',
- # which should be excluded.
- output = subprocess.check_output(cmd)
- except subprocess.CalledProcessError as e:
- # RC of 1 seems to be legitimate for most ssh-keygen -F calls.
- if e.returncode == 1:
- output = e.output
- else:
- raise
- output = output.strip()
-
- if output:
- # Bug #1500589 cmd has 0 rc on precise if entry not present
- lines = output.split('\n')
- if len(lines) >= 1:
- return lines[0]
-
- return None
-
-
-def remove_known_host(host, application_name, user=None):
- """Remove the entry in known_hosts for host.
-
- :param host: hostname to lookup in file.
- :type host: str
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
-    :param user: The user that the ssh assets are for.
- :type user: str
- """
- log('Removing SSH known host entry for compute host at %s' % host)
- cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host]
- subprocess.check_call(cmd)
-
-
-def is_same_key(key_1, key_2):
- """Extract the key from two host entries and compare them.
-
- :param key_1: Host key
- :type key_1: str
- :param key_2: Host key
- :type key_2: str
- """
-    # The key format we get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp'
-    # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB'; we only need to compare
-    # the part starting with 'ssh-rsa' after the first '= ', because the
-    # hashed value at the beginning will change each time.
- k_1 = key_1.split('= ')[1]
- k_2 = key_2.split('= ')[1]
- return k_1 == k_2
-
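The comparison relies on the hashed known_hosts format, in which only the salt/hash prefix varies between scans of the same host. A short illustration (the key material is made up):

    # Two scans of one host: the '|1|salt|hash=' prefix differs, the
    # 'ssh-rsa ...' payload after the first '= ' does not.
    scan_1 = '|1|2rUumCavEXWVaVyB5uMl8=|CpEL6l7VTY37T= ssh-rsa AAAAB3Nza'
    scan_2 = '|1|9zLxFakeSaltFakeSaltF=|QqWwFakeHashX= ssh-rsa AAAAB3Nza'
    assert scan_1.split('= ')[1] == scan_2.split('= ')[1]  # is_same_key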
-
-def add_known_host(host, application_name, user=None):
- """Add the given host key to the known hosts file.
-
- :param host: host name
- :type host: str
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
-    :param user: The user that the ssh assets are for.
- :type user: str
- """
- cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
- try:
- remote_key = subprocess.check_output(cmd).strip()
- except Exception as e:
- log('Could not obtain SSH host key from %s' % host, level=ERROR)
- raise e
-
- current_key = ssh_known_host_key(host, application_name, user)
- if current_key and remote_key:
- if is_same_key(remote_key, current_key):
- log('Known host key for compute host %s up to date.' % host)
- return
- else:
- remove_known_host(host, application_name, user)
-
- log('Adding SSH host key to known hosts for compute node at %s.' % host)
- with open(known_hosts(application_name, user), 'a') as out:
- out.write("{}\n".format(remote_key))
-
-
-def ssh_authorized_key_exists(public_key, application_name, user=None):
- """Check if given key is in the authorized_key file.
-
- :param public_key: Public key.
- :type public_key: str
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
- :param user: The user that the ssh asserts are for.
- :type user: str
- :returns: Whether given key is in the authorized_key file.
- :rtype: boolean
- """
- with open(authorized_keys(application_name, user)) as keys:
- return public_key in keys.read()
-
-
-def add_authorized_key(public_key, application_name, user=None):
- """Add given key to the authorized_key file.
-
- :param public_key: Public key.
- :type public_key: str
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
- :param user: The user that the ssh asserts are for.
- :type user: str
- """
- with open(authorized_keys(application_name, user), 'a') as keys:
- keys.write("{}\n".format(public_key))
-
-
-def ssh_compute_add_host_and_key(public_key, hostname, private_address,
- application_name, user=None):
- """Add a compute nodes ssh details to local cache.
-
- Collect various hostname variations and add the corresponding host keys to
- the local known hosts file. Finally, add the supplied public key to the
- authorized_key file.
-
- :param public_key: Public key.
- :type public_key: str
- :param hostname: Hostname to collect host keys from.
- :type hostname: str
- :param private_address: Corresponding private address for hostname
- :type private_address: str
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
- :param user: The user that the ssh asserts are for.
- :type user: str
- """
- # If remote compute node hands us a hostname, ensure we have a
- # known hosts entry for its IP, hostname and FQDN.
- hosts = [private_address]
-
- if not is_ipv6(private_address):
- if hostname:
- hosts.append(hostname)
-
- if is_ip(private_address):
- hn = get_hostname(private_address)
- if hn:
- hosts.append(hn)
- short = hn.split('.')[0]
- if ns_query(short):
- hosts.append(short)
- else:
- hosts.append(get_host_ip(private_address))
- short = private_address.split('.')[0]
- if ns_query(short):
- hosts.append(short)
-
- for host in list(set(hosts)):
- add_known_host(host, application_name, user)
-
- if not ssh_authorized_key_exists(public_key, application_name, user):
- log('Saving SSH authorized key for compute host at %s.' %
- private_address)
- add_authorized_key(public_key, application_name, user)
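# An illustrative expansion of the host list built above, assuming the remote
# unit reported hostname 'compute-1', private address 10.5.0.20, and a 'maas'
# DNS zone; real results depend on what actually resolves in the environment.
hosts = {'10.5.0.20',        # private address, always included
         'compute-1',        # hostname passed over the relation
         'compute-1.maas'}   # reverse lookup of the IP, when it resolves
for host in hosts:
    print('would scan and record a host key for', host)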
-
-
-def ssh_compute_add(public_key, application_name, rid=None, unit=None,
- user=None):
- """Add a compute nodes ssh details to local cache.
-
- Collect various hostname variations and add the corresponding host keys to
- the local known hosts file. Finally, add the supplied public key to the
- authorized_key file.
-
- :param public_key: Public key.
- :type public_key: str
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
- :param rid: Relation id of the relation between this charm and the app. If
- none is supplied, the relation of the current hook context is
- assumed.
- :type rid: str
- :param unit: Unit to add ssh asserts for. If none is supplied, the unit of
- the current hook context is assumed.
- :type unit: str
- :param user: The user that the ssh asserts are for.
- :type user: str
- """
- relation_data = relation_get(rid=rid, unit=unit)
- ssh_compute_add_host_and_key(
- public_key,
- relation_data.get('hostname'),
- relation_data.get('private-address'),
- application_name,
- user=user)
-
-
-def ssh_known_hosts_lines(application_name, user=None):
- """Return contents of known_hosts file for given application.
-
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
- :param user: The user that the ssh asserts are for.
- :type user: str
- """
- known_hosts_list = []
- with open(known_hosts(application_name, user)) as hosts:
- for hosts_line in hosts:
- if hosts_line.rstrip():
- known_hosts_list.append(hosts_line.rstrip())
- return known_hosts_list
-
-
-def ssh_authorized_keys_lines(application_name, user=None):
- """Return contents of authorized_keys file for given application.
-
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
- :param user: The user that the ssh asserts are for.
- :type user: str
- """
- authorized_keys_list = []
-
- with open(authorized_keys(application_name, user)) as keys:
- for authkey_line in keys:
- if authkey_line.rstrip():
- authorized_keys_list.append(authkey_line.rstrip())
- return authorized_keys_list
-
-
-def ssh_compute_remove(public_key, application_name, user=None):
- """Remove given public key from authorized_keys file.
-
- :param public_key: Public key.
- :type public_key: str
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
- :param user: The user that the ssh asserts are for.
- :type user: str
- """
- if not (os.path.isfile(authorized_keys(application_name, user)) or
- os.path.isfile(known_hosts(application_name, user))):
- return
-
- keys = ssh_authorized_keys_lines(application_name, user=user)
- keys = [k.strip() for k in keys]
-
- if public_key not in keys:
- return
-
- # Filter rather than remove-while-iterating, which can skip adjacent
- # duplicate entries.
- keys = [k for k in keys if k != public_key]
-
- with open(authorized_keys(application_name, user), 'w') as _keys:
- keys = '\n'.join(keys)
- if not keys.endswith('\n'):
- keys += '\n'
- _keys.write(keys)
-
-
-def get_ssh_settings(application_name, user=None):
- """Retrieve the known host entries and public keys for application
-
- Retrieve the known host entries and public keys for application for all
- units of the given application related to this application for the
- app + user combination.
-
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
- :param user: The user that the ssh asserts are for.
- :type user: str
- :returns: Public keys + host keys for all units for app + user combination.
- :rtype: dict
- """
- settings = {}
- keys = {}
- prefix = ''
- if user:
- prefix = '{}_'.format(user)
-
- for i, line in enumerate(ssh_known_hosts_lines(
- application_name=application_name, user=user)):
- settings['{}known_hosts_{}'.format(prefix, i)] = line
- if settings:
- settings['{}known_hosts_max_index'.format(prefix)] = len(
- settings.keys())
-
- for i, line in enumerate(ssh_authorized_keys_lines(
- application_name=application_name, user=user)):
- keys['{}authorized_keys_{}'.format(prefix, i)] = line
- if keys:
- keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys())
- settings.update(keys)
- return settings
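# A sketch of the flattened dict this returns for user='nova', assuming one
# known_hosts line and one authorized key (the values are invented).
example = {
    'nova_known_hosts_0': '|1|salt=|hash= ssh-rsa AAAAB3NzaC1yc2EXAMPLE',
    'nova_known_hosts_max_index': 1,
    'nova_authorized_keys_0': 'ssh-rsa AAAAB3NzaC1yc2EXAMPLE root@compute-1',
    'nova_authorized_keys_max_index': 1,
}
# Consumers iterate using the max_index counters rather than guessing keys.
for i in range(example['nova_known_hosts_max_index']):
    assert 'nova_known_hosts_{}'.format(i) in example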
-
-
-def get_all_user_ssh_settings(application_name):
- """Retrieve the known host entries and public keys for application
-
- Retrieve the known host entries and public keys for application for all
- units of the given application related to this application for root user
- and nova user.
-
- :param application_name: Name of application eg nova-compute-something
- :type application_name: str
- :returns: Public keys + host keys for all units for app + user combination.
- :rtype: dict
- """
- settings = get_ssh_settings(application_name)
- settings.update(get_ssh_settings(application_name, user='nova'))
- return settings
diff --git a/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/hooks/charmhelpers/contrib/openstack/templates/__init__.py
deleted file mode 100644
index 9df5f74..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
deleted file mode 100644
index a11ce8a..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# ceph configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[global]
-{% if auth -%}
-auth_supported = {{ auth }}
-keyring = /etc/ceph/$cluster.$name.keyring
-mon host = {{ mon_hosts }}
-{% endif -%}
-log to syslog = {{ use_syslog }}
-err to syslog = {{ use_syslog }}
-clog to syslog = {{ use_syslog }}
-{% if rbd_features %}
-rbd default features = {{ rbd_features }}
-{% endif %}
-
-[client]
-{% if rbd_client_cache_settings -%}
-{% for key, value in rbd_client_cache_settings.items() -%}
-{{ key }} = {{ value }}
-{% endfor -%}
-{%- endif %}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/git.upstart b/hooks/charmhelpers/contrib/openstack/templates/git.upstart
deleted file mode 100644
index 4bed404..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/git.upstart
+++ /dev/null
@@ -1,17 +0,0 @@
-description "{{ service_description }}"
-author "Juju {{ service_name }} Charm "
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-respawn
-
-exec start-stop-daemon --start --chuid {{ user_name }} \
- --chdir {{ start_dir }} --name {{ process_name }} \
- --exec {{ executable_name }} -- \
- {% for config_file in config_files -%}
- --config-file={{ config_file }} \
- {% endfor -%}
- {% if log_file -%}
- --log-file={{ log_file }}
- {% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
deleted file mode 100644
index d36af2a..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ /dev/null
@@ -1,77 +0,0 @@
-global
- log /var/lib/haproxy/dev/log local0
- log /var/lib/haproxy/dev/log local1 notice
- maxconn 20000
- user haproxy
- group haproxy
- spread-checks 0
- stats socket /var/run/haproxy/admin.sock mode 600 level admin
- stats timeout 2m
-
-defaults
- log global
- mode tcp
- option tcplog
- option dontlognull
- retries 3
-{%- if haproxy_queue_timeout %}
- timeout queue {{ haproxy_queue_timeout }}
-{%- else %}
- timeout queue 9000
-{%- endif %}
-{%- if haproxy_connect_timeout %}
- timeout connect {{ haproxy_connect_timeout }}
-{%- else %}
- timeout connect 9000
-{%- endif %}
-{%- if haproxy_client_timeout %}
- timeout client {{ haproxy_client_timeout }}
-{%- else %}
- timeout client 90000
-{%- endif %}
-{%- if haproxy_server_timeout %}
- timeout server {{ haproxy_server_timeout }}
-{%- else %}
- timeout server 90000
-{%- endif %}
-
-listen stats
- bind {{ local_host }}:{{ stat_port }}
- mode http
- stats enable
- stats hide-version
- stats realm Haproxy\ Statistics
- stats uri /
- stats auth admin:{{ stat_password }}
-
-{% if frontends -%}
-{% for service, ports in service_ports.items() -%}
-frontend tcp-in_{{ service }}
- bind *:{{ ports[0] }}
- {% if ipv6_enabled -%}
- bind :::{{ ports[0] }}
- {% endif -%}
- {% for frontend in frontends -%}
- acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
- use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
- {% endfor -%}
- default_backend {{ service }}_{{ default_backend }}
-
-{% for frontend in frontends -%}
-backend {{ service }}_{{ frontend }}
- balance leastconn
- {% if backend_options -%}
- {% if backend_options[service] -%}
- {% for option in backend_options[service] -%}
- {% for key, value in option.items() -%}
- {{ key }} {{ value }}
- {% endfor -%}
- {% endfor -%}
- {% endif -%}
- {% endif -%}
- {% for unit, address in frontends[frontend]['backends'].items() -%}
- server {{ unit }} {{ address }}:{{ ports[1] }} check
- {% endfor %}
-{% endfor -%}
-{% endfor -%}
-{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/logrotate b/hooks/charmhelpers/contrib/openstack/templates/logrotate
deleted file mode 100644
index b2900d0..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/logrotate
+++ /dev/null
@@ -1,9 +0,0 @@
-/var/log/{{ logrotate_logs_location }}/*.log {
- {{ logrotate_interval }}
- {{ logrotate_count }}
- compress
- delaycompress
- missingok
- notifempty
- copytruncate
-}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/memcached.conf b/hooks/charmhelpers/contrib/openstack/templates/memcached.conf
deleted file mode 100644
index 26cb037..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/memcached.conf
+++ /dev/null
@@ -1,53 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# memcached configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-
-# memcached default config file
-# 2003 - Jay Bonci <jaybonci@debian.org>
-# This configuration file is read by the start-memcached script provided as
-# part of the Debian GNU/Linux distribution.
-
-# Run memcached as a daemon. This command is implied, and is not needed for the
-# daemon to run. See the README.Debian that comes with this package for more
-# information.
--d
-
-# Log memcached's output to /var/log/memcached
-logfile /var/log/memcached.log
-
-# Be verbose
-# -v
-
-# Be even more verbose (print client commands as well)
-# -vv
-
-# Start with a cap of 64 megs of memory. It's reasonable, and the daemon default
-# Note that the daemon will grow to this size, but does not start out holding this much
-# memory
--m 64
-
-# Default connection port is 11211
--p {{ memcache_port }}
-
-# Run the daemon as root. The start-memcached will default to running as root if no
-# -u command is present in this config file
--u memcache
-
-# Specify which IP address to listen on. The default is to listen on all IP addresses
-# This parameter is one of the only security measures that memcached has, so make sure
-# it's listening on a firewalled interface.
--l {{ memcache_server }}
-
-# Limit the number of simultaneous incoming connections. The daemon default is 1024
-# -c 1024
-
-# Lock down all paged memory. Consult with the README and homepage before you do this
-# -k
-
-# Return error when memory is exhausted (rather than removing items)
-# -M
-
-# Maximize core file limit
-# -r
diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
deleted file mode 100644
index f614b3f..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
+++ /dev/null
@@ -1,29 +0,0 @@
-{% if endpoints -%}
-{% for ext_port in ext_ports -%}
-Listen {{ ext_port }}
-{% endfor -%}
-{% for address, endpoint, ext, int in endpoints -%}
-<VirtualHost {{ address }}:{{ ext }}>
- ServerName {{ endpoint }}
- SSLEngine on
- SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
- SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8
- SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
- ProxyPass / http://localhost:{{ int }}/
- ProxyPassReverse / http://localhost:{{ int }}/
- ProxyPreserveHost on
- RequestHeader set X-Forwarded-Proto "https"
-</VirtualHost>
-{% endfor -%}
-<Proxy *>
- Order deny,allow
- Allow from all
-</Proxy>
-<Location />
- Order allow,deny
- Allow from all
-</Location>
-{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
deleted file mode 100644
index f614b3f..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
+++ /dev/null
@@ -1,29 +0,0 @@
-{% if endpoints -%}
-{% for ext_port in ext_ports -%}
-Listen {{ ext_port }}
-{% endfor -%}
-{% for address, endpoint, ext, int in endpoints -%}
-<VirtualHost {{ address }}:{{ ext }}>
- ServerName {{ endpoint }}
- SSLEngine on
- SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
- SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8
- SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
- ProxyPass / http://localhost:{{ int }}/
- ProxyPassReverse / http://localhost:{{ int }}/
- ProxyPreserveHost on
- RequestHeader set X-Forwarded-Proto "https"
-</VirtualHost>
-{% endfor -%}
-<Proxy *>
- Order deny,allow
- Allow from all
-</Proxy>
-<Location />
- Order allow,deny
- Allow from all
-</Location>
-{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
deleted file mode 100644
index 5dcebe7..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
+++ /dev/null
@@ -1,12 +0,0 @@
-{% if auth_host -%}
-[keystone_authtoken]
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
-auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
-auth_plugin = password
-project_domain_id = default
-user_domain_id = default
-project_name = {{ admin_tenant_name }}
-username = {{ admin_user }}
-password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy
deleted file mode 100644
index 9356b2b..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy
+++ /dev/null
@@ -1,10 +0,0 @@
-{% if auth_host -%}
-[keystone_authtoken]
-# Juno specific config (Bug #1557223)
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
-identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
-admin_tenant_name = {{ admin_tenant_name }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka
deleted file mode 100644
index c281868..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka
+++ /dev/null
@@ -1,22 +0,0 @@
-{% if auth_host -%}
-[keystone_authtoken]
-auth_type = password
-{% if api_version == "3" -%}
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3
-auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v3
-project_domain_name = {{ admin_domain_name }}
-user_domain_name = {{ admin_domain_name }}
-{% else -%}
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
-auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
-project_domain_name = default
-user_domain_name = default
-{% endif -%}
-project_name = {{ admin_tenant_name }}
-username = {{ admin_user }}
-password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-{% if use_memcache == true %}
-memcached_servers = {{ memcache_url }}
-{% endif -%}
-{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache
deleted file mode 100644
index e056a32..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache
+++ /dev/null
@@ -1,6 +0,0 @@
-[cache]
-{% if memcache_url %}
-enabled = true
-backend = oslo_cache.memcache_pool
-memcache_servers = {{ memcache_url }}
-{% endif %}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit
deleted file mode 100644
index bed2216..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit
+++ /dev/null
@@ -1,10 +0,0 @@
-[oslo_messaging_rabbit]
-{% if rabbitmq_ha_queues -%}
-rabbit_ha_queues = True
-{% endif -%}
-{% if rabbit_ssl_port -%}
-ssl = True
-{% endif -%}
-{% if rabbit_ssl_ca -%}
-ssl_ca_file = {{ rabbit_ssl_ca }}
-{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware
deleted file mode 100644
index dd73230..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware
+++ /dev/null
@@ -1,5 +0,0 @@
-[oslo_middleware]
-
-# Bug #1758675
-enable_proxy_headers_parsing = true
-
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications
deleted file mode 100644
index 021a3c2..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications
+++ /dev/null
@@ -1,11 +0,0 @@
-{% if transport_url -%}
-[oslo_messaging_notifications]
-driver = messagingv2
-transport_url = {{ transport_url }}
-{% if notification_topics -%}
-topics = {{ notification_topics }}
-{% endif -%}
-{% if notification_format -%}
-notification_format = {{ notification_format }}
-{% endif -%}
-{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
deleted file mode 100644
index b444c9c..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
+++ /dev/null
@@ -1,22 +0,0 @@
-{% if rabbitmq_host or rabbitmq_hosts -%}
-[oslo_messaging_rabbit]
-rabbit_userid = {{ rabbitmq_user }}
-rabbit_virtual_host = {{ rabbitmq_virtual_host }}
-rabbit_password = {{ rabbitmq_password }}
-{% if rabbitmq_hosts -%}
-rabbit_hosts = {{ rabbitmq_hosts }}
-{% if rabbitmq_ha_queues -%}
-rabbit_ha_queues = True
-rabbit_durable_queues = False
-{% endif -%}
-{% else -%}
-rabbit_host = {{ rabbitmq_host }}
-{% endif -%}
-{% if rabbit_ssl_port -%}
-rabbit_use_ssl = True
-rabbit_port = {{ rabbit_ssl_port }}
-{% if rabbit_ssl_ca -%}
-kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
-{% endif -%}
-{% endif -%}
-{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-zeromq b/hooks/charmhelpers/contrib/openstack/templates/section-zeromq
deleted file mode 100644
index 95f1a76..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/section-zeromq
+++ /dev/null
@@ -1,14 +0,0 @@
-{% if zmq_host -%}
-# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
-rpc_backend = zmq
-rpc_zmq_host = {{ zmq_host }}
-{% if zmq_redis_address -%}
-rpc_zmq_matchmaker = redis
-matchmaker_heartbeat_freq = 15
-matchmaker_heartbeat_ttl = 30
-[matchmaker_redis]
-host = {{ zmq_redis_address }}
-{% else -%}
-rpc_zmq_matchmaker = ring
-{% endif -%}
-{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
deleted file mode 100644
index 23b62a3..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
+++ /dev/null
@@ -1,91 +0,0 @@
-# Configuration file maintained by Juju. Local changes may be overwritten.
-
-{% if port -%}
-Listen {{ port }}
-{% endif -%}
-
-{% if admin_port -%}
-Listen {{ admin_port }}
-{% endif -%}
-
-{% if public_port -%}
-Listen {{ public_port }}
-{% endif -%}
-
-{% if port -%}
-<VirtualHost *:{{ port }}>
- WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
- display-name=%{GROUP}
- WSGIProcessGroup {{ service_name }}
- WSGIScriptAlias / {{ script }}
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/apache2/{{ service_name }}_error.log
- CustomLog /var/log/apache2/{{ service_name }}_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
-{% endif -%}
-
-{% if admin_port -%}
-<VirtualHost *:{{ admin_port }}>
- WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
- display-name=%{GROUP}
- WSGIProcessGroup {{ service_name }}-admin
- WSGIScriptAlias / {{ admin_script }}
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/apache2/{{ service_name }}_error.log
- CustomLog /var/log/apache2/{{ service_name }}_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
-{% endif -%}
-
-{% if public_port -%}
-<VirtualHost *:{{ public_port }}>
- WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
- display-name=%{GROUP}
- WSGIProcessGroup {{ service_name }}-public
- WSGIScriptAlias / {{ public_script }}
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/apache2/{{ service_name }}_error.log
- CustomLog /var/log/apache2/{{ service_name }}_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
-{% endif -%}
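# A hedged sketch of rendering the template above outside of a charm with
# plain jinja2; the context values (ports, paths, names) are invented.
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader(
    'hooks/charmhelpers/contrib/openstack/templates'))
template = env.get_template('wsgi-openstack-api.conf')
print(template.render({
    'port': 8774,              # only the {% if port %} vhost renders
    'service_name': 'nova-api',
    'processes': 3,
    'threads': 10,
    'user': 'nova',
    'group': 'nova',
    'script': '/usr/bin/nova-api-wsgi',
}))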
diff --git a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
deleted file mode 100644
index 23b62a3..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
+++ /dev/null
@@ -1,91 +0,0 @@
-# Configuration file maintained by Juju. Local changes may be overwritten.
-
-{% if port -%}
-Listen {{ port }}
-{% endif -%}
-
-{% if admin_port -%}
-Listen {{ admin_port }}
-{% endif -%}
-
-{% if public_port -%}
-Listen {{ public_port }}
-{% endif -%}
-
-{% if port -%}
-<VirtualHost *:{{ port }}>
- WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
- display-name=%{GROUP}
- WSGIProcessGroup {{ service_name }}
- WSGIScriptAlias / {{ script }}
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/apache2/{{ service_name }}_error.log
- CustomLog /var/log/apache2/{{ service_name }}_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
-{% endif -%}
-
-{% if admin_port -%}
-<VirtualHost *:{{ admin_port }}>
- WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
- display-name=%{GROUP}
- WSGIProcessGroup {{ service_name }}-admin
- WSGIScriptAlias / {{ admin_script }}
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/apache2/{{ service_name }}_error.log
- CustomLog /var/log/apache2/{{ service_name }}_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
-{% endif -%}
-
-{% if public_port -%}
-<VirtualHost *:{{ public_port }}>
- WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
- display-name=%{GROUP}
- WSGIProcessGroup {{ service_name }}-public
- WSGIScriptAlias / {{ public_script }}
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/apache2/{{ service_name }}_error.log
- CustomLog /var/log/apache2/{{ service_name }}_access.log combined
-
- <Directory /usr/bin>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
-{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py
deleted file mode 100644
index 050f8af..0000000
--- a/hooks/charmhelpers/contrib/openstack/templating.py
+++ /dev/null
@@ -1,379 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import six
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- ERROR,
- INFO,
- TRACE
-)
-from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
-
-try:
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-except ImportError:
- apt_update(fatal=True)
- if six.PY2:
- apt_install('python-jinja2', fatal=True)
- else:
- apt_install('python3-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-
-
-class OSConfigException(Exception):
- pass
-
-
-def get_loader(templates_dir, os_release):
- """
- Create a jinja2.ChoiceLoader containing template dirs up to
- and including os_release. If a release-specific template directory
- is missing at templates_dir, it is omitted from the loader.
- templates_dir is added to the bottom of the search list as a base
- loading dir.
-
- A charm may also ship a templates dir with this module
- and it will be appended to the bottom of the search list, eg::
-
- hooks/charmhelpers/contrib/openstack/templates
-
- :param templates_dir (str): Base template directory containing release
- sub-directories.
- :param os_release (str): OpenStack release codename to construct template
- loader.
- :returns: jinja2.ChoiceLoader constructed with a list of
- jinja2.FilesystemLoaders, ordered in descending
- order by OpenStack release.
- """
- tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
- for rel in six.itervalues(OPENSTACK_CODENAMES)]
-
- if not os.path.isdir(templates_dir):
- log('Templates directory not found @ %s.' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- # the bottom contains templates_dir and possibly a common templates dir
- # shipped with the helper.
- loaders = [FileSystemLoader(templates_dir)]
- helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
- if os.path.isdir(helper_templates):
- loaders.append(FileSystemLoader(helper_templates))
-
- for rel, tmpl_dir in tmpl_dirs:
- if os.path.isdir(tmpl_dir):
- loaders.insert(0, FileSystemLoader(tmpl_dir))
- if rel == os_release:
- break
- # demote this log to the lowest level; we don't really need to see these
- # logs in production even when debugging.
- log('Creating choice loader with dirs: %s' %
- [l.searchpath for l in loaders], level=TRACE)
- return ChoiceLoader(loaders)
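# An illustration of the resulting search order, assuming
# templates_dir='/tmp/templates' contains 'liberty' and 'mitaka' subdirs and
# os_release='mitaka'; release dirs are searched newest-first, then the base
# dir, then the helper-shipped templates dir (if present).
search_order = [
    '/tmp/templates/mitaka',
    '/tmp/templates/liberty',
    '/tmp/templates',
    'hooks/charmhelpers/contrib/openstack/templates',
]
print(search_order)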
-
-
-class OSConfigTemplate(object):
- """
- Associates a config file template with a list of context generators.
- Responsible for constructing a template context based on those generators.
- """
-
- def __init__(self, config_file, contexts, config_template=None):
- self.config_file = config_file
-
- if hasattr(contexts, '__call__'):
- self.contexts = [contexts]
- else:
- self.contexts = contexts
-
- self._complete_contexts = []
-
- self.config_template = config_template
-
- def context(self):
- ctxt = {}
- for context in self.contexts:
- _ctxt = context()
- if _ctxt:
- ctxt.update(_ctxt)
- # track interfaces for every complete context.
- [self._complete_contexts.append(interface)
- for interface in context.interfaces
- if interface not in self._complete_contexts]
- return ctxt
-
- def complete_contexts(self):
- '''
- Return a list of interfaces that have satisfied contexts.
- '''
- if self._complete_contexts:
- return self._complete_contexts
- self.context()
- return self._complete_contexts
-
- @property
- def is_string_template(self):
- """:returns: Boolean if this instance is a template initialised with a string"""
- return self.config_template is not None
-
-
-class OSConfigRenderer(object):
- """
- This class provides a common templating system to be used by OpenStack
- charms. It is intended to help charms share common code and templates,
- and ease the burden of managing config templates across multiple OpenStack
- releases.
-
- Basic usage::
-
- # import some common context generates from charmhelpers
- from charmhelpers.contrib.openstack import context
-
- # Create a renderer object for a specific OS release.
- configs = OSConfigRenderer(templates_dir='/tmp/templates',
- openstack_release='folsom')
- # register some config files with context generators.
- configs.register(config_file='/etc/nova/nova.conf',
- contexts=[context.SharedDBContext(),
- context.AMQPContext()])
- configs.register(config_file='/etc/nova/api-paste.ini',
- contexts=[context.IdentityServiceContext()])
- configs.register(config_file='/etc/haproxy/haproxy.conf',
- contexts=[context.HAProxyContext()])
- configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
- contexts=[context.ExtraPolicyContext(),
- context.KeystoneContext()],
- config_template=hookenv.config('extra-policy'))
- # write out a single config
- configs.write('/etc/nova/nova.conf')
- # write out all registered configs
- configs.write_all()
-
- **OpenStack Releases and template loading**
-
- When the object is instantiated, it is associated with a specific OS
- release. This dictates how the template loader will be constructed.
-
- The constructed loader attempts to load the template from several places
- in the following order:
- - from the most recent OS release-specific template dir (if one exists)
- - the base templates_dir
- - a template directory shipped in the charm with this helper file.
-
- For the example above, '/tmp/templates' contains the following structure::
-
- /tmp/templates/nova.conf
- /tmp/templates/api-paste.ini
- /tmp/templates/grizzly/api-paste.ini
- /tmp/templates/havana/api-paste.ini
-
- Since it was registered with the grizzly release, it first searches
- the grizzly directory for nova.conf, then the templates dir.
-
- When writing api-paste.ini, it will find the template in the grizzly
- directory.
-
- If the object were created with folsom, it would fall back to the
- base templates dir for its api-paste.ini template.
-
- This system should help manage changes in config files through
- openstack releases, allowing charms to fall back to the most recently
- updated config template for a given release
-
- The haproxy.conf, since it is not shipped in the templates dir, will
- be loaded from the module directory's template directory, eg
- $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
- us to ship common templates (haproxy, apache) with the helpers.
-
- **Context generators**
-
- Context generators are used to generate template contexts during hook
- execution. Doing so may require inspecting service relations, charm
- config, etc. When registered, a config file is associated with a list
- of generators. When a template is rendered and written, all context
- generates are called in a chain to generate the context dictionary
- passed to the jinja2 template. See context.py for more info.
- """
- def __init__(self, templates_dir, openstack_release):
- if not os.path.isdir(templates_dir):
- log('Could not locate templates dir %s' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- self.templates_dir = templates_dir
- self.openstack_release = openstack_release
- self.templates = {}
- self._tmpl_env = None
-
- if None in [Environment, ChoiceLoader, FileSystemLoader]:
- # if this code is running, the object is created pre-install hook.
- # jinja2 shouldn't get touched until the module is reloaded on next
- # hook execution, with proper jinja2 bits successfully imported.
- if six.PY2:
- apt_install('python-jinja2')
- else:
- apt_install('python3-jinja2')
-
- def register(self, config_file, contexts, config_template=None):
- """
- Register a config file with a list of context generators to be called
- during rendering.
- config_template can be used to load a template from a string instead of
- using template loaders and template files.
- :param config_file (str): a path where a config file will be rendered
- :param contexts (list): a list of context dictionaries with kv pairs
- :param config_template (str): an optional template string to use
- """
- self.templates[config_file] = OSConfigTemplate(
- config_file=config_file,
- contexts=contexts,
- config_template=config_template
- )
- log('Registered config file: {}'.format(config_file),
- level=INFO)
-
- def _get_tmpl_env(self):
- if not self._tmpl_env:
- loader = get_loader(self.templates_dir, self.openstack_release)
- self._tmpl_env = Environment(loader=loader)
-
- def _get_template(self, template):
- self._get_tmpl_env()
- template = self._tmpl_env.get_template(template)
- log('Loaded template from {}'.format(template.filename),
- level=INFO)
- return template
-
- def _get_template_from_string(self, ostmpl):
- '''
- Get a jinja2 template object from a string.
- :param ostmpl: OSConfigTemplate to use as a data source.
- '''
- self._get_tmpl_env()
- template = self._tmpl_env.from_string(ostmpl.config_template)
- log('Loaded a template from a string for {}'.format(
- ostmpl.config_file),
- level=INFO)
- return template
-
- def render(self, config_file):
- if config_file not in self.templates:
- log('Config not registered: {}'.format(config_file), level=ERROR)
- raise OSConfigException
-
- ostmpl = self.templates[config_file]
- ctxt = ostmpl.context()
-
- if ostmpl.is_string_template:
- template = self._get_template_from_string(ostmpl)
- log('Rendering from a string template: '
- '{}'.format(config_file),
- level=INFO)
- else:
- _tmpl = os.path.basename(config_file)
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound:
- # if no template is found with basename, try looking
- # for it using a munged full path, eg:
- # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
- _tmpl = '_'.join(config_file.split('/')[1:])
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound as e:
- log('Could not load template from {} by {} or {}.'
- ''.format(
- self.templates_dir,
- os.path.basename(config_file),
- _tmpl
- ),
- level=ERROR)
- raise e
-
- log('Rendering from template: {}'.format(config_file),
- level=INFO)
- return template.render(ctxt)
-
- def write(self, config_file):
- """
- Write a single config file, raises if config file is not registered.
- """
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
-
- _out = self.render(config_file)
- if six.PY3:
- _out = _out.encode('UTF-8')
-
- with open(config_file, 'wb') as out:
- out.write(_out)
-
- log('Wrote template %s.' % config_file, level=INFO)
-
- def write_all(self):
- """
- Write out all registered config files.
- """
- [self.write(k) for k in six.iterkeys(self.templates)]
-
- def set_release(self, openstack_release):
- """
- Resets the template environment and generates a new template loader
- based on the new openstack release.
- """
- self._tmpl_env = None
- self.openstack_release = openstack_release
- self._get_tmpl_env()
-
- def complete_contexts(self):
- '''
- Returns a list of context interfaces that yield a complete context.
- '''
- interfaces = []
- [interfaces.extend(i.complete_contexts())
- for i in six.itervalues(self.templates)]
- return interfaces
-
- def get_incomplete_context_data(self, interfaces):
- '''
- Return dictionary of relation status of interfaces and any missing
- required context data. Example:
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}}
- '''
- incomplete_context_data = {}
-
- for i in six.itervalues(self.templates):
- for context in i.contexts:
- for interface in interfaces:
- related = False
- if interface in context.interfaces:
- related = context.get_related()
- missing_data = context.missing_data
- if missing_data:
- incomplete_context_data[interface] = {'missing_data': missing_data}
- if related:
- if incomplete_context_data.get(interface):
- incomplete_context_data[interface].update({'related': True})
- else:
- incomplete_context_data[interface] = {'related': True}
- else:
- incomplete_context_data[interface] = {'related': False}
- return incomplete_context_data
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
deleted file mode 100644
index e5e2536..0000000
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ /dev/null
@@ -1,1843 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Common python helper functions used for OpenStack charms.
-from collections import OrderedDict
-from functools import wraps
-
-import subprocess
-import json
-import os
-import sys
-import re
-import itertools
-import functools
-
-import six
-import traceback
-import uuid
-import yaml
-
-from charmhelpers import deprecate
-
-from charmhelpers.contrib.network import ip
-
-from charmhelpers.core import unitdata
-
-from charmhelpers.core.hookenv import (
- action_fail,
- action_set,
- config,
- log as juju_log,
- charm_dir,
- INFO,
- ERROR,
- related_units,
- relation_ids,
- relation_set,
- status_set,
- hook_name,
- application_version_set,
- cached,
-)
-
-from charmhelpers.core.strutils import BasicStringComparator
-
-from charmhelpers.contrib.storage.linux.lvm import (
- deactivate_lvm_volume_group,
- is_lvm_physical_volume,
- remove_lvm_physical_volume,
-)
-
-from charmhelpers.contrib.network.ip import (
- get_ipv6_addr,
- is_ipv6,
- port_has_listener,
-)
-
-from charmhelpers.core.host import (
- lsb_release,
- mounts,
- umount,
- service_running,
- service_pause,
- service_resume,
- service_stop,
- service_start,
- restart_on_change_helper,
-)
-from charmhelpers.fetch import (
- apt_cache,
- import_key as fetch_import_key,
- add_source as fetch_add_source,
- SourceConfigError,
- GPGKeyError,
- get_upstream_version,
- filter_missing_packages
-)
-
-from charmhelpers.fetch.snap import (
- snap_install,
- snap_refresh,
- valid_snap_channel,
-)
-
-from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
-from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
-from charmhelpers.contrib.openstack.exceptions import OSContextError
-
-CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
-CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
-
-DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
- 'restricted main multiverse universe')
-
-OPENSTACK_RELEASES = (
- 'diablo',
- 'essex',
- 'folsom',
- 'grizzly',
- 'havana',
- 'icehouse',
- 'juno',
- 'kilo',
- 'liberty',
- 'mitaka',
- 'newton',
- 'ocata',
- 'pike',
- 'queens',
- 'rocky',
- 'stein',
-)
-
-UBUNTU_OPENSTACK_RELEASE = OrderedDict([
- ('oneiric', 'diablo'),
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
- ('yakkety', 'newton'),
- ('zesty', 'ocata'),
- ('artful', 'pike'),
- ('bionic', 'queens'),
- ('cosmic', 'rocky'),
- ('disco', 'stein'),
-])
-
-
-OPENSTACK_CODENAMES = OrderedDict([
- ('2011.2', 'diablo'),
- ('2012.1', 'essex'),
- ('2012.2', 'folsom'),
- ('2013.1', 'grizzly'),
- ('2013.2', 'havana'),
- ('2014.1', 'icehouse'),
- ('2014.2', 'juno'),
- ('2015.1', 'kilo'),
- ('2015.2', 'liberty'),
- ('2016.1', 'mitaka'),
- ('2016.2', 'newton'),
- ('2017.1', 'ocata'),
- ('2017.2', 'pike'),
- ('2018.1', 'queens'),
- ('2018.2', 'rocky'),
- ('2019.1', 'stein'),
-])
-
-# The ugly duckling - must list releases oldest to newest
-SWIFT_CODENAMES = OrderedDict([
- ('diablo',
- ['1.4.3']),
- ('essex',
- ['1.4.8']),
- ('folsom',
- ['1.7.4']),
- ('grizzly',
- ['1.7.6', '1.7.7', '1.8.0']),
- ('havana',
- ['1.9.0', '1.9.1', '1.10.0']),
- ('icehouse',
- ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
- ('juno',
- ['2.0.0', '2.1.0', '2.2.0']),
- ('kilo',
- ['2.2.1', '2.2.2']),
- ('liberty',
- ['2.3.0', '2.4.0', '2.5.0']),
- ('mitaka',
- ['2.5.0', '2.6.0', '2.7.0']),
- ('newton',
- ['2.8.0', '2.9.0', '2.10.0']),
- ('ocata',
- ['2.11.0', '2.12.0', '2.13.0']),
- ('pike',
- ['2.13.0', '2.15.0']),
- ('queens',
- ['2.16.0', '2.17.0']),
- ('rocky',
- ['2.18.0', '2.19.0']),
- ('stein',
- ['2.20.0']),
-])
-
-# >= Liberty version->codename mapping
-PACKAGE_CODENAMES = {
- 'nova-common': OrderedDict([
- ('12', 'liberty'),
- ('13', 'mitaka'),
- ('14', 'newton'),
- ('15', 'ocata'),
- ('16', 'pike'),
- ('17', 'queens'),
- ('18', 'rocky'),
- ('19', 'stein'),
- ]),
- 'neutron-common': OrderedDict([
- ('7', 'liberty'),
- ('8', 'mitaka'),
- ('9', 'newton'),
- ('10', 'ocata'),
- ('11', 'pike'),
- ('12', 'queens'),
- ('13', 'rocky'),
- ('14', 'stein'),
- ]),
- 'cinder-common': OrderedDict([
- ('7', 'liberty'),
- ('8', 'mitaka'),
- ('9', 'newton'),
- ('10', 'ocata'),
- ('11', 'pike'),
- ('12', 'queens'),
- ('13', 'rocky'),
- ('14', 'stein'),
- ]),
- 'keystone': OrderedDict([
- ('8', 'liberty'),
- ('9', 'mitaka'),
- ('10', 'newton'),
- ('11', 'ocata'),
- ('12', 'pike'),
- ('13', 'queens'),
- ('14', 'rocky'),
- ('15', 'stein'),
- ]),
- 'horizon-common': OrderedDict([
- ('8', 'liberty'),
- ('9', 'mitaka'),
- ('10', 'newton'),
- ('11', 'ocata'),
- ('12', 'pike'),
- ('13', 'queens'),
- ('14', 'rocky'),
- ('15', 'stein'),
- ]),
- 'ceilometer-common': OrderedDict([
- ('5', 'liberty'),
- ('6', 'mitaka'),
- ('7', 'newton'),
- ('8', 'ocata'),
- ('9', 'pike'),
- ('10', 'queens'),
- ('11', 'rocky'),
- ('12', 'stein'),
- ]),
- 'heat-common': OrderedDict([
- ('5', 'liberty'),
- ('6', 'mitaka'),
- ('7', 'newton'),
- ('8', 'ocata'),
- ('9', 'pike'),
- ('10', 'queens'),
- ('11', 'rocky'),
- ('12', 'stein'),
- ]),
- 'glance-common': OrderedDict([
- ('11', 'liberty'),
- ('12', 'mitaka'),
- ('13', 'newton'),
- ('14', 'ocata'),
- ('15', 'pike'),
- ('16', 'queens'),
- ('17', 'rocky'),
- ('18', 'stein'),
- ]),
- 'openstack-dashboard': OrderedDict([
- ('8', 'liberty'),
- ('9', 'mitaka'),
- ('10', 'newton'),
- ('11', 'ocata'),
- ('12', 'pike'),
- ('13', 'queens'),
- ('14', 'rocky'),
- ('15', 'stein'),
- ]),
-}
-
-DEFAULT_LOOPBACK_SIZE = '5G'
-
-
-class CompareOpenStackReleases(BasicStringComparator):
- """Provide comparisons of OpenStack releases.
-
- Use in the form of
-
- if CompareOpenStackReleases(release) > 'mitaka':
- # do something with mitaka
- """
- _list = OPENSTACK_RELEASES
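# Usage sketch, assuming this module is importable as shown; comparisons
# follow each codename's position in OPENSTACK_RELEASES, not string order.
from charmhelpers.contrib.openstack.utils import CompareOpenStackReleases

if CompareOpenStackReleases('rocky') >= 'queens':
    print('rocky is queens or later')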
-
-
-def error_out(msg):
- juju_log("FATAL ERROR: %s" % msg, level='ERROR')
- sys.exit(1)
-
-
-def get_installed_semantic_versioned_packages():
- '''Get a list of installed packages which have OpenStack semantic versioning
-
- :returns List of installed packages
- :rtype: [pkg1, pkg2, ...]
- '''
- return filter_missing_packages(PACKAGE_CODENAMES.keys())
-
-
-def get_os_codename_install_source(src):
- '''Derive OpenStack release codename from a given installation source.'''
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = ''
- if src is None:
- return rel
- if src in ['distro', 'distro-proposed', 'proposed']:
- try:
- rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
- except KeyError:
- e = 'Could not derive openstack release for '\
- 'this Ubuntu release: %s' % ubuntu_rel
- error_out(e)
- return rel
-
- if src.startswith('cloud:'):
- ca_rel = src.split(':')[1]
- ca_rel = ca_rel.split('-')[1].split('/')[0]
- return ca_rel
-
- # Best guess match based on deb string provided
- if (src.startswith('deb') or
- src.startswith('ppa') or
- src.startswith('snap')):
- for v in OPENSTACK_CODENAMES.values():
- if v in src:
- return v
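# Worked examples of the parsing above, shown as (input, result) pairs;
# the 'distro' case assumes a bionic host.
examples = [
    ('distro', 'queens'),             # via UBUNTU_OPENSTACK_RELEASE
    ('cloud:bionic-rocky', 'rocky'),  # text between '-' and an optional '/'
    ('cloud:bionic-rocky/proposed', 'rocky'),
    ('deb http://example.com stein main', 'stein'),  # best-guess match
]
for src, expected in examples:
    print('{!r} -> {!r}'.format(src, expected))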
-
-
-def get_os_version_install_source(src):
- codename = get_os_codename_install_source(src)
- return get_os_version_codename(codename)
-
-
-def get_os_codename_version(vers):
- '''Determine OpenStack codename from version number.'''
- try:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
- '''Determine OpenStack version number from codename.'''
- for k, v in six.iteritems(version_map):
- if v == codename:
- return k
- e = 'Could not derive OpenStack version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_os_version_codename_swift(codename):
- '''Determine OpenStack version number of swift from codename.'''
- for k, v in six.iteritems(SWIFT_CODENAMES):
- if k == codename:
- return v[-1]
- e = 'Could not derive swift version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_swift_codename(version):
- '''Determine OpenStack codename that corresponds to swift version.'''
- codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
-
- if len(codenames) > 1:
- # If more than one release codename contains this version we determine
- # the actual codename based on the highest available install source.
- for codename in reversed(codenames):
- releases = UBUNTU_OPENSTACK_RELEASE
- release = [k for k, v in six.iteritems(releases) if codename in v]
- ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
- if six.PY3:
- ret = ret.decode('UTF-8')
- if codename in ret or release[0] in ret:
- return codename
- elif len(codenames) == 1:
- return codenames[0]
-
- # NOTE: fallback - attempt to match with just major.minor version
- match = re.match(r'^(\d+)\.(\d+)', version)
- if match:
- major_minor_version = match.group(0)
- for codename, versions in six.iteritems(SWIFT_CODENAMES):
- for release_version in versions:
- if release_version.startswith(major_minor_version):
- return codename
-
- return None
-
-
-def get_os_codename_package(package, fatal=True):
- '''Derive OpenStack release codename from an installed package.'''
-
- if snap_install_requested():
- cmd = ['snap', 'list', package]
- try:
- out = subprocess.check_output(cmd)
- if six.PY3:
- out = out.decode('UTF-8')
- except subprocess.CalledProcessError:
- return None
- lines = out.split('\n')
- for line in lines:
- if package in line:
- # Second item in list is Version
- return line.split()[1]
-
- import apt_pkg as apt
-
- cache = apt_cache()
-
- try:
- pkg = cache[package]
- except Exception:
- if not fatal:
- return None
- # the package is unknown to the current apt cache.
- e = 'Could not determine version of package with no installation '\
- 'candidate: %s' % package
- error_out(e)
-
- if not pkg.current_ver:
- if not fatal:
- return None
- # package is known, but no version is currently installed.
- e = 'Could not determine version of uninstalled package: %s' % package
- error_out(e)
-
- vers = apt.upstream_version(pkg.current_ver.ver_str)
- if 'swift' in pkg.name:
- # Fully x.y.z match for swift versions
- match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers)
- else:
- # x.y match only for 20XX.X
- # and ignore patch level for other packages
- match = re.match(r'^(\d+)\.(\d+)', vers)
-
- if match:
- vers = match.group(0)
-
- # Generate a major version number for newer semantic
- # versions of openstack projects
- major_vers = vers.split('.')[0]
- # >= Liberty independent project versions
- if (package in PACKAGE_CODENAMES and
- major_vers in PACKAGE_CODENAMES[package]):
- return PACKAGE_CODENAMES[package][major_vers]
- else:
- # < Liberty co-ordinated project versions
- try:
- if 'swift' in pkg.name:
- return get_swift_codename(vers)
- else:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- if not fatal:
- return None
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_package(pkg, fatal=True):
- '''Derive OpenStack version number from an installed package.'''
- codename = get_os_codename_package(pkg, fatal=fatal)
-
- if not codename:
- return None
-
- if 'swift' in pkg:
- vers_map = SWIFT_CODENAMES
- for cname, version in six.iteritems(vers_map):
- if cname == codename:
- return version[-1]
- else:
- vers_map = OPENSTACK_CODENAMES
- for version, cname in six.iteritems(vers_map):
- if cname == codename:
- return version
- # e = "Could not determine OpenStack version for package: %s" % pkg
- # error_out(e)
-
-
-# Module local cache variable for the os_release.
-_os_rel = None
-
-
-def reset_os_release():
- '''Unset the cached os_release version'''
- global _os_rel
- _os_rel = None
-
-
-def os_release(package, base='essex', reset_cache=False):
- '''
- Returns OpenStack release codename from a cached global.
-
- If reset_cache then unset the cached os_release version and return the
- freshly determined version.
-
- If the codename can not be determined from either an installed package or
- the installation source, the earliest release supported by the charm should
- be returned.
- '''
- global _os_rel
- if reset_cache:
- reset_os_release()
- if _os_rel:
- return _os_rel
- _os_rel = (
- get_os_codename_package(package, fatal=False) or
- get_os_codename_install_source(config('openstack-origin')) or
- base)
- return _os_rel
-
-
-@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log)
-def import_key(keyid):
- """Import a key, either ASCII armored, or a GPG key id.
-
- @param keyid: the key in ASCII armor format, or a GPG key id.
- @raises SystemExit() via sys.exit() on failure.
- """
- try:
- return fetch_import_key(keyid)
- except GPGKeyError as e:
- error_out("Could not import key: {}".format(str(e)))
-
-
-def get_source_and_pgp_key(source_and_key):
- """Look for a pgp key ID or ascii-armor key in the given input.
-
- :param source_and_key: String, "source_spec|keyid" where '|keyid' is
- optional.
- :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id
- if there was no '|' in the source_and_key string.
- """
- try:
- # maxsplit=1: everything after the first '|' belongs to the key
- source, key = source_and_key.split('|', 1)
- return source, key or None
- except ValueError:
- return source_and_key, None
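# Worked examples for the '|' convention above, assuming
# get_source_and_pgp_key() from this module is in scope (key IDs invented).
assert get_source_and_pgp_key('cloud:xenial-ocata|ABCD1234') == \
    ('cloud:xenial-ocata', 'ABCD1234')
assert get_source_and_pgp_key('cloud:xenial-ocata') == \
    ('cloud:xenial-ocata', None)
assert get_source_and_pgp_key('cloud:xenial-ocata|') == \
    ('cloud:xenial-ocata', None)  # an empty key collapses to None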
-
-
-@deprecate("use charmhelpers.fetch.add_source() instead.",
- "2017-07", log=juju_log)
-def configure_installation_source(source_plus_key):
- """Configure an installation source.
-
- The functionality is provided by charmhelpers.fetch.add_source()
- The difference between the two functions is that add_source() signature
- requires the key to be passed directly, whereas this function passes an
- optional key by appending '|<key>' to the end of the source specification
- 'source'.
-
- Another difference from add_source() is that the function calls sys.exit(1)
- if the configuration fails, whereas add_source() raises
- SourceConfigError(). A further difference is that add_source()
- silently fails (with a juju_log command) if there is no matching source to
- configure, whereas this function fails with sys.exit(1).
-
- :param source_plus_key: String with an optional '|<key>' suffix -- see
- above for details.
-
- Note that the behaviour on error is to log the error to the juju log and
- then call sys.exit(1).
- """
- if source_plus_key.startswith('snap'):
- # Do nothing for snap installs
- return
- # extract the key if there is one, denoted by a '|' in the rel
- source, key = get_source_and_pgp_key(source_plus_key)
-
- # handle the ordinary sources via add_source
- try:
- fetch_add_source(source, key, fail_invalid=True)
- except SourceConfigError as se:
- error_out(str(se))
-
-
-def config_value_changed(option):
- """
- Determine if config value changed since last call to this function.
- """
- hook_data = unitdata.HookData()
- with hook_data():
- db = unitdata.kv()
- current = config(option)
- saved = db.get(option)
- db.set(option, current)
- if saved is None:
- return False
- return current != saved
-
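A sketch of how a charm might react only to genuine changes (the 'debug' option and the configs object are assumptions):

    if config_value_changed('debug'):
        # only re-render when the stored value actually differs
        configs.write_all()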
-
-def save_script_rc(script_path="scripts/scriptrc", **env_vars):
- """
- Write an rc file in the charm-delivered directory containing
- exported environment variables provided by env_vars. Any charm scripts run
- outside the juju hook environment can source this scriptrc to obtain
- updated config information necessary to perform health checks or
- service changes.
- """
- juju_rc_path = "%s/%s" % (charm_dir(), script_path)
- if not os.path.exists(os.path.dirname(juju_rc_path)):
- os.mkdir(os.path.dirname(juju_rc_path))
- with open(juju_rc_path, 'wt') as rc_script:
- rc_script.write(
- "#!/bin/bash\n")
- for u, p in six.iteritems(env_vars):
- if u != "script_path":
- rc_script.write('export %s=%s\n' % (u, p))
-
-
-def openstack_upgrade_available(package):
- """
- Determines if an OpenStack upgrade is available from installation
- source, based on version of installed package.
-
- :param package: str: Name of installed package.
-
- :returns: bool: Returns True if configured installation source offers
- a newer version of package.
- """
-
- import apt_pkg as apt
- src = config('openstack-origin')
- cur_vers = get_os_version_package(package)
- if not cur_vers:
- # The package has not been installed yet; do not attempt an upgrade
- return False
- if "swift" in package:
- codename = get_os_codename_install_source(src)
- avail_vers = get_os_version_codename_swift(codename)
- else:
- avail_vers = get_os_version_install_source(src)
- apt.init()
- return apt.version_compare(avail_vers, cur_vers) >= 1
-
-
-def ensure_block_device(block_device):
- '''
- Confirm block_device, create as loopback if necessary.
-
- :param block_device: str: Full path of block device to ensure.
-
- :returns: str: Full path of ensured block device.
- '''
- _none = ['None', 'none', None]
- if (block_device in _none):
- error_out('ensure_block_device(): Missing required input: block_device=%s.'
- % block_device)
-
- if block_device.startswith('/dev/'):
- bdev = block_device
- elif block_device.startswith('/'):
- _bd = block_device.split('|')
- if len(_bd) == 2:
- bdev, size = _bd
- else:
- bdev = block_device
- size = DEFAULT_LOOPBACK_SIZE
- bdev = ensure_loopback_device(bdev, size)
- else:
- bdev = '/dev/%s' % block_device
-
- if not is_block_device(bdev):
- error_out('Failed to locate valid block device at %s' % bdev)
-
- return bdev
-
-
-def clean_storage(block_device):
- '''
- Ensures a block device is clean. That is:
- - unmounted
- - any lvm volume groups are deactivated
- - any lvm physical device signatures removed
- - partition table wiped
-
- :param block_device: str: Full path to block device to clean.
- '''
- for mp, d in mounts():
- if d == block_device:
- juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
- (d, mp), level=INFO)
- umount(mp, persist=True)
-
- if is_lvm_physical_volume(block_device):
- deactivate_lvm_volume_group(block_device)
- remove_lvm_physical_volume(block_device)
- else:
- zap_disk(block_device)
-
-
-is_ip = ip.is_ip
-ns_query = ip.ns_query
-get_host_ip = ip.get_host_ip
-get_hostname = ip.get_hostname
-
-
-def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
- mm_map = {}
- if os.path.isfile(mm_file):
- with open(mm_file, 'r') as f:
- mm_map = json.load(f)
- return mm_map
-
-
-def sync_db_with_multi_ipv6_addresses(database, database_user,
- relation_prefix=None):
- hosts = get_ipv6_addr(dynamic_only=False)
-
- if config('vip'):
- vips = config('vip').split()
- for vip in vips:
- if vip and is_ipv6(vip):
- hosts.append(vip)
-
- kwargs = {'database': database,
- 'username': database_user,
- 'hostname': json.dumps(hosts)}
-
- if relation_prefix:
- for key in list(kwargs.keys()):
- kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
- del kwargs[key]
-
- for rid in relation_ids('shared-db'):
- relation_set(relation_id=rid, **kwargs)
-
-
-def os_requires_version(ostack_release, pkg):
- """
- Decorator for hook to specify minimum supported release
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args):
- if os_release(pkg) < ostack_release:
- raise Exception("This hook is not supported on releases"
- " before %s" % ostack_release)
- f(*args)
- return wrapped_f
- return wrap
-
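Typical usage as a hook guard, a sketch (the hook body and release are illustrative):

    @os_requires_version('mitaka', 'nova-common')
    def mitaka_or_later_hook():
        # raises Exception on releases earlier than mitaka
        pass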
-
-def os_workload_status(configs, required_interfaces, charm_func=None):
- """
- Decorator to set workload status based on complete contexts
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args, **kwargs):
- # Run the original function first
- f(*args, **kwargs)
- # Set workload status now that contexts have been
- # acted on
- set_os_workload_status(configs, required_interfaces, charm_func)
- return wrapped_f
- return wrap
-
-
-def set_os_workload_status(configs, required_interfaces, charm_func=None,
- services=None, ports=None):
- """Set the state of the workload status for the charm.
-
- This calls _determine_os_workload_status() to get the new state, message
- and sets the status using status_set()
-
- @param configs: a templating.OSConfigRenderer() object
- @param required_interfaces: {generic: [specific, specific2, ...]}
- @param charm_func: a callable function that returns state, message. The
- signature is charm_func(configs) -> (state, message)
- @param services: list of strings OR dictionary specifying services/ports
- @param ports: OPTIONAL list of port numbers.
- @returns state, message: the new workload status, user message
- """
- state, message = _determine_os_workload_status(
- configs, required_interfaces, charm_func, services, ports)
- status_set(state, message)
-
-
-def _determine_os_workload_status(
- configs, required_interfaces, charm_func=None,
- services=None, ports=None):
- """Determine the state of the workload status for the charm.
-
- This function returns the new workload status for the charm based
- on the state of the interfaces, the paused state and whether the
- services are actually running and any specified ports are open.
-
- This checks:
-
- 1. if the unit should be paused, that it is actually paused. If so the
- state is 'maintenance' + message, else 'blocked'.
- 2. that the interfaces/relations are complete. If they are not then
- it sets the state to either 'blocked' or 'waiting' with an appropriate
- message.
- 3. If all the relation data is set, then it checks that the actual
- services really are running. If not, it sets the state to 'blocked'.
-
- If everything is okay then the state returns 'active'.
-
- @param configs: a templating.OSConfigRenderer() object
- @param required_interfaces: {generic: [specific, specific2, ...]}
- @param charm_func: a callable function that returns state, message. The
- signature is charm_func(configs) -> (state, message)
- @param services: list of strings OR dictionary specifying services/ports
- @param ports: OPTIONAL list of port numbers.
- @returns state, message: the new workload status, user message
- """
- state, message = _ows_check_if_paused(services, ports)
-
- if state is None:
- state, message = _ows_check_generic_interfaces(
- configs, required_interfaces)
-
- if state != 'maintenance' and charm_func:
- # _ows_check_charm_func() may modify the state, message
- state, message = _ows_check_charm_func(
- state, message, lambda: charm_func(configs))
-
- if state is None:
- state, message = _ows_check_services_running(services, ports)
-
- if state is None:
- state = 'active'
- message = "Unit is ready"
- juju_log(message, 'INFO')
-
- return state, message
-
-
-def _ows_check_if_paused(services=None, ports=None):
- """Check if the unit is supposed to be paused, and if so check that the
- services/ports (if passed) are actually stopped/not being listened to.
-
- If the unit isn't supposed to be paused, just return None, None
-
- If the unit is performing a series upgrade, return a message indicating
- this.
-
- @param services: OPTIONAL services spec or list of service names.
- @param ports: OPTIONAL list of port numbers.
- @returns state, message or None, None
- """
- if is_unit_upgrading_set():
- state, message = check_actually_paused(services=services,
- ports=ports)
- if state is None:
- # we're paused okay; report blocked until the series upgrade completes
- state = "blocked"
- message = ("Ready for do-release-upgrade and reboot. "
- "Set complete when finished.")
- return state, message
-
- if is_unit_paused_set():
- state, message = check_actually_paused(services=services,
- ports=ports)
- if state is None:
- # we're paused okay, so set maintenance and return
- state = "maintenance"
- message = "Paused. Use 'resume' action to resume normal service."
- return state, message
- return None, None
-
-
-def _ows_check_generic_interfaces(configs, required_interfaces):
- """Check the complete contexts to determine the workload status.
-
- - Checks for missing or incomplete contexts
- - juju log details of missing required data.
- - determines the correct workload status
- - creates an appropriate message for status_set(...)
-
- if there are no problems then the function returns None, None
-
- @param configs: a templating.OSConfigRenderer() object
- @param required_interfaces: {generic_interface: [specific_interface], }
- @returns state, message or None, None
- """
- incomplete_rel_data = incomplete_relation_data(configs,
- required_interfaces)
- state = None
- message = None
- missing_relations = set()
- incomplete_relations = set()
-
- for generic_interface, relations_states in incomplete_rel_data.items():
- related_interface = None
- missing_data = {}
- # Related or not?
- for interface, relation_state in relations_states.items():
- if relation_state.get('related'):
- related_interface = interface
- missing_data = relation_state.get('missing_data')
- break
- # No relation ID for the generic_interface?
- if not related_interface:
- juju_log("{} relation is missing and must be related for "
- "functionality. ".format(generic_interface), 'WARN')
- state = 'blocked'
- missing_relations.add(generic_interface)
- else:
- # Relation ID exists but no related unit
- if not missing_data:
- # Edge case - relation ID exists but is departing
- _hook_name = hook_name()
- if (('departed' in _hook_name or 'broken' in _hook_name) and
- related_interface in _hook_name):
- state = 'blocked'
- missing_relations.add(generic_interface)
- juju_log("{} relation's interface, {}, "
- "relationship is departed or broken "
- "and is required for functionality."
- "".format(generic_interface, related_interface),
- "WARN")
- # Normal case relation ID exists but no related unit
- # (joining)
- else:
- juju_log("{} relations's interface, {}, is related but has"
- " no units in the relation."
- "".format(generic_interface, related_interface),
- "INFO")
- # Related unit exists and data missing on the relation
- else:
- juju_log("{} relation's interface, {}, is related awaiting "
- "the following data from the relationship: {}. "
- "".format(generic_interface, related_interface,
- ", ".join(missing_data)), "INFO")
- if state != 'blocked':
- state = 'waiting'
- if generic_interface not in missing_relations:
- incomplete_relations.add(generic_interface)
-
- if missing_relations:
- message = "Missing relations: {}".format(", ".join(missing_relations))
- if incomplete_relations:
- message += "; incomplete relations: {}" \
- "".format(", ".join(incomplete_relations))
- state = 'blocked'
- elif incomplete_relations:
- message = "Incomplete relations: {}" \
- "".format(", ".join(incomplete_relations))
- state = 'waiting'
-
- return state, message
-
-
-def _ows_check_charm_func(state, message, charm_func_with_configs):
- """Run a custom check function for the charm to see if it wants to
- change the state. This is only run if not in 'maintenance' and
- tests to see if the new state is more important than the previous
- one determined by the interfaces/relations check.
-
- @param state: the previously determined state so far.
- @param message: the user orientated message so far.
- @param charm_func_with_configs: a callable that returns state, message
- @returns state, message strings.
- """
- if charm_func_with_configs:
- charm_state, charm_message = charm_func_with_configs()
- if (charm_state != 'active' and
- charm_state != 'unknown' and
- charm_state is not None):
- state = workload_state_compare(state, charm_state)
- if message:
- charm_message = charm_message.replace("Incomplete relations: ",
- "")
- message = "{}, {}".format(message, charm_message)
- else:
- message = charm_message
- return state, message
-
-
-def _ows_check_services_running(services, ports):
- """Check that the services that should be running are actually running
- and that any ports specified are being listened to.
-
- @param services: list of strings OR dictionary specifying services/ports
- @param ports: list of ports
- @returns state, message: strings or None, None
- """
- messages = []
- state = None
- if services is not None:
- services = _extract_services_list_helper(services)
- services_running, running = _check_running_services(services)
- if not all(running):
- messages.append(
- "Services not running that should be: {}"
- .format(", ".join(_filter_tuples(services_running, False))))
- state = 'blocked'
- # also verify that the ports that should be open are open
- # NB: ServiceManager objects only OPTIONALLY have ports
- map_not_open, ports_open = (
- _check_listening_on_services_ports(services))
- if not all(ports_open):
- # find which service has missing ports. They are in service
- # order which makes it a bit easier.
- message_parts = {service: ", ".join([str(v) for v in open_ports])
- for service, open_ports in map_not_open.items()}
- message = ", ".join(
- ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
- messages.append(
- "Services with ports not open that should be: {}"
- .format(message))
- state = 'blocked'
-
- if ports is not None:
- # and we can also check ports which we don't know the service for
- ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
- if not all(ports_open_bools):
- messages.append(
- "Ports which should be open, but are not: {}"
- .format(", ".join([str(p) for p, v in ports_open
- if not v])))
- state = 'blocked'
-
- if state is not None:
- message = "; ".join(messages)
- return state, message
-
- return None, None
-
-
-def _extract_services_list_helper(services):
- """Extract a OrderedDict of {service: [ports]} of the supplied services
- for use by the other functions.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param services: see above
- @returns OrderedDict(service: [ports], ...)
- """
- if services is None:
- return {}
- if isinstance(services, dict):
- services = services.values()
- # either extract the list of services from the dictionary, or if
- # it is a simple string, use that. i.e. works with mixed lists.
- _s = OrderedDict()
- for s in services:
- if isinstance(s, dict) and 'service' in s:
- _s[s['service']] = s.get('ports', [])
- if isinstance(s, str):
- _s[s] = []
- return _s
-
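The accepted input shapes normalise as follows (service names invented):

    _extract_services_list_helper(['apache2', 'haproxy'])
    # -> OrderedDict([('apache2', []), ('haproxy', [])])
    _extract_services_list_helper([{'service': 'haproxy', 'ports': [80, 443]}])
    # -> OrderedDict([('haproxy', [80, 443])])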
-
-def _check_running_services(services):
- """Check that the services dict provided is actually running and provide
- a list of (service, boolean) tuples for each service.
-
- Returns both a zipped list of (service, boolean) and a list of booleans
- in the same order as the services.
-
- @param services: OrderedDict of strings: [ports], one for each service to
- check.
- @returns [(service, boolean), ...] : results for checks
- [boolean] : just the result of the service checks
- """
- services_running = [service_running(s) for s in services]
- return list(zip(services, services_running)), services_running
-
-
-def _check_listening_on_services_ports(services, test=False):
- """Check that the unit is actually listening (has the port open) on the
- ports that the service specifies are open. If test is True then the
- function returns the services with ports that are open rather than
- closed.
-
- Returns an OrderedDict of service: ports and a list of booleans
-
- @param services: OrderedDict(service: [port, ...], ...)
- @param test: default=False, if False, test for closed, otherwise open.
- @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
- """
- test = bool(test)  # ensure test is True or False
- all_ports = list(itertools.chain(*services.values()))
- ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
- map_ports = OrderedDict()
- matched_ports = [p for p, opened in zip(all_ports, ports_states)
- if opened == test] # essentially opened xor test
- for service, ports in services.items():
- set_ports = set(ports).intersection(matched_ports)
- if set_ports:
- map_ports[service] = set_ports
- return map_ports, ports_states
-
-
-def _check_listening_on_ports_list(ports):
- """Check that the ports list given are being listened to
-
- Returns a list of ports being listened to and a list of the
- booleans.
-
- @param ports: LIST of port numbers.
- @returns [(port_num, boolean), ...], [boolean]
- """
- ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
- return zip(ports, ports_open), ports_open
-
-
-def _filter_tuples(services_states, state):
- """Return a simple list from a list of tuples according to the condition
-
- @param services_states: LIST of (string, boolean): service and running
- state.
- @param state: Boolean to match the tuple against.
- @returns [LIST of strings] that matched the tuple RHS.
- """
- return [s for s, b in services_states if b == state]
-
-
-def workload_state_compare(current_workload_state, workload_state):
- """ Return highest priority of two states"""
- hierarchy = {'unknown': -1,
- 'active': 0,
- 'maintenance': 1,
- 'waiting': 2,
- 'blocked': 3,
- }
-
- if hierarchy.get(workload_state) is None:
- workload_state = 'unknown'
- if hierarchy.get(current_workload_state) is None:
- current_workload_state = 'unknown'
-
- # Set workload_state based on hierarchy of statuses
- if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
- return current_workload_state
- else:
- return workload_state
-
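For example, given the hierarchy above:

    workload_state_compare('waiting', 'blocked')      # -> 'blocked'
    workload_state_compare('active', 'maintenance')   # -> 'maintenance'
    workload_state_compare('bogus', 'active')         # -> 'active' (unknown < active)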
-
-def incomplete_relation_data(configs, required_interfaces):
- """Check complete contexts against required_interfaces
- Return dictionary of incomplete relation data.
-
- configs is an OSConfigRenderer object with configs registered
-
- required_interfaces is a dictionary of required general interfaces
- with dictionary values of possible specific interfaces.
- Example:
- required_interfaces = {'database': ['shared-db', 'pgsql-db']}
-
- The interface is said to be satisfied if any one of the interfaces in the
- list has a complete context.
-
- Return dictionary of incomplete or missing required contexts with relation
- status of interfaces and any missing data points. Example:
- {'message':
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}},
- 'identity':
- {'identity-service': {'related': False}},
- 'database':
- {'pgsql-db': {'related': False},
- 'shared-db': {'related': True}}}
- """
- complete_ctxts = configs.complete_contexts()
- incomplete_relations = [
- svc_type
- for svc_type, interfaces in required_interfaces.items()
- if not set(interfaces).intersection(complete_ctxts)]
- return {
- i: configs.get_incomplete_context_data(required_interfaces[i])
- for i in incomplete_relations}
-
-
-def do_action_openstack_upgrade(package, upgrade_callback, configs):
- """Perform action-managed OpenStack upgrade.
-
- Upgrades packages to the configured openstack-origin version and sets
- the corresponding action status as a result.
-
- If the charm was installed from source we cannot upgrade it.
- For backwards compatibility a config flag (action-managed-upgrade) must
- be set for this code to run, otherwise a full service level upgrade will
- fire on config-changed.
-
- @param package: package name for determining if upgrade available
- @param upgrade_callback: function callback to charm's upgrade function
- @param configs: templating object derived from OSConfigRenderer class
-
- @return: True if upgrade successful; False if upgrade failed or skipped
- """
- ret = False
-
- if openstack_upgrade_available(package):
- if config('action-managed-upgrade'):
- juju_log('Upgrading OpenStack release')
-
- try:
- upgrade_callback(configs=configs)
- action_set({'outcome': 'success, upgrade completed.'})
- ret = True
- except Exception:
- action_set({'outcome': 'upgrade failed, see traceback.'})
- action_set({'traceback': traceback.format_exc()})
- action_fail('do_openstack_upgrade resulted in an '
- 'unexpected error')
- else:
- action_set({'outcome': 'action-managed-upgrade config is '
- 'False, skipped upgrade.'})
- else:
- action_set({'outcome': 'no upgrade available.'})
-
- return ret
-
-
-def remote_restart(rel_name, remote_service=None):
- trigger = {
- 'restart-trigger': str(uuid.uuid4()),
- }
- if remote_service:
- trigger['remote-service'] = remote_service
- for rid in relation_ids(rel_name):
- # This subordinate can be related to two separate services using
- # different subordinate relations, so only issue the restart if
- # the principal is connected down the relation we think it is
- if related_units(relid=rid):
- relation_set(relation_id=rid,
- relation_settings=trigger,
- )
-
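A sketch of a subordinate nudging its principal to restart (relation and service names are illustrative):

    remote_restart('neutron-plugin', remote_service='nova-compute')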
-
-def check_actually_paused(services=None, ports=None):
- """Check that services listed in the services object and ports
- are actually closed (not listened to), to verify that the unit is
- properly paused.
-
- @param services: See _extract_services_list_helper
- @returns status : string for status (None if okay)
- message : string describing the problem, for status_set
- """
- state = None
- message = None
- messages = []
- if services is not None:
- services = _extract_services_list_helper(services)
- services_running, services_states = _check_running_services(services)
- if any(services_states):
- # there shouldn't be any running so this is a problem
- messages.append("these services running: {}"
- .format(", ".join(
- _filter_tuples(services_running, True))))
- state = "blocked"
- ports_open, ports_open_bools = (
- _check_listening_on_services_ports(services, True))
- if any(ports_open_bools):
- message_parts = {service: ", ".join([str(v) for v in open_ports])
- for service, open_ports in ports_open.items()}
- message = ", ".join(
- ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
- messages.append(
- "these service:ports are open: {}".format(message))
- state = 'blocked'
- if ports is not None:
- ports_open, bools = _check_listening_on_ports_list(ports)
- if any(bools):
- messages.append(
- "these ports which should be closed, but are open: {}"
- .format(", ".join([str(p) for p, v in ports_open if v])))
- state = 'blocked'
- if messages:
- message = ("Services should be paused but {}"
- .format(", ".join(messages)))
- return state, message
-
-
-def set_unit_paused():
- """Set the unit to a paused state in the local kv() store.
- This does NOT actually pause the unit
- """
- with unitdata.HookData()() as t:
- kv = t[0]
- kv.set('unit-paused', True)
-
-
-def clear_unit_paused():
- """Clear the unit from a paused state in the local kv() store
- This does NOT actually restart any services - it only clears the
- local state.
- """
- with unitdata.HookData()() as t:
- kv = t[0]
- kv.set('unit-paused', False)
-
-
-def is_unit_paused_set():
- """Return the state of the kv().get('unit-paused').
- This does NOT verify that the unit really is paused.
-
- To help with units that don't have HookData() (testing),
- return False if an exception is raised.
- """
- try:
- with unitdata.HookData()() as t:
- kv = t[0]
- # transform something truth-y into a Boolean.
- return bool(kv.get('unit-paused'))
- except Exception:
- return False
-
-
-def manage_payload_services(action, services=None, charm_func=None):
- """Run an action against all services.
-
- An optional charm_func() can be called. It should raise an Exception to
- indicate that the function failed. If it was successful it should return
- None or an optional message.
-
- The signature for charm_func is:
- charm_func() -> message: str
-
- charm_func() is executed after any services are stopped, if supplied.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- :param action: Action to run: pause, resume, start or stop.
- :type action: str
- :param services: See above
- :type services: See above
- :param charm_func: function to run for custom charm pausing.
- :type charm_func: f()
- :returns: Status boolean and list of messages
- :rtype: (bool, [])
- :raises: RuntimeError
- """
- actions = {
- 'pause': service_pause,
- 'resume': service_resume,
- 'start': service_start,
- 'stop': service_stop}
- action = action.lower()
- if action not in actions.keys():
- raise RuntimeError(
- "action: {} must be one of: {}".format(action,
- ', '.join(actions.keys())))
- services = _extract_services_list_helper(services)
- messages = []
- success = True
- if services:
- for service in services.keys():
- rc = actions[action](service)
- if not rc:
- success = False
- messages.append("{} didn't {} cleanly.".format(service,
- action))
- if charm_func:
- try:
- message = charm_func()
- if message:
- messages.append(message)
- except Exception as e:
- success = False
- messages.append(str(e))
- return success, messages
-
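A sketch of pausing two services from an action (service names are placeholders):

    ok, msgs = manage_payload_services('pause',
                                       services=['nova-compute', 'nova-api'])
    if not ok:
        action_fail('; '.join(msgs))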
-
-def pause_unit(assess_status_func, services=None, ports=None,
- charm_func=None):
- """Pause a unit by stopping the services and setting 'unit-paused'
- in the local kv() store.
-
- Also checks that the services have stopped and ports are no longer
- being listened to.
-
- An optional charm_func() can be called that can either raise an
- Exception or return a non-None message to indicate that the unit
- didn't pause cleanly.
-
- The signature for charm_func is:
- charm_func() -> message: string
-
- charm_func() is executed after any services are stopped, if supplied.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param assess_status_func: (f() -> message: string | None) or None
- @param services: OPTIONAL see above
- @param ports: OPTIONAL list of port
- @param charm_func: function to run for custom charm pausing.
- @returns None
- @raises Exception(message) on an error for action_fail().
- """
- _, messages = manage_payload_services(
- 'pause',
- services=services,
- charm_func=charm_func)
- set_unit_paused()
- if assess_status_func:
- message = assess_status_func()
- if message:
- messages.append(message)
- if messages and not is_unit_upgrading_set():
- raise Exception("Couldn't pause: {}".format("; ".join(messages)))
-
-
-def resume_unit(assess_status_func, services=None, ports=None,
- charm_func=None):
- """Resume a unit by starting the services and clearning 'unit-paused'
- in the local kv() store.
-
- Also checks that the services have started and ports are being listened to.
-
- An optional charm_func() can be called that can either raise an
- Exception or return a non-None message to indicate that the unit
- didn't resume cleanly.
-
- The signature for charm_func is:
- charm_func() -> message: string
-
- charm_func() is executed after any services are started, if supplied.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param assess_status_func: (f() -> message: string | None) or None
- @param services: OPTIONAL see above
- @param ports: OPTIONAL list of port
- @param charm_func: function to run for custom charm resuming.
- @returns None
- @raises Exception(message) on an error for action_fail().
- """
- _, messages = manage_payload_services(
- 'resume',
- services=services,
- charm_func=charm_func)
- clear_unit_paused()
- if assess_status_func:
- message = assess_status_func()
- if message:
- messages.append(message)
- if messages:
- raise Exception("Couldn't resume: {}".format("; ".join(messages)))
-
-
-def make_assess_status_func(*args, **kwargs):
- """Creates an assess_status_func() suitable for handing to pause_unit()
- and resume_unit().
-
- This uses the _determine_os_workload_status(...) function to determine
- what the workload_status should be for the unit. If the unit is
- not in maintenance or active states, then the message is returned to
- the caller. This is so an action that doesn't result in either a
- complete pause or complete resume can signal failure with an action_fail()
- """
- def _assess_status_func():
- state, message = _determine_os_workload_status(*args, **kwargs)
- status_set(state, message)
- if state not in ['maintenance', 'active']:
- return message
- return None
-
- return _assess_status_func
-
-
-def pausable_restart_on_change(restart_map, stopstart=False,
- restart_functions=None):
- """A restart_on_change decorator that checks to see if the unit is
- paused. If it is paused then the decorated function doesn't fire.
-
- This is provided as a helper, as the @restart_on_change(...) decorator
- is in core.host, yet the openstack specific helpers are in this file
- (contrib.openstack.utils). Thus, this needs to be an optional feature
- for openstack charms (or charms that wish to use the openstack
- pause/resume type features).
-
- It is used as follows:
-
- from contrib.openstack.utils import (
- pausable_restart_on_change as restart_on_change)
-
- @restart_on_change(restart_map, stopstart=True)
- def some_hook(...):
- pass
-
- see core.utils.restart_on_change() for more details.
-
- Note restart_map can be a callable, in which case, restart_map is only
- evaluated at runtime. This means that it is lazy and the underlying
- function won't be called if the decorated function is never called. Note,
- retains backwards compatibility for passing a non-callable dictionary.
-
- @param f: the function to decorate
- @param restart_map: (optionally callable, which then returns the
- restart_map) the restart map {conf_file: [services]}
- @param stopstart: DEFAULT false; whether to stop, start or just restart
- @returns decorator to use a restart_on_change with pausability
- """
- def wrap(f):
- # py27 compatible nonlocal variable. When py3 only, replace with
- # nonlocal keyword
- __restart_map_cache = {'cache': None}
-
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- if is_unit_paused_set():
- return f(*args, **kwargs)
- if __restart_map_cache['cache'] is None:
- __restart_map_cache['cache'] = restart_map() \
- if callable(restart_map) else restart_map
- # otherwise, normal restart_on_change functionality
- return restart_on_change_helper(
- (lambda: f(*args, **kwargs)), __restart_map_cache['cache'],
- stopstart, restart_functions)
- return wrapped_f
- return wrap
-
-
-def ordered(orderme):
- """Converts the provided dictionary into a collections.OrderedDict.
-
- The items in the returned OrderedDict will be inserted based on the
- natural sort order of the keys. Nested dictionaries will also be sorted
- in order to ensure fully predictable ordering.
-
- :param orderme: the dict to order
- :return: collections.OrderedDict
- :raises: ValueError: if `orderme` isn't a dict instance.
- """
- if not isinstance(orderme, dict):
- raise ValueError('argument must be a dict type')
-
- result = OrderedDict()
- for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]):
- if isinstance(v, dict):
- result[k] = ordered(v)
- else:
- result[k] = v
-
- return result
-
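For instance:

    ordered({'b': 2, 'a': {'d': 4, 'c': 3}})
    # -> OrderedDict([('a', OrderedDict([('c', 3), ('d', 4)])), ('b', 2)])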
-
-def config_flags_parser(config_flags):
- """Parses config flags string into dict.
-
- This parsing method supports a few different formats for the config
- flag values to be parsed:
-
- 1. A string in the simple format of key=value pairs, with the possibility
- of specifying multiple key value pairs within the same string. For
- example, a string in the format of 'key1=value1, key2=value2' will
- return a dict of:
-
- {'key1': 'value1', 'key2': 'value2'}.
-
- 2. A string in the above format, but supporting a comma-delimited list
- of values for the same key. For example, a string in the format of
- 'key1=value1, key2=value3,value4,value5' will return a dict of:
-
- {'key1': 'value1', 'key2': 'value3,value4,value5'}
-
- 3. A string containing a colon character (:) prior to an equal
- character (=) will be treated as yaml and parsed as such. This can be
- used to specify more complex key value pairs. For example,
- a string in the format of 'key1: subkey1=value1, subkey2=value2' will
- return a dict of:
-
- {'key1': 'subkey1=value1, subkey2=value2'}
-
- The provided config_flags string may be a comma-separated list of key=value
- pairs, and the values themselves may be comma-separated lists.
- """
- # If we find a colon before an equals sign then treat it as yaml.
- # Note: limit it to finding the colon first since this indicates assignment
- # for inline yaml.
- colon = config_flags.find(':')
- equals = config_flags.find('=')
- if colon > 0:
- if colon < equals or equals < 0:
- return ordered(yaml.safe_load(config_flags))
-
- if config_flags.find('==') >= 0:
- juju_log("config_flags is not in expected format (key=value)",
- level=ERROR)
- raise OSContextError
-
- # strip the following from each value.
- post_strippers = ' ,'
- # we strip any leading/trailing '=' or ' ' from the string then
- # split on '='.
- split = config_flags.strip(' =').split('=')
- limit = len(split)
- flags = OrderedDict()
- for i in range(0, limit - 1):
- current = split[i]
- next = split[i + 1]
- vindex = next.rfind(',')
- if (i == limit - 2) or (vindex < 0):
- value = next
- else:
- value = next[:vindex]
-
- if i == 0:
- key = current
- else:
- # if this is not the first entry, expect an embedded key.
- index = current.rfind(',')
- if index < 0:
- juju_log("Invalid config value(s) at index %s" % (i),
- level=ERROR)
- raise OSContextError
- key = current[index + 1:]
-
- # Add to collection.
- flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
-
- return flags
-
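Illustrative parses (values invented):

    config_flags_parser('key1=value1, key2=value2')
    # -> OrderedDict([('key1', 'value1'), ('key2', 'value2')])
    config_flags_parser('key1: subkey1=v1, subkey2=v2')
    # colon before '=' -> parsed as YAML:
    # -> OrderedDict([('key1', 'subkey1=v1, subkey2=v2')])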
-
-def os_application_version_set(package):
- '''Set version of application for Juju 2.0 and later'''
- application_version = get_upstream_version(package)
- # NOTE(jamespage) if not able to figure out package version, fallback to
- # openstack codename version detection.
- if not application_version:
- application_version_set(os_release(package))
- else:
- application_version_set(application_version)
-
-
-def enable_memcache(source=None, release=None, package=None):
- """Determine if memcache should be enabled on the local unit
-
- @param source: source string for the charm
- @param release: release of OpenStack currently deployed
- @param package: package to derive OpenStack version deployed
- @returns boolean Whether memcache should be enabled
- """
- _release = None
- if release:
- _release = release
- else:
- _release = os_release(package, base='icehouse')
- if not _release:
- _release = get_os_codename_install_source(source)
-
- return CompareOpenStackReleases(_release) >= 'mitaka'
-
-
-def token_cache_pkgs(source=None, release=None):
- """Determine additional packages needed for token caching
-
- @param source: source string for charm
- @param release: release of OpenStack currently deployed
- @returns List of package to enable token caching
- """
- packages = []
- if enable_memcache(source=source, release=release):
- packages.extend(['memcached', 'python-memcache'])
- return packages
-
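For example (release values illustrative):

    token_cache_pkgs(release='mitaka')   # -> ['memcached', 'python-memcache']
    token_cache_pkgs(release='liberty')  # -> [] (memcache starts at mitaka)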
-
-def update_json_file(filename, items):
- """Updates the json `filename` with a given dict.
- :param filename: path to json file (e.g. /etc/glance/policy.json)
- :param items: dict of items to update
- """
- if not items:
- return
-
- with open(filename) as fd:
- policy = json.load(fd)
-
- # Compare before and after and if nothing has changed don't write the file
- # since that could cause unnecessary service restarts.
- before = json.dumps(policy, indent=4, sort_keys=True)
- policy.update(items)
- after = json.dumps(policy, indent=4, sort_keys=True)
- if before == after:
- return
-
- with open(filename, "w") as fd:
- fd.write(after)
-
-
-@cached
-def snap_install_requested():
- """ Determine if installing from snaps
-
- If openstack-origin is of the form snap:track/channel[/branch]
- and channel is in SNAP_CHANNELS, return True.
- """
- origin = config('openstack-origin') or ""
- if not origin.startswith('snap:'):
- return False
-
- _src = origin[5:]
- if '/' in _src:
- channel = _src.split('/')[1]
- else:
- # Handle snap:track with no channel
- channel = 'stable'
- return valid_snap_channel(channel)
-
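Example origin values (a sketch; channels must pass valid_snap_channel()):

    # config('openstack-origin') == 'snap:rocky/stable'  -> True
    # config('openstack-origin') == 'snap:rocky'         -> True ('stable' assumed)
    # config('openstack-origin') == 'distro'             -> False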
-
-def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
- """Generate a dictionary of snap install information from origin
-
- @param snaps: List of snaps
- @param src: String of openstack-origin or source of the form
- snap:track/channel
- @param mode: String classic, devmode or jailmode
- @returns: Dictionary of snaps with channels and modes
- """
-
- if not src.startswith('snap:'):
- juju_log("Snap source is not a snap origin", 'WARN')
- return {}
-
- _src = src[5:]
- channel = '--channel={}'.format(_src)
-
- return {snap: {'channel': channel, 'mode': mode}
- for snap in snaps}
-
-
-def install_os_snaps(snaps, refresh=False):
- """Install OpenStack snaps from channel and with mode
-
- @param snaps: Dictionary of snaps with channels and modes of the form:
- {'snap_name': {'channel': 'snap_channel',
- 'mode': 'snap_mode'}}
- Where channel is a snapstore channel and mode is --classic, --devmode
- or --jailmode.
- @param refresh: Boolean; refresh already-installed snaps instead of
- installing them
- """
-
- def _ensure_flag(flag):
- if flag.startswith('--'):
- return flag
- return '--{}'.format(flag)
-
- if refresh:
- for snap in snaps.keys():
- snap_refresh(snap,
- _ensure_flag(snaps[snap]['channel']),
- _ensure_flag(snaps[snap]['mode']))
- else:
- for snap in snaps.keys():
- snap_install(snap,
- _ensure_flag(snaps[snap]['channel']),
- _ensure_flag(snaps[snap]['mode']))
-
-
-def set_unit_upgrading():
- """Set the unit to a upgrading state in the local kv() store.
- """
- with unitdata.HookData()() as t:
- kv = t[0]
- kv.set('unit-upgrading', True)
-
-
-def clear_unit_upgrading():
- """Clear the unit from a upgrading state in the local kv() store
- """
- with unitdata.HookData()() as t:
- kv = t[0]
- kv.set('unit-upgrading', False)
-
-
-def is_unit_upgrading_set():
- """Return the state of the kv().get('unit-upgrading').
-
- To help with units that don't have HookData() (testing),
- return False if an exception is raised.
- """
- try:
- with unitdata.HookData()() as t:
- kv = t[0]
- # transform something truth-y into a Boolean.
- return bool(kv.get('unit-upgrading'))
- except Exception:
- return False
-
-
-def series_upgrade_prepare(pause_unit_helper=None, configs=None):
- """ Run common series upgrade prepare tasks.
-
- :param pause_unit_helper: function: Function to pause unit
- :param configs: OSConfigRenderer object: Configurations
- :returns None:
- """
- set_unit_upgrading()
- if pause_unit_helper and configs:
- if not is_unit_paused_set():
- pause_unit_helper(configs)
-
-
-def series_upgrade_complete(resume_unit_helper=None, configs=None):
- """ Run common series upgrade complete tasks.
-
- :param resume_unit_helper: function: Function to resume unit
- :param configs: OSConfigRenderer object: Configurations
- :returns None:
- """
- clear_unit_paused()
- clear_unit_upgrading()
- if configs:
- configs.write_all()
- if resume_unit_helper:
- resume_unit_helper(configs)
diff --git a/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/hooks/charmhelpers/contrib/openstack/vaultlocker.py
deleted file mode 100644
index a8e4bf8..0000000
--- a/hooks/charmhelpers/contrib/openstack/vaultlocker.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2018 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-import os
-
-import charmhelpers.contrib.openstack.alternatives as alternatives
-import charmhelpers.contrib.openstack.context as context
-
-import charmhelpers.core.hookenv as hookenv
-import charmhelpers.core.host as host
-import charmhelpers.core.templating as templating
-import charmhelpers.core.unitdata as unitdata
-
-VAULTLOCKER_BACKEND = 'charm-vaultlocker'
-
-
-class VaultKVContext(context.OSContextGenerator):
- """Vault KV context for interaction with vault-kv interfaces"""
- interfaces = ['secrets-storage']
-
- def __init__(self, secret_backend=None):
- super(VaultKVContext, self).__init__()
- self.secret_backend = (
- secret_backend or 'charm-{}'.format(hookenv.service_name())
- )
-
- def __call__(self):
- db = unitdata.kv()
- last_token = db.get('last-token')
- secret_id = db.get('secret-id')
- for relation_id in hookenv.relation_ids(self.interfaces[0]):
- for unit in hookenv.related_units(relation_id):
- data = hookenv.relation_get(unit=unit,
- rid=relation_id)
- vault_url = data.get('vault_url')
- role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
- token = data.get('{}_token'.format(hookenv.local_unit()))
-
- if all([vault_url, role_id, token]):
- token = json.loads(token)
- vault_url = json.loads(vault_url)
-
- # Tokens may change when secret_ids are being
- # reissued - if so use token to get new secret_id
- if token != last_token:
- secret_id = retrieve_secret_id(
- url=vault_url,
- token=token
- )
- db.set('secret-id', secret_id)
- db.set('last-token', token)
- db.flush()
-
- ctxt = {
- 'vault_url': vault_url,
- 'role_id': json.loads(role_id),
- 'secret_id': secret_id,
- 'secret_backend': self.secret_backend,
- }
- vault_ca = data.get('vault_ca')
- if vault_ca:
- ctxt['vault_ca'] = json.loads(vault_ca)
- self.complete = True
- return ctxt
- return {}
-
-
-def write_vaultlocker_conf(context, priority=100):
- """Write vaultlocker configuration to disk and install alternative
-
- :param context: Dict of data from vault-kv relation
- :ptype: context: dict
- :param priority: Priority of alternative configuration
- :ptype: priority: int"""
- charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
- hookenv.service_name()
- )
- host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
- templating.render(source='vaultlocker.conf.j2',
- target=charm_vl_path,
- context=context, perms=0o600)
- alternatives.install_alternative('vaultlocker.conf',
- '/etc/vaultlocker/vaultlocker.conf',
- charm_vl_path, priority)
-
-
-def vault_relation_complete(backend=None):
- """Determine whether vault relation is complete
-
- :param backend: Name of secrets backend requested
- :ptype backend: string
- :returns: whether the relation to vault is complete
- :rtype: bool"""
- vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
- vault_kv()
- return vault_kv.complete
-
-
- # TODO: contribute a high-level unwrap method to hvac that works
-def retrieve_secret_id(url, token):
- """Retrieve a response-wrapped secret_id from Vault
-
- :param url: URL to Vault Server
- :ptype url: str
- :param token: One shot Token to use
- :ptype token: str
- :returns: secret_id to use for Vault Access
- :rtype: str"""
- import hvac
- client = hvac.Client(url=url, token=token)
- response = client._post('/v1/sys/wrapping/unwrap')
- if response.status_code == 200:
- data = response.json()
- return data['data']['secret_id']
diff --git a/hooks/charmhelpers/contrib/python.py b/hooks/charmhelpers/contrib/python.py
deleted file mode 100644
index 84cba8c..0000000
--- a/hooks/charmhelpers/contrib/python.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2014-2019 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-# deprecated aliases for backwards compatibility
-from charmhelpers.fetch.python import debug # noqa
-from charmhelpers.fetch.python import packages # noqa
-from charmhelpers.fetch.python import rpdb # noqa
-from charmhelpers.fetch.python import version # noqa
diff --git a/hooks/charmhelpers/contrib/storage/__init__.py b/hooks/charmhelpers/contrib/storage/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/storage/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/storage/linux/__init__.py b/hooks/charmhelpers/contrib/storage/linux/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/storage/linux/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/storage/linux/bcache.py b/hooks/charmhelpers/contrib/storage/linux/bcache.py
deleted file mode 100644
index 605991e..0000000
--- a/hooks/charmhelpers/contrib/storage/linux/bcache.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 2017 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import os
-import json
-
-from charmhelpers.core.hookenv import log
-
-stats_intervals = ['stats_day', 'stats_five_minute',
- 'stats_hour', 'stats_total']
-
-SYSFS = '/sys'
-
-
-class Bcache(object):
- """Bcache behaviour
- """
-
- def __init__(self, cachepath):
- self.cachepath = cachepath
-
- @classmethod
- def fromdevice(cls, devname):
- return cls('{}/block/{}/bcache'.format(SYSFS, devname))
-
- def __str__(self):
- return self.cachepath
-
- def get_stats(self, interval):
- """Get cache stats
- """
- intervaldir = 'stats_{}'.format(interval)
- path = "{}/{}".format(self.cachepath, intervaldir)
- out = dict()
- for elem in os.listdir(path):
- out[elem] = open('{}/{}'.format(path, elem)).read().strip()
- return out
-
-
-def get_bcache_fs():
- """Return all cache sets
- """
- cachesetroot = "{}/fs/bcache".format(SYSFS)
- try:
- dirs = os.listdir(cachesetroot)
- except OSError:
- log("No bcache fs found")
- return []
- cacheset = set([Bcache('{}/{}'.format(cachesetroot, d)) for d in dirs if not d.startswith('register')])
- return cacheset
-
-
-def get_stats_action(cachespec, interval):
- """Action for getting bcache statistics for a given cachespec.
- Cachespec can either be a device name, eg. 'sdb', which will retrieve
- cache stats for the given device, or 'global', which will retrieve stats
- for all cachesets
- """
- if cachespec == 'global':
- caches = get_bcache_fs()
- else:
- caches = [Bcache.fromdevice(cachespec)]
- res = dict((c.cachepath, c.get_stats(interval)) for c in caches)
- return json.dumps(res, indent=4, separators=(',', ': '))
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
deleted file mode 100644
index 2c62092..0000000
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ /dev/null
@@ -1,1562 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# This file is sourced from lp:openstack-charm-helpers
-#
-# Authors:
-# James Page
-# Adam Gandelman
-#
-
-import errno
-import hashlib
-import math
-import six
-
-import os
-import shutil
-import json
-import time
-import uuid
-
-from subprocess import (
- check_call,
- check_output,
- CalledProcessError,
-)
-from charmhelpers.core.hookenv import (
- config,
- service_name,
- local_unit,
- relation_get,
- relation_ids,
- relation_set,
- related_units,
- log,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-from charmhelpers.core.host import (
- mount,
- mounts,
- service_start,
- service_stop,
- service_running,
- umount,
- cmp_pkgrevno,
-)
-from charmhelpers.fetch import (
- apt_install,
-)
-from charmhelpers.core.unitdata import kv
-
-from charmhelpers.core.kernel import modprobe
-from charmhelpers.contrib.openstack.utils import config_flags_parser
-
-KEYRING = '/etc/ceph/ceph.client.{}.keyring'
-KEYFILE = '/etc/ceph/ceph.client.{}.key'
-
-CEPH_CONF = """[global]
-auth supported = {auth}
-keyring = {keyring}
-mon host = {mon_hosts}
-log to syslog = {use_syslog}
-err to syslog = {use_syslog}
-clog to syslog = {use_syslog}
-"""
-
-# The number of placement groups per OSD to target for placement group
-# calculations. This number is chosen as 100 due to the ceph PG Calc
-# documentation recommending to choose 100 for clusters which are not
-# expected to increase in the foreseeable future. Since the majority of the
-# calculations are done on deployment, target the case of non-expanding
-# clusters as the default.
-DEFAULT_PGS_PER_OSD_TARGET = 100
-DEFAULT_POOL_WEIGHT = 10.0
-LEGACY_PG_COUNT = 200
-DEFAULT_MINIMUM_PGS = 2
-
-
-def validator(value, valid_type, valid_range=None):
- """
- Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
- Example input:
- validator(value=1,
- valid_type=int,
- valid_range=[0, 2])
- This says we're testing value=1; it must be an int in the inclusive range [0, 2].
-
- :param value: The value to validate
- :param valid_type: The type that value should be.
- :param valid_range: A range of values that value can assume.
- :return:
- """
- assert isinstance(value, valid_type), "{} is not a {}".format(
- value,
- valid_type)
- if valid_range is not None:
- assert isinstance(valid_range, list), \
- "valid_range must be a list, was given {}".format(valid_range)
- # If we're dealing with strings
- if isinstance(value, six.string_types):
- assert value in valid_range, \
- "{} is not in the list {}".format(value, valid_range)
- # Integer, float should have a min and max
- else:
- if len(valid_range) != 2:
- raise ValueError(
- "Invalid valid_range list of {} for {}. "
- "List must be [min,max]".format(valid_range, value))
- assert value >= valid_range[0], \
- "{} is less than minimum allowed value of {}".format(
- value, valid_range[0])
- assert value <= valid_range[1], \
- "{} is greater than maximum allowed value of {}".format(
- value, valid_range[1])
-
-
-class PoolCreationError(Exception):
- """
- A custom error to inform the caller that a pool creation failed. Provides an error message
- """
-
- def __init__(self, message):
- super(PoolCreationError, self).__init__(message)
-
-
-class Pool(object):
- """
- An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
- Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
- """
-
- def __init__(self, service, name):
- self.service = service
- self.name = name
-
- # Create the pool if it doesn't exist already
- # To be implemented by subclasses
- def create(self):
- pass
-
- def add_cache_tier(self, cache_pool, mode):
- """
- Adds a new cache tier to an existing pool.
- :param cache_pool: six.string_types. The cache tier pool name to add.
- :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
- :return: None
- """
- # Check the input types and values
- validator(value=cache_pool, valid_type=six.string_types)
- validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
-
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
- check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
-
- def remove_cache_tier(self, cache_pool):
- """
- Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
- :param cache_pool: six.string_types. The cache tier pool name to remove.
- :return: None
- """
- # read-only is easy, writeback is much harder
- mode = get_cache_mode(self.service, cache_pool)
- if mode == 'readonly':
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
-
- elif mode == 'writeback':
- pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
- 'cache-mode', cache_pool, 'forward']
- if cmp_pkgrevno('ceph-common', '10.1') >= 0:
- # Jewel added a mandatory flag
- pool_forward_cmd.append('--yes-i-really-mean-it')
-
- check_call(pool_forward_cmd)
- # Flush the cache and wait for it to return
- check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
-
- def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
- device_class=None):
- """Return the number of placement groups to use when creating the pool.
-
- Returns the number of placement groups which should be specified when
- creating the pool. This is based upon the calculation guidelines
- provided by the Ceph Placement Group Calculator (located online at
- http://ceph.com/pgcalc/).
-
- The number of placement groups are calculated using the following:
-
- (Target PGs per OSD) * (OSD #) * (%Data)
- ----------------------------------------
- (Pool size)
-
- Per the upstream guidelines, the OSD # should really be considered
- based on the number of OSDs which are eligible to be selected by the
- pool. Since the pool creation doesn't specify any of CRUSH set rules,
- the default rule will be dependent upon the type of pool being
- created (replicated or erasure).
-
- This code makes no attempt to determine the number of OSDs which can be
- selected for the specific rule, rather it is left to the user to tune
- in the form of 'expected-osd-count' config option.
-
- :param pool_size: int. pool_size is either the number of replicas for
- replicated pools or the K+M sum for erasure coded pools
-        :param percent_data: float. The percentage of data that is expected to
-            be contained in the pool for the specific OSD set. The default
-            assumes 10% of the data is for this pool, which is a relatively
-            low share but allows for the pg_num to be increased. NOTE: the
-            default is primarily to handle the scenario where related charms
-            requiring pools have not been upgraded to indicate their relative
-            usage of the pools.
- :param device_class: str. class of storage to use for basis of pgs
- calculation; ceph supports nvme, ssd and hdd by default based
- on presence of devices of each type in the deployment.
- :return: int. The number of pgs to use.
- """
-
- # Note: This calculation follows the approach that is provided
- # by the Ceph PG Calculator located at http://ceph.com/pgcalc/.
- validator(value=pool_size, valid_type=int)
-
- # Ensure that percent data is set to something - even with a default
- # it can be set to None, which would wreak havoc below.
- if percent_data is None:
- percent_data = DEFAULT_POOL_WEIGHT
-
- # If the expected-osd-count is specified, then use the max between
- # the expected-osd-count and the actual osd_count
- osd_list = get_osds(self.service, device_class)
- expected = config('expected-osd-count') or 0
-
- if osd_list:
- if device_class:
- osd_count = len(osd_list)
- else:
- osd_count = max(expected, len(osd_list))
-
- # Log a message to provide some insight if the calculations claim
- # to be off because someone is setting the expected count and
- # there are more OSDs in reality. Try to make a proper guess
- # based upon the cluster itself.
- if not device_class and expected and osd_count != expected:
- log("Found more OSDs than provided expected count. "
- "Using the actual count instead", INFO)
- elif expected:
- # Use the expected-osd-count in older ceph versions to allow for
- # a more accurate pg calculation
- osd_count = expected
- else:
- # NOTE(james-page): Default to 200 for older ceph versions
- # which don't support OSD query from cli
- return LEGACY_PG_COUNT
-
- percent_data /= 100.0
- target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
- num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size
-
- # NOTE: ensure a sane minimum number of PGS otherwise we don't get any
- # reasonable data distribution in minimal OSD configurations
- if num_pg < DEFAULT_MINIMUM_PGS:
- num_pg = DEFAULT_MINIMUM_PGS
-
- # The CRUSH algorithm has a slight optimization for placement groups
- # with powers of 2 so find the nearest power of 2. If the nearest
- # power of 2 is more than 25% below the original value, the next
- # highest value is used. To do this, find the nearest power of 2 such
- # that 2^n <= num_pg, and check whether it's within the 25% tolerance.
- exponent = math.floor(math.log(num_pg, 2))
- nearest = 2 ** exponent
- if (num_pg - nearest) > (num_pg * 0.25):
- # Choose the next highest power of 2 since the nearest is more
- # than 25% below the original value.
- return int(nearest * 2)
- else:
- return int(nearest)
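-
-
-def _example_pg_calculation():
-    """Illustrative, self-contained sketch (not part of charmhelpers):
-    walks through the arithmetic Pool.get_pgs() performs for a
-    hypothetical cluster of 12 OSDs with 3 replicas, 100 target PGs per
-    OSD and a pool expected to hold 40% of the data.
-    """
-    target_pgs_per_osd, osd_count, percent_data, pool_size = 100, 12, 0.40, 3
-    num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size  # 160.0
-    exponent = math.floor(math.log(num_pg, 2))  # 7
-    nearest = 2 ** exponent  # 128
-    # 160 - 128 = 32, which is within the 25% tolerance (160 * 0.25 = 40),
-    # so 128 is kept; a larger shortfall would return the next power of 2.
-    if (num_pg - nearest) > (num_pg * 0.25):
-        return int(nearest * 2)
-    return int(nearest)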
-
-
-class ReplicatedPool(Pool):
- def __init__(self, service, name, pg_num=None, replicas=2,
- percent_data=10.0, app_name=None):
- super(ReplicatedPool, self).__init__(service=service, name=name)
- self.replicas = replicas
- if pg_num:
- # Since the number of placement groups was specified, ensure
- # that too many aren't created.
- max_pgs = self.get_pgs(self.replicas, 100.0)
- self.pg_num = min(pg_num, max_pgs)
- else:
- self.pg_num = self.get_pgs(self.replicas, percent_data)
- if app_name:
- self.app_name = app_name
- else:
- self.app_name = 'unknown'
-
- def create(self):
- if not pool_exists(self.service, self.name):
- # Create it
- cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
- self.name, str(self.pg_num)]
-            check_call(cmd)
-            # Set the pool replica size
-            update_pool(client=self.service,
-                        pool=self.name,
-                        settings={'size': str(self.replicas)})
-            try:
-                set_app_name_for_pool(client=self.service,
-                                      pool=self.name,
-                                      name=self.app_name)
-            except CalledProcessError:
-                log('Could not set app name for pool {}'.format(self.name),
-                    level=WARNING)
-
-
-# Default jerasure erasure coded pool
-class ErasurePool(Pool):
- def __init__(self, service, name, erasure_code_profile="default",
- percent_data=10.0, app_name=None):
- super(ErasurePool, self).__init__(service=service, name=name)
- self.erasure_code_profile = erasure_code_profile
- self.percent_data = percent_data
- if app_name:
- self.app_name = app_name
- else:
- self.app_name = 'unknown'
-
- def create(self):
- if not pool_exists(self.service, self.name):
- # Try to find the erasure profile information in order to properly
- # size the number of placement groups. The size of an erasure
- # coded placement group is calculated as k+m.
- erasure_profile = get_erasure_profile(self.service,
- self.erasure_code_profile)
-
- # Check for errors
- if erasure_profile is None:
- msg = ("Failed to discover erasure profile named "
- "{}".format(self.erasure_code_profile))
- log(msg, level=ERROR)
- raise PoolCreationError(msg)
- if 'k' not in erasure_profile or 'm' not in erasure_profile:
- # Error
- msg = ("Unable to find k (data chunks) or m (coding chunks) "
- "in erasure profile {}".format(erasure_profile))
- log(msg, level=ERROR)
- raise PoolCreationError(msg)
-
- k = int(erasure_profile['k'])
- m = int(erasure_profile['m'])
- pgs = self.get_pgs(k + m, self.percent_data)
- # Create it
- cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
- self.name, str(pgs), str(pgs),
- 'erasure', self.erasure_code_profile]
-            check_call(cmd)
-            try:
-                set_app_name_for_pool(client=self.service,
-                                      pool=self.name,
-                                      name=self.app_name)
-            except CalledProcessError:
-                log('Could not set app name for pool {}'.format(self.name),
-                    level=WARNING)
-
- """Get an existing erasure code profile if it already exists.
- Returns json formatted output"""
-
-
-def get_mon_map(service):
- """
- Returns the current monitor map.
- :param service: six.string_types. The Ceph user name to run the command under
- :return: json string. :raise: ValueError if the monmap fails to parse.
- Also raises CalledProcessError if our ceph command fails
- """
- try:
- mon_status = check_output(['ceph', '--id', service,
- 'mon_status', '--format=json'])
- if six.PY3:
- mon_status = mon_status.decode('UTF-8')
- try:
- return json.loads(mon_status)
- except ValueError as v:
- log("Unable to parse mon_status json: {}. Error: {}"
- .format(mon_status, str(v)))
- raise
- except CalledProcessError as e:
- log("mon_status command failed with message: {}"
- .format(str(e)))
- raise
-
-
-def hash_monitor_names(service):
- """
- Uses the get_mon_map() function to get information about the monitor
- cluster.
- Hash the name of each monitor. Return a sorted list of monitor hashes
- in an ascending order.
- :param service: six.string_types. The Ceph user name to run the command under
- :rtype : dict. json dict of monitor name, ip address and rank
- example: {
- 'name': 'ip-172-31-13-165',
- 'rank': 0,
- 'addr': '172.31.13.165:6789/0'}
- """
-    hash_list = []
-    monitor_list = get_mon_map(service=service)
-    if monitor_list['monmap']['mons']:
-        for mon in monitor_list['monmap']['mons']:
-            hash_list.append(
-                hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
-        return sorted(hash_list)
-    return None
-
-
-def monitor_key_delete(service, key):
- """
- Delete a key and value pair from the monitor cluster
- :param service: six.string_types. The Ceph user name to run the command under
- Deletes a key value pair on the monitor cluster.
- :param key: six.string_types. The key to delete.
- """
- try:
- check_output(
- ['ceph', '--id', service,
- 'config-key', 'del', str(key)])
- except CalledProcessError as e:
- log("Monitor config-key put failed with message: {}".format(
- e.output))
- raise
-
-
-def monitor_key_set(service, key, value):
- """
- Sets a key value pair on the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to set.
- :param value: The value to set. This will be converted to a string
- before setting
- """
- try:
- check_output(
- ['ceph', '--id', service,
- 'config-key', 'put', str(key), str(value)])
- except CalledProcessError as e:
- log("Monitor config-key put failed with message: {}".format(
- e.output))
- raise
-
-
-def monitor_key_get(service, key):
- """
- Gets the value of an existing key in the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to search for.
- :return: Returns the value of that key or None if not found.
- """
- try:
- output = check_output(
- ['ceph', '--id', service,
- 'config-key', 'get', str(key)]).decode('UTF-8')
- return output
- except CalledProcessError as e:
- log("Monitor config-key get failed with message: {}".format(
- e.output))
- return None
-
-
-def monitor_key_exists(service, key):
- """
- Searches for the existence of a key in the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to search for
- :return: Returns True if the key exists, False if not and raises an
- exception if an unknown error occurs. :raise: CalledProcessError if
- an unknown error occurs
- """
- try:
- check_call(
- ['ceph', '--id', service,
- 'config-key', 'exists', str(key)])
-        # If the command succeeded the key exists; Ceph exits with
-        # ENOENT when the key is not found, which raises below.
- return True
- except CalledProcessError as e:
- if e.returncode == errno.ENOENT:
- return False
- else:
- log("Unknown error from ceph config-get exists: {} {}".format(
- e.returncode, e.output))
- raise
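-
-
-def _example_monitor_key_roundtrip(service='admin'):
-    """Illustrative sketch (hypothetical key and value, not part of
-    charmhelpers): store a value in the monitor config-key store, read it
-    back if present, then delete it again.
-    """
-    monitor_key_set(service, 'example-key', 'example-value')
-    if monitor_key_exists(service, 'example-key'):
-        value = monitor_key_get(service, 'example-key')
-        log('example-key is set to {}'.format(value), level=DEBUG)
-    monitor_key_delete(service, 'example-key')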
-
-
-def get_erasure_profile(service, name):
- """
- :param service: six.string_types. The Ceph user name to run the command under
- :param name:
- :return:
- """
- try:
- out = check_output(['ceph', '--id', service,
- 'osd', 'erasure-code-profile', 'get',
- name, '--format=json'])
- if six.PY3:
- out = out.decode('UTF-8')
- return json.loads(out)
- except (CalledProcessError, OSError, ValueError):
- return None
-
-
-def pool_set(service, pool_name, key, value):
- """
- Sets a value for a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param key: six.string_types
- :param value:
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
- str(value).lower()]
-    check_call(cmd)
-
-
-def snapshot_pool(service, pool_name, snapshot_name):
- """
- Snapshots a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param snapshot_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
-    check_call(cmd)
-
-
-def remove_pool_snapshot(service, pool_name, snapshot_name):
- """
- Remove a snapshot from a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param snapshot_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
-    check_call(cmd)
-
-
-def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
- """
- :param service: The Ceph user name to run the command under
- :type service: str
- :param pool_name: Name of pool
- :type pool_name: str
- :param max_bytes: Maximum bytes quota to apply
- :type max_bytes: int
- :param max_objects: Maximum objects quota to apply
- :type max_objects: int
- :raises: subprocess.CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
- if max_bytes:
- cmd = cmd + ['max_bytes', str(max_bytes)]
- if max_objects:
- cmd = cmd + ['max_objects', str(max_objects)]
- check_call(cmd)
-
-
-def remove_pool_quota(service, pool_name):
- """
- Set a byte quota on a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
-    check_call(cmd)
-
-
-def remove_erasure_profile(service, profile_name):
- """
- Create a new erasure code profile if one does not already exist for it. Updates
- the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
- for more details
- :param service: six.string_types. The Ceph user name to run the command under
- :param profile_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
- profile_name]
-    check_call(cmd)
-
-
-def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
- failure_domain='host',
- data_chunks=2, coding_chunks=1,
- locality=None, durability_estimator=None,
- device_class=None):
- """
- Create a new erasure code profile if one does not already exist for it. Updates
- the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
- for more details
- :param service: six.string_types. The Ceph user name to run the command under
- :param profile_name: six.string_types
- :param erasure_plugin_name: six.string_types
-    :param failure_domain: six.string_types. One of ['chassis', 'datacenter',
-        'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']
- :param data_chunks: int
- :param coding_chunks: int
- :param locality: int
- :param durability_estimator: int
- :param device_class: six.string_types
- :return: None. Can raise CalledProcessError
- """
- # Ensure this failure_domain is allowed by Ceph
- validator(failure_domain, six.string_types,
- ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
-
-    if locality is not None and durability_estimator is not None:
-        raise ValueError("create_erasure_profile should be called with k, m "
-                         "and one of l or c but not both.")
-
-    cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set',
-           profile_name, 'plugin=' + erasure_plugin_name,
-           'k=' + str(data_chunks), 'm=' + str(coding_chunks)]
-
- luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
- # failure_domain changed in luminous
- if luminous_or_later:
- cmd.append('crush-failure-domain=' + failure_domain)
- else:
- cmd.append('ruleset-failure-domain=' + failure_domain)
-
- # device class new in luminous
-    if luminous_or_later and device_class:
-        cmd.append('crush-device-class={}'.format(device_class))
-    elif device_class:
-        log('Skipping device class configuration (ceph < 12.0.0)',
-            level=DEBUG)
-
- # Add plugin specific information
- if locality is not None:
- # For local erasure codes
- cmd.append('l=' + str(locality))
- if durability_estimator is not None:
- # For Shec erasure codes
- cmd.append('c=' + str(durability_estimator))
-
- if erasure_profile_exists(service, profile_name):
- cmd.append('--force')
-
-    check_call(cmd)
-
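-# Illustrative sketch (hypothetical profile and pool names, not part of
-# charmhelpers): define a 2+1 jerasure profile pinned to SSDs on a
-# Luminous-or-later cluster, then build an erasure coded pool from it:
-#
-#     create_erasure_profile(service='admin', profile_name='ec-2-1-ssd',
-#                            data_chunks=2, coding_chunks=1,
-#                            failure_domain='host', device_class='ssd')
-#     ErasurePool(service='admin', name='objects',
-#                 erasure_code_profile='ec-2-1-ssd').create()
-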
-
-def rename_pool(service, old_name, new_name):
- """
- Rename a Ceph pool from old_name to new_name
- :param service: six.string_types. The Ceph user name to run the command under
- :param old_name: six.string_types
- :param new_name: six.string_types
- :return: None
- """
- validator(value=old_name, valid_type=six.string_types)
- validator(value=new_name, valid_type=six.string_types)
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
- check_call(cmd)
-
-
-def erasure_profile_exists(service, name):
- """
- Check to see if an Erasure code profile already exists.
- :param service: six.string_types. The Ceph user name to run the command under
- :param name: six.string_types
- :return: bool. True if the profile exists, otherwise False
- """
- validator(value=name, valid_type=six.string_types)
- try:
- check_call(['ceph', '--id', service,
- 'osd', 'erasure-code-profile', 'get',
- name])
- return True
- except CalledProcessError:
- return False
-
-
-def get_cache_mode(service, pool_name):
- """
- Find the current caching mode of the pool_name given.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :return: six.string_types or None. The pool's cache mode, e.g. 'writeback'
- """
- validator(value=service, valid_type=six.string_types)
- validator(value=pool_name, valid_type=six.string_types)
- out = check_output(['ceph', '--id', service,
- 'osd', 'dump', '--format=json'])
- if six.PY3:
- out = out.decode('UTF-8')
-    osd_json = json.loads(out)
-    for pool in osd_json['pools']:
-        if pool['pool_name'] == pool_name:
-            return pool['cache_mode']
-    return None
-
-
-def pool_exists(service, name):
- """Check to see if a RADOS pool already exists."""
- try:
- out = check_output(['rados', '--id', service, 'lspools'])
- if six.PY3:
- out = out.decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out.split()
-
-
-def get_osds(service, device_class=None):
- """Return a list of all Ceph Object Storage Daemons currently in the
- cluster (optionally filtered by storage device class).
-
- :param device_class: Class of storage device for OSDs
- :type device_class: str
- """
- luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
- if luminous_or_later and device_class:
- out = check_output(['ceph', '--id', service,
- 'osd', 'crush', 'class',
- 'ls-osd', device_class,
- '--format=json'])
- else:
- out = check_output(['ceph', '--id', service,
- 'osd', 'ls',
- '--format=json'])
- if six.PY3:
- out = out.decode('UTF-8')
- return json.loads(out)
-
-
-def install():
- """Basic Ceph client installation."""
- ceph_dir = "/etc/ceph"
- if not os.path.exists(ceph_dir):
- os.mkdir(ceph_dir)
-
- apt_install('ceph-common', fatal=True)
-
-
-def rbd_exists(service, pool, rbd_img):
- """Check to see if a RADOS block device exists."""
- try:
- out = check_output(['rbd', 'list', '--id',
- service, '--pool', pool])
- if six.PY3:
- out = out.decode('UTF-8')
- except CalledProcessError:
- return False
-
- return rbd_img in out
-
-
-def create_rbd_image(service, pool, image, sizemb):
- """Create a new RADOS block device."""
- cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
- '--pool', pool]
- check_call(cmd)
-
-
-def update_pool(client, pool, settings):
- cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
- for k, v in six.iteritems(settings):
- cmd.append(k)
- cmd.append(v)
-
- check_call(cmd)
-
-
-def set_app_name_for_pool(client, pool, name):
- """
- Calls `osd pool application enable` for the specified pool name
-
- :param client: Name of the ceph client to use
- :type client: str
- :param pool: Pool to set app name for
- :type pool: str
- :param name: app name for the specified pool
- :type name: str
-
- :raises: CalledProcessError if ceph call fails
- """
- if cmp_pkgrevno('ceph-common', '12.0.0') >= 0:
- cmd = ['ceph', '--id', client, 'osd', 'pool',
- 'application', 'enable', pool, name]
- check_call(cmd)
-
-
-def create_pool(service, name, replicas=3, pg_num=None):
- """Create a new RADOS pool."""
- if pool_exists(service, name):
- log("Ceph pool {} already exists, skipping creation".format(name),
- level=WARNING)
- return
-
- if not pg_num:
- # Calculate the number of placement groups based
- # on upstream recommended best practices.
- osds = get_osds(service)
- if osds:
- pg_num = (len(osds) * 100 // replicas)
- else:
- # NOTE(james-page): Default to 200 for older ceph versions
- # which don't support OSD query from cli
- pg_num = 200
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
- check_call(cmd)
-
- update_pool(service, name, settings={'size': str(replicas)})
-
-
-def delete_pool(service, name):
- """Delete a RADOS pool from ceph."""
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
- '--yes-i-really-really-mean-it']
- check_call(cmd)
-
-
-def _keyfile_path(service):
- return KEYFILE.format(service)
-
-
-def _keyring_path(service):
- return KEYRING.format(service)
-
-
-def add_key(service, key):
- """
- Add a key to a keyring.
-
- Creates the keyring if it doesn't already exist.
-
- Logs and returns if the key is already in the keyring.
- """
- keyring = _keyring_path(service)
- if os.path.exists(keyring):
- with open(keyring, 'r') as ring:
- if key in ring.read():
- log('Ceph keyring exists at %s and has not changed.' % keyring,
- level=DEBUG)
- return
- log('Updating existing keyring %s.' % keyring, level=DEBUG)
-
- cmd = ['ceph-authtool', keyring, '--create-keyring',
- '--name=client.{}'.format(service), '--add-key={}'.format(key)]
- check_call(cmd)
- log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
-
-
-def create_keyring(service, key):
- """Deprecated. Please use the more accurately named 'add_key'"""
- return add_key(service, key)
-
-
-def delete_keyring(service):
- """Delete an existing Ceph keyring."""
- keyring = _keyring_path(service)
- if not os.path.exists(keyring):
- log('Keyring does not exist at %s' % keyring, level=WARNING)
- return
-
- os.remove(keyring)
- log('Deleted keyring at %s.' % keyring, level=INFO)
-
-
-def create_key_file(service, key):
- """Create a file containing key."""
- keyfile = _keyfile_path(service)
- if os.path.exists(keyfile):
- log('Keyfile exists at %s.' % keyfile, level=WARNING)
- return
-
- with open(keyfile, 'w') as fd:
- fd.write(key)
-
- log('Created new keyfile at %s.' % keyfile, level=INFO)
-
-
-def get_ceph_nodes(relation='ceph'):
- """Query named relation to determine current nodes."""
- hosts = []
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- hosts.append(relation_get('private-address', unit=unit, rid=r_id))
-
- return hosts
-
-
-def configure(service, key, auth, use_syslog):
- """Perform basic configuration of Ceph."""
- add_key(service, key)
- create_key_file(service, key)
- hosts = get_ceph_nodes()
- with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
- ceph_conf.write(CEPH_CONF.format(auth=auth,
- keyring=_keyring_path(service),
- mon_hosts=",".join(map(str, hosts)),
- use_syslog=use_syslog))
- modprobe('rbd')
-
-
-def image_mapped(name):
- """Determine whether a RADOS block device is mapped locally."""
- try:
- out = check_output(['rbd', 'showmapped'])
- if six.PY3:
- out = out.decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out
-
-
-def map_block_storage(service, pool, image):
- """Map a RADOS block device for local use."""
- cmd = [
- 'rbd',
- 'map',
- '{}/{}'.format(pool, image),
- '--user',
- service,
- '--secret',
- _keyfile_path(service),
- ]
- check_call(cmd)
-
-
-def filesystem_mounted(fs):
- """Determine whether a filesytems is already mounted."""
- return fs in [f for f, m in mounts()]
-
-
-def make_filesystem(blk_device, fstype='ext4', timeout=10):
- """Make a new filesystem on the specified block device."""
- count = 0
- e_noent = errno.ENOENT
- while not os.path.exists(blk_device):
- if count >= timeout:
- log('Gave up waiting on block device %s' % blk_device,
- level=ERROR)
- raise IOError(e_noent, os.strerror(e_noent), blk_device)
-
- log('Waiting for block device %s to appear' % blk_device,
- level=DEBUG)
- count += 1
- time.sleep(1)
- else:
- log('Formatting block device %s as filesystem %s.' %
- (blk_device, fstype), level=INFO)
- check_call(['mkfs', '-t', fstype, blk_device])
-
-
-def place_data_on_block_device(blk_device, data_src_dst):
- """Migrate data in data_src_dst to blk_device and then remount."""
- # mount block device into /mnt
- mount(blk_device, '/mnt')
- # copy data to /mnt
- copy_files(data_src_dst, '/mnt')
- # umount block device
- umount('/mnt')
- # Grab user/group ID's from original source
- _dir = os.stat(data_src_dst)
- uid = _dir.st_uid
- gid = _dir.st_gid
- # re-mount where the data should originally be
- # TODO: persist is currently a NO-OP in core.host
- mount(blk_device, data_src_dst, persist=True)
- # ensure original ownership of new mount.
- os.chown(data_src_dst, uid, gid)
-
-
-def copy_files(src, dst, symlinks=False, ignore=None):
- """Copy files from src to dst."""
- for item in os.listdir(src):
- s = os.path.join(src, item)
- d = os.path.join(dst, item)
- if os.path.isdir(s):
- shutil.copytree(s, d, symlinks, ignore)
- else:
- shutil.copy2(s, d)
-
-
-def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
- blk_device, fstype, system_services=[],
- replicas=3):
- """NOTE: This function must only be called from a single service unit for
- the same rbd_img otherwise data loss will occur.
-
- Ensures given pool and RBD image exists, is mapped to a block device,
- and the device is formatted and mounted at the given mount_point.
-
- If formatting a device for the first time, data existing at mount_point
- will be migrated to the RBD device before being re-mounted.
-
- All services listed in system_services will be stopped prior to data
- migration and restarted when complete.
- """
- # Ensure pool, RBD image, RBD mappings are in place.
- if not pool_exists(service, pool):
- log('Creating new pool {}.'.format(pool), level=INFO)
- create_pool(service, pool, replicas=replicas)
-
- if not rbd_exists(service, pool, rbd_img):
- log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
- create_rbd_image(service, pool, rbd_img, sizemb)
-
- if not image_mapped(rbd_img):
- log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
- level=INFO)
- map_block_storage(service, pool, rbd_img)
-
- # make file system
- # TODO: What happens if for whatever reason this is run again and
- # the data is already in the rbd device and/or is mounted??
- # When it is mounted already, it will fail to make the fs
- # XXX: This is really sketchy! Need to at least add an fstab entry
- # otherwise this hook will blow away existing data if its executed
- # after a reboot.
- if not filesystem_mounted(mount_point):
- make_filesystem(blk_device, fstype)
-
- for svc in system_services:
- if service_running(svc):
- log('Stopping service {} prior to migrating data.'
- .format(svc), level=DEBUG)
- service_stop(svc)
-
- place_data_on_block_device(blk_device, mount_point)
-
- for svc in system_services:
- log('Starting service {} after migrating data.'
- .format(svc), level=DEBUG)
- service_start(svc)
-
-
-def ensure_ceph_keyring(service, user=None, group=None,
- relation='ceph', key=None):
- """Ensures a ceph keyring is created for a named service and optionally
- ensures user and group ownership.
-
- @returns boolean: Flag to indicate whether a key was successfully written
- to disk based on either relation data or a supplied key
- """
- if not key:
- for rid in relation_ids(relation):
- for unit in related_units(rid):
- key = relation_get('key', rid=rid, unit=unit)
- if key:
- break
-
- if not key:
- return False
-
- add_key(service=service, key=key)
- keyring = _keyring_path(service)
- if user and group:
- check_call(['chown', '%s:%s' % (user, group), keyring])
-
- return True
-
-
-class CephBrokerRq(object):
- """Ceph broker request.
-
- Multiple operations can be added to a request and sent to the Ceph broker
- to be executed.
-
- Request is json-encoded for sending over the wire.
-
- The API is versioned and defaults to version 1.
- """
-
- def __init__(self, api_version=1, request_id=None):
- self.api_version = api_version
- if request_id:
- self.request_id = request_id
- else:
- self.request_id = str(uuid.uuid1())
- self.ops = []
-
- def add_op_request_access_to_group(self, name, namespace=None,
- permission=None, key_name=None,
- object_prefix_permissions=None):
- """
- Adds the requested permissions to the current service's Ceph key,
- allowing the key to access only the specified pools or
- object prefixes. object_prefix_permissions should be a dictionary
- keyed on the permission with the corresponding value being a list
- of prefixes to apply that permission to.
- {
- 'rwx': ['prefix1', 'prefix2'],
- 'class-read': ['prefix3']}
- """
- self.ops.append({
- 'op': 'add-permissions-to-key', 'group': name,
- 'namespace': namespace,
- 'name': key_name or service_name(),
- 'group-permission': permission,
- 'object-prefix-permissions': object_prefix_permissions})
-
- def add_op_create_pool(self, name, replica_count=3, pg_num=None,
- weight=None, group=None, namespace=None,
- app_name=None, max_bytes=None, max_objects=None):
- """DEPRECATED: Use ``add_op_create_replicated_pool()`` or
- ``add_op_create_erasure_pool()`` instead.
- """
- return self.add_op_create_replicated_pool(
- name, replica_count=replica_count, pg_num=pg_num, weight=weight,
- group=group, namespace=namespace, app_name=app_name,
- max_bytes=max_bytes, max_objects=max_objects)
-
- def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
- weight=None, group=None, namespace=None,
- app_name=None, max_bytes=None,
- max_objects=None):
- """Adds an operation to create a replicated pool.
-
- :param name: Name of pool to create
- :type name: str
- :param replica_count: Number of copies Ceph should keep of your data.
- :type replica_count: int
- :param pg_num: Request specific number of Placement Groups to create
- for pool.
- :type pg_num: int
- :param weight: The percentage of data that is expected to be contained
- in the pool from the total available space on the OSDs.
- Used to calculate number of Placement Groups to create
- for pool.
- :type weight: float
- :param group: Group to add pool to
- :type group: str
- :param namespace: Group namespace
- :type namespace: str
- :param app_name: (Optional) Tag pool with application name. Note that
- there are certain protocols emerging upstream with
- regard to meaningful application names to use.
- Examples are ``rbd`` and ``rgw``.
- :type app_name: str
- :param max_bytes: Maximum bytes quota to apply
- :type max_bytes: int
- :param max_objects: Maximum objects quota to apply
- :type max_objects: int
- """
- if pg_num and weight:
- raise ValueError('pg_num and weight are mutually exclusive')
-
- self.ops.append({'op': 'create-pool', 'name': name,
- 'replicas': replica_count, 'pg_num': pg_num,
- 'weight': weight, 'group': group,
- 'group-namespace': namespace, 'app-name': app_name,
- 'max-bytes': max_bytes, 'max-objects': max_objects})
-
- def add_op_create_erasure_pool(self, name, erasure_profile=None,
- weight=None, group=None, app_name=None,
- max_bytes=None, max_objects=None):
- """Adds an operation to create a erasure coded pool.
-
- :param name: Name of pool to create
- :type name: str
- :param erasure_profile: Name of erasure code profile to use. If not
- set the ceph-mon unit handling the broker
- request will set its default value.
- :type erasure_profile: str
- :param weight: The percentage of data that is expected to be contained
- in the pool from the total available space on the OSDs.
- :type weight: float
- :param group: Group to add pool to
- :type group: str
- :param app_name: (Optional) Tag pool with application name. Note that
- there are certain protocols emerging upstream with
- regard to meaningful application names to use.
- Examples are ``rbd`` and ``rgw``.
- :type app_name: str
- :param max_bytes: Maximum bytes quota to apply
- :type max_bytes: int
- :param max_objects: Maximum objects quota to apply
- :type max_objects: int
- """
- self.ops.append({'op': 'create-pool', 'name': name,
- 'pool-type': 'erasure',
- 'erasure-profile': erasure_profile,
- 'weight': weight,
- 'group': group, 'app-name': app_name,
- 'max-bytes': max_bytes, 'max-objects': max_objects})
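-
-    # Illustrative sketch (hypothetical pool names, not part of
-    # charmhelpers): queue one replicated and one erasure coded pool in a
-    # single broker request, then hand it to send_request_if_needed():
-    #
-    #     rq = CephBrokerRq()
-    #     rq.add_op_create_replicated_pool(name='vms', replica_count=3,
-    #                                      weight=40.0, app_name='rbd')
-    #     rq.add_op_create_erasure_pool(name='objects',
-    #                                   erasure_profile='ec-2-1-ssd',
-    #                                   app_name='rgw')
-    #     send_request_if_needed(rq)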
-
- def set_ops(self, ops):
- """Set request ops to provided value.
-
- Useful for injecting ops that come from a previous request
- to allow comparisons to ensure validity.
- """
- self.ops = ops
-
- @property
- def request(self):
- return json.dumps({'api-version': self.api_version, 'ops': self.ops,
- 'request-id': self.request_id})
-
- def _ops_equal(self, other):
- if len(self.ops) == len(other.ops):
- for req_no in range(0, len(self.ops)):
- for key in [
- 'replicas', 'name', 'op', 'pg_num', 'weight',
- 'group', 'group-namespace', 'group-permission',
- 'object-prefix-permissions']:
- if self.ops[req_no].get(key) != other.ops[req_no].get(key):
- return False
- else:
- return False
- return True
-
-    def __eq__(self, other):
-        if not isinstance(other, self.__class__):
-            return False
-        return (self.api_version == other.api_version and
-                self._ops_equal(other))
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
-
-class CephBrokerRsp(object):
- """Ceph broker response.
-
- Response is json-decoded and contents provided as methods/properties.
-
- The API is versioned and defaults to version 1.
- """
-
- def __init__(self, encoded_rsp):
- self.api_version = None
- self.rsp = json.loads(encoded_rsp)
-
- @property
- def request_id(self):
- return self.rsp.get('request-id')
-
- @property
- def exit_code(self):
- return self.rsp.get('exit-code')
-
- @property
- def exit_msg(self):
- return self.rsp.get('stderr')
-
-
-# Ceph Broker Conversation:
-# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
-# and send that request to ceph via the ceph relation. The CephBrokerRq has a
- # unique id so that the client can identify which CephBrokerRsp is associated
-# with the request. Ceph will also respond to each client unit individually
-# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
-# via key broker-rsp-glance-0
-#
-# To use this the charm can just do something like:
-#
-# from charmhelpers.contrib.storage.linux.ceph import (
-# send_request_if_needed,
-# is_request_complete,
-# CephBrokerRq,
-# )
-#
-# @hooks.hook('ceph-relation-changed')
-# def ceph_changed():
-# rq = CephBrokerRq()
-# rq.add_op_create_pool(name='poolname', replica_count=3)
-#
- #     if is_request_complete(rq):
- #         <Request complete actions>
- #     else:
- #         send_request_if_needed(get_ceph_request())
-#
-# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
-# of glance having sent a request to ceph which ceph has successfully processed
-# 'ceph:8': {
-# 'ceph/0': {
-# 'auth': 'cephx',
-# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
-# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
-# 'ceph-public-address': '10.5.44.103',
-# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
-# 'private-address': '10.5.44.103',
-# },
-# 'glance/0': {
-# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
-# '"ops": [{"replicas": 3, "name": "glance", '
-# '"op": "create-pool"}]}'),
-# 'private-address': '10.5.44.109',
-# },
-# }
-
-def get_previous_request(rid):
- """Return the last ceph broker request sent on a given relation
-
- @param rid: Relation id to query for request
- """
- request = None
- broker_req = relation_get(attribute='broker_req', rid=rid,
- unit=local_unit())
- if broker_req:
- request_data = json.loads(broker_req)
- request = CephBrokerRq(api_version=request_data['api-version'],
- request_id=request_data['request-id'])
- request.set_ops(request_data['ops'])
-
- return request
-
-
-def get_request_states(request, relation='ceph'):
- """Return a dict of requests per relation id with their corresponding
- completion state.
-
- This allows a charm, which has a request for ceph, to see whether there is
- an equivalent request already being processed and if so what state that
- request is in.
-
- @param request: A CephBrokerRq object
- """
- requests = {}
- for rid in relation_ids(relation):
- complete = False
- previous_request = get_previous_request(rid)
- if request == previous_request:
- sent = True
- complete = is_request_complete_for_rid(previous_request, rid)
- else:
- sent = False
- complete = False
-
- requests[rid] = {
- 'sent': sent,
- 'complete': complete,
- }
-
- return requests
-
-
-def is_request_sent(request, relation='ceph'):
- """Check to see if a functionally equivalent request has already been sent
-
- Returns True if a similar request has been sent
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request, relation=relation)
- for rid in states.keys():
- if not states[rid]['sent']:
- return False
-
- return True
-
-
-def is_request_complete(request, relation='ceph'):
- """Check to see if a functionally equivalent request has already been
- completed
-
- Returns True if a similar request has been completed
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request, relation=relation)
- for rid in states.keys():
- if not states[rid]['complete']:
- return False
-
- return True
-
-
-def is_request_complete_for_rid(request, rid):
- """Check if a given request has been completed on the given relation
-
- @param request: A CephBrokerRq object
- @param rid: Relation ID
- """
- broker_key = get_broker_rsp_key()
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if rdata.get(broker_key):
- rsp = CephBrokerRsp(rdata.get(broker_key))
- if rsp.request_id == request.request_id:
- if not rsp.exit_code:
- return True
- else:
- # The remote unit sent no reply targeted at this unit so either the
- # remote ceph cluster does not support unit targeted replies or it
- # has not processed our request yet.
- if rdata.get('broker_rsp'):
- request_data = json.loads(rdata['broker_rsp'])
- if request_data.get('request-id'):
- log('Ignoring legacy broker_rsp without unit key as remote '
- 'service supports unit specific replies', level=DEBUG)
- else:
-                    log('Using legacy broker_rsp as remote service does not '
-                        'support unit specific replies', level=DEBUG)
- rsp = CephBrokerRsp(rdata['broker_rsp'])
- if not rsp.exit_code:
- return True
-
- return False
-
-
-def get_broker_rsp_key():
- """Return broker response key for this unit
-
- This is the key that ceph is going to use to pass request status
- information back to this unit
- """
- return 'broker-rsp-' + local_unit().replace('/', '-')
-
-
-def send_request_if_needed(request, relation='ceph'):
- """Send broker request if an equivalent request has not already been sent
-
- @param request: A CephBrokerRq object
- """
- if is_request_sent(request, relation=relation):
- log('Request already sent but not complete, not sending new request',
- level=DEBUG)
- else:
- for rid in relation_ids(relation):
- log('Sending request {}'.format(request.request_id), level=DEBUG)
- relation_set(relation_id=rid, broker_req=request.request)
-
-
-def is_broker_action_done(action, rid=None, unit=None):
- """Check whether broker action has completed yet.
-
- @param action: name of action to be performed
- @returns True if action complete otherwise False
- """
- rdata = relation_get(rid=rid, unit=unit) or {}
- broker_rsp = rdata.get(get_broker_rsp_key())
- if not broker_rsp:
- return False
-
- rsp = CephBrokerRsp(broker_rsp)
- unit_name = local_unit().partition('/')[2]
- key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
- kvstore = kv()
- val = kvstore.get(key=key)
- if val and val == rsp.request_id:
- return True
-
- return False
-
-
-def mark_broker_action_done(action, rid=None, unit=None):
- """Mark action as having been completed.
-
- @param action: name of action to be performed
- @returns None
- """
- rdata = relation_get(rid=rid, unit=unit) or {}
- broker_rsp = rdata.get(get_broker_rsp_key())
- if not broker_rsp:
- return
-
- rsp = CephBrokerRsp(broker_rsp)
- unit_name = local_unit().partition('/')[2]
- key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
- kvstore = kv()
- kvstore.set(key=key, value=rsp.request_id)
- kvstore.flush()
-
-
-class CephConfContext(object):
- """Ceph config (ceph.conf) context.
-
-    Supports user-provided Ceph configuration settings. Users can provide a
-    dictionary as the value for the config-flags charm option containing
-    Ceph configuration settings keyed by their section in ceph.conf.
- """
- def __init__(self, permitted_sections=None):
- self.permitted_sections = permitted_sections or []
-
- def __call__(self):
- conf = config('config-flags')
- if not conf:
- return {}
-
- conf = config_flags_parser(conf)
- if not isinstance(conf, dict):
- log("Provided config-flags is not a dictionary - ignoring",
- level=WARNING)
- return {}
-
- permitted = self.permitted_sections
- if permitted:
- diff = set(conf.keys()).difference(set(permitted))
- if diff:
- log("Config-flags contains invalid keys '%s' - they will be "
- "ignored" % (', '.join(diff)), level=WARNING)
-
- ceph_conf = {}
- for key in conf:
- if permitted and key not in permitted:
- log("Ignoring key '%s'" % key, level=WARNING)
- continue
-
- ceph_conf[key] = conf[key]
-
- return ceph_conf
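-
-    # Illustrative sketch (hypothetical config value, not part of
-    # charmhelpers): with the config-flags charm option set to a mapping
-    # such as "{global: {osd pool default size: 3}}", a context limited to
-    # the 'global' section yields just that section:
-    #
-    #     ctxt = CephConfContext(permitted_sections=['global'])()
-    #     # -> {'global': {'osd pool default size': 3}}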
diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py
deleted file mode 100644
index 82472ff..0000000
--- a/hooks/charmhelpers/contrib/storage/linux/loopback.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-from subprocess import (
- check_call,
- check_output,
-)
-
-import six
-
-
-##################################################
-# loopback device helpers.
-##################################################
-def loopback_devices():
- '''
- Parse through 'losetup -a' output to determine currently mapped
- loopback devices. Output is expected to look like:
-
- /dev/loop0: [0807]:961814 (/tmp/my.img)
-
- :returns: dict: a dict mapping {loopback_dev: backing_file}
- '''
- loopbacks = {}
- cmd = ['losetup', '-a']
- output = check_output(cmd)
- if six.PY3:
- output = output.decode('utf-8')
- devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
- for dev, _, f in devs:
- loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
- return loopbacks
-
-
-def create_loopback(file_path):
- '''
- Create a loopback device for a given backing file.
-
- :returns: str: Full path to new loopback device (eg, /dev/loop0)
- '''
- file_path = os.path.abspath(file_path)
- check_call(['losetup', '--find', file_path])
- for d, f in six.iteritems(loopback_devices()):
- if f == file_path:
- return d
-
-
-def ensure_loopback_device(path, size):
- '''
- Ensure a loopback device exists for a given backing file path and size.
-    If a loopback device is not already mapped to the file, a new one will
-    be created.
-
- TODO: Confirm size of found loopback device.
-
- :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
- '''
- for d, f in six.iteritems(loopback_devices()):
- if f == path:
- return d
-
- if not os.path.exists(path):
- cmd = ['truncate', '--size', size, path]
- check_call(cmd)
-
- return create_loopback(path)
-
-
-def is_mapped_loopback_device(device):
- """
- Checks if a given device name is an existing/mapped loopback device.
- :param device: str: Full path to the device (eg, /dev/loop1).
-    :returns: str: Path to the backing file if it is a loopback device,
-        otherwise an empty string
- """
- return loopback_devices().get(device, "")
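-
-
-# Illustrative sketch (hypothetical path and size, not part of
-# charmhelpers): back a 1G loopback device with a sparse file, then
-# resolve the mapping back to its backing file:
-#
-#     dev = ensure_loopback_device('/srv/images/loop0.img', '1G')
-#     backing = is_mapped_loopback_device(dev)  # '/srv/images/loop0.img'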
diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py
deleted file mode 100644
index c8bde69..0000000
--- a/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools
-from subprocess import (
- CalledProcessError,
- check_call,
- check_output,
- Popen,
- PIPE,
-)
-
-
-##################################################
-# LVM helpers.
-##################################################
-def deactivate_lvm_volume_group(block_device):
- '''
- Deactivate any volume group associated with an LVM physical volume.
-
- :param block_device: str: Full path to LVM physical volume
- '''
- vg = list_lvm_volume_group(block_device)
- if vg:
- cmd = ['vgchange', '-an', vg]
- check_call(cmd)
-
-
-def is_lvm_physical_volume(block_device):
- '''
- Determine whether a block device is initialized as an LVM PV.
-
- :param block_device: str: Full path of block device to inspect.
-
- :returns: boolean: True if block device is a PV, False if not.
- '''
- try:
- check_output(['pvdisplay', block_device])
- return True
- except CalledProcessError:
- return False
-
-
-def remove_lvm_physical_volume(block_device):
- '''
- Remove LVM PV signatures from a given block device.
-
- :param block_device: str: Full path of block device to scrub.
- '''
-    p = Popen(['pvremove', '-ff', block_device],
-              stdin=PIPE)
-    # Popen streams default to bytes, so the confirmation must be bytes too
-    p.communicate(input=b'y\n')
-
-
-def list_lvm_volume_group(block_device):
- '''
- List LVM volume group associated with a given block device.
-
- Assumes block device is a valid LVM PV.
-
- :param block_device: str: Full path of block device to inspect.
-
- :returns: str: Name of volume group associated with block device or None
- '''
- vg = None
- pvd = check_output(['pvdisplay', block_device]).splitlines()
- for lvm in pvd:
- lvm = lvm.decode('UTF-8')
- if lvm.strip().startswith('VG Name'):
- vg = ' '.join(lvm.strip().split()[2:])
- return vg
-
-
-def create_lvm_physical_volume(block_device):
- '''
- Initialize a block device as an LVM physical volume.
-
- :param block_device: str: Full path of block device to initialize.
-
- '''
- check_call(['pvcreate', block_device])
-
-
-def create_lvm_volume_group(volume_group, block_device):
- '''
- Create an LVM volume group backed by a given block device.
-
- Assumes block device has already been initialized as an LVM PV.
-
- :param volume_group: str: Name of volume group to create.
- :block_device: str: Full path of PV-initialized block device.
- '''
- check_call(['vgcreate', volume_group, block_device])
-
-
-def list_logical_volumes(select_criteria=None, path_mode=False):
- '''
- List logical volumes
-
- :param select_criteria: str: Limit list to those volumes matching this
- criteria (see 'lvs -S help' for more details)
- :param path_mode: bool: return logical volume name in 'vg/lv' format, this
- format is required for some commands like lvextend
- :returns: [str]: List of logical volumes
- '''
-    lv_display_attr = 'lv_name'
-    if path_mode:
-        # Parsing output logic relies on the column order
-        lv_display_attr = 'vg_name,' + lv_display_attr
-    cmd = ['lvs', '--options', lv_display_attr, '--noheadings']
- if select_criteria:
- cmd.extend(['--select', select_criteria])
- lvs = []
- for lv in check_output(cmd).decode('UTF-8').splitlines():
- if not lv:
- continue
- if path_mode:
- lvs.append('/'.join(lv.strip().split()))
- else:
- lvs.append(lv.strip())
- return lvs
-
-
-list_thin_logical_volume_pools = functools.partial(
- list_logical_volumes,
- select_criteria='lv_attr =~ ^t')
-
-list_thin_logical_volumes = functools.partial(
- list_logical_volumes,
- select_criteria='lv_attr =~ ^V')
-
-
-def extend_logical_volume_by_device(lv_name, block_device):
- '''
- Extends the size of logical volume lv_name by the amount of free space on
- physical volume block_device.
-
- :param lv_name: str: name of logical volume to be extended (vg/lv format)
- :param block_device: str: name of block_device to be allocated to lv_name
- '''
- cmd = ['lvextend', lv_name, block_device]
- check_call(cmd)
-
-
-def create_logical_volume(lv_name, volume_group, size=None):
- '''
- Create a new logical volume in an existing volume group
-
- :param lv_name: str: name of logical volume to be created.
- :param volume_group: str: Name of volume group to use for the new volume.
- :param size: str: Size of logical volume to create (100% if not supplied)
- :raises subprocess.CalledProcessError: in the event that the lvcreate fails.
- '''
- if size:
- check_call([
- 'lvcreate',
- '--yes',
- '-L',
- '{}'.format(size),
- '-n', lv_name, volume_group
- ])
-    else:
-        # Create the lv with all the available space; lvcreate requires a
-        # different flag for this ('-l 100%FREE' instead of '-L <size>')
- check_call([
- 'lvcreate',
- '--yes',
- '-l',
- '100%FREE',
- '-n', lv_name, volume_group
- ])
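-
-
-# Illustrative end-to-end sketch (hypothetical device and names, not part
-# of charmhelpers): initialise a PV, build a volume group on it and carve
-# out a fixed-size logical volume:
-#
-#     create_lvm_physical_volume('/dev/vdb')
-#     create_lvm_volume_group('data-vg', '/dev/vdb')
-#     create_logical_volume('data-lv', 'data-vg', size='10G')
-#     list_logical_volumes('vg_name=data-vg')  # -> ['data-lv']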
diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py
deleted file mode 100644
index c57aaf3..0000000
--- a/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-from stat import S_ISBLK
-
-from subprocess import (
- CalledProcessError,
- check_call,
- check_output,
- call
-)
-
-
-def _luks_uuid(dev):
- """
- Check to see if dev is a LUKS encrypted volume, returning the UUID
- of volume if it is.
-
- :param: dev: path to block device to check.
- :returns: str. UUID of LUKS device or None if not a LUKS device
- """
- try:
- cmd = ['cryptsetup', 'luksUUID', dev]
- return check_output(cmd).decode('UTF-8').strip()
- except CalledProcessError:
- return None
-
-
-def is_luks_device(dev):
- """
- Determine if dev is a LUKS-formatted block device.
-
- :param: dev: A full path to a block device to check for LUKS header
- presence
- :returns: boolean: indicates whether a device is used based on LUKS header.
- """
- return _luks_uuid(dev) is not None
-
-
-def is_mapped_luks_device(dev):
- """
- Determine if dev is a mapped LUKS device
- :param: dev: A full path to a block device to be checked
- :returns: boolean: indicates whether a device is mapped
- """
- _, dirs, _ = next(os.walk(
- '/sys/class/block/{}/holders/'
- .format(os.path.basename(os.path.realpath(dev))))
- )
- is_held = len(dirs) > 0
- return is_held and is_luks_device(dev)
-
-
-def is_block_device(path):
- '''
- Confirm device at path is a valid block device node.
-
- :returns: boolean: True if path is a block device, False if not.
- '''
- if not os.path.exists(path):
- return False
- return S_ISBLK(os.stat(path).st_mode)
-
-
-def zap_disk(block_device):
- '''
-    Clear a block device of its partition table. Relies on sgdisk, which is
-    installed as part of the 'gdisk' package in Ubuntu.
-
- :param block_device: str: Full path of block device to clean.
- '''
- # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
- # sometimes sgdisk exits non-zero; this is OK, dd will clean up
- call(['sgdisk', '--zap-all', '--', block_device])
- call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
- dev_end = check_output(['blockdev', '--getsz',
- block_device]).decode('UTF-8')
- gpt_end = int(dev_end.split()[0]) - 100
- check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
- 'bs=1M', 'count=1'])
- check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
- 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
-
-
-def is_device_mounted(device):
- '''Given a device path, return True if that device is mounted, and False
- if it isn't.
-
- :param device: str: Full path of the device to check.
- :returns: boolean: True if the path represents a mounted device, False if
- it doesn't.
- '''
- try:
- out = check_output(['lsblk', '-P', device]).decode('UTF-8')
- except Exception:
- return False
- return bool(re.search(r'MOUNTPOINT=".+"', out))
-
-
-def mkfs_xfs(device, force=False):
- """Format device with XFS filesystem.
-
- By default this should fail if the device already has a filesystem on it.
- :param device: Full path to device to format
-    :ptype device: str
-    :param force: Force operation
-    :ptype force: boolean"""
- cmd = ['mkfs.xfs']
- if force:
- cmd.append("-f")
-
- cmd += ['-i', 'size=1024', device]
- check_call(cmd)
diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/core/decorators.py b/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index 6ad41ee..0000000
--- a/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
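-
-
-# Illustrative sketch (hypothetical service name, not part of
-# charmhelpers): retry a flaky call up to 3 times with a growing delay
-# (2s, then 4s, then 6s) before letting the exception propagate:
-#
-#     import subprocess
-#
-#     @retry_on_exception(3, base_delay=2,
-#                         exc_type=subprocess.CalledProcessError)
-#     def restart_service():
-#         subprocess.check_call(['systemctl', 'restart', 'my-service'])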
diff --git a/hooks/charmhelpers/core/files.py b/hooks/charmhelpers/core/files.py
deleted file mode 100644
index fdd82b7..0000000
--- a/hooks/charmhelpers/core/files.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-__author__ = 'Jorge Niedbalski '
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
- """
- Search for and replace the given pattern in filename.
-
- :param filename: relative or absolute file path.
- :param before: expression to be replaced (see 'man sed')
- :param after: expression to replace with (see 'man sed')
- :param flags: sed-compatible regex flags; defaults to ``g`` (global).
- For example, to make the search and replace case insensitive as
- well, specify ``flags="gi"``. Note that overriding this parameter
- replaces the default rather than extending it.
- :returns: 0 if the sed command succeeded; otherwise
- CalledProcessError is raised.
- """
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
-
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
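-
-# Example sketch (the config file path is hypothetical): replace 'ERROR'
-# with 'WARN' globally and case-insensitively:
-#
-#     sed('/etc/myapp.conf', 'ERROR', 'WARN', flags='gi')
-#
-# which runs: sed -i -r -e 's/ERROR/WARN/gi' /etc/myapp.conf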
diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index d9fa915..0000000
--- a/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. '
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
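-
-# Usage sketch (device and mountpoint are hypothetical); both helpers
-# operate on /etc/fstab unless an explicit path= is given:
-#
-#     Fstab.add('/dev/vdb1', '/srv/data', 'xfs', options='noatime')
-#     # writes: /dev/vdb1 /srv/data xfs noatime 0 0
-#     Fstab.remove_by_mountpoint('/srv/data')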
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
deleted file mode 100644
index 4744eb4..0000000
--- a/hooks/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,1490 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-from collections import namedtuple
-import glob
-import os
-import json
-import yaml
-import re
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-TRACE = "TRACE"
-MARKER = object()
-SH_MAX_ARG = 131071
-
-
-RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
- 'This may not be compatible with software you are '
- 'running in your shell.')
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- wrapper._wrapped = func
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
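-
-# flush() complements @cached: cache keys are JSON dumps of
-# (func, args, kwargs), so flushing on a substring clears every cached
-# call mentioning it. For example, relation_set() below invalidates
-# cached relation-get results for this unit with:
-#
-#     flush(local_unit())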
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message[:SH_MAX_ARG]]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
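-
-# A short sketch (the wrapped payload is hypothetical):
-#
-#     s = Serializable({'name': 'lxd', 'ready': True})
-#     s.json()   # '{"name": "lxd", "ready": true}'
-#     s.name     # 'lxd' -- attribute access proxies to the wrapped dict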
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def application_name():
- """
- The name of the deployed application this unit belongs to.
- """
- return local_unit().split('/')[0]
-
-
-def service_name():
- """
- .. deprecated:: 0.19.1
- Alias for :func:`application_name`.
- """
- return application_name()
-
-
-def model_name():
- """
- Name of the model that this unit is deployed in.
- """
- return os.environ['JUJU_MODEL_NAME']
-
-
-def model_uuid():
- """
- UUID of the model that this unit is deployed in.
- """
- return os.environ['JUJU_MODEL_UUID']
-
-
-def principal_unit():
- """Returns the principal unit of this unit, otherwise None"""
- # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
- principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
- # If it's empty, then this unit is the principal
- if principal_unit == '':
- return os.environ['JUJU_UNIT_NAME']
- elif principal_unit is not None:
- return principal_unit
- # For Juju 2.1 and below, let's try to work out the principal unit by
- # the various charms' metadata.yaml.
- for reltype in relation_types():
- for rid in relation_ids(reltype):
- for unit in related_units(rid):
- md = _metadata_unit(unit)
- if not md:
- continue
- subordinate = md.pop('subordinate', None)
- if not subordinate:
- return unit
- return None
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path) and os.stat(self.path).st_size:
- self.load_previous()
- atexit(self._implicit_save)
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- try:
- self._prev_dict = json.load(f)
- except ValueError as e:
- log('Unable to parse previous config data - {}'.format(str(e)),
- level=ERROR)
- for k, v in copy.deepcopy(self._prev_dict).items():
- if k not in self:
- self[k] = v
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services>`
- or :meth:`@hook <Hooks.hook>` decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- with open(self.path, 'w') as f:
- os.fchmod(f.fileno(), 0o600)
- json.dump(self, f)
-
- def _implicit_save(self):
- if self.implicit_save:
- self.save()
-
-
-_cache_config = None
-
-
-def config(scope=None):
- """
- Get the juju charm configuration (scope==None) or an individual key
- (scope=str). The returned value is a Python data structure loaded as
- JSON from the Juju config command.
-
- :param scope: If set, return the value for the specified key.
- :type scope: Optional[str]
- :returns: Either the whole config as a Config, or a key from it.
- :rtype: Any
- """
- global _cache_config
- config_cmd_line = ['config-get', '--all', '--format=json']
- try:
- # JSON Decode Exception for Python3.5+
- exc_json = json.decoder.JSONDecodeError
- except AttributeError:
- # JSON Decode Exception for Python2.7 through Python3.4
- exc_json = ValueError
- try:
- if _cache_config is None:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- _cache_config = Config(config_data)
- if scope is not None:
- return _cache_config.get(scope)
- return _cache_config
- except (exc_json, UnicodeDecodeError) as e:
- log('Unable to parse output from config-get: config_cmd_line="{}" '
- 'message="{}"'
- .format(config_cmd_line, str(e)), level=ERROR)
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should be, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
-     relid_cmd_line.append(reltype)
-     return json.loads(
-         subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-def expected_peer_units():
- """Get a generator for units we expect to join peer relation based on
- goal-state.
-
- The local unit is excluded from the result to make it easy to gauge
- completion of all peers joining the relation with existing hook tools.
-
- Example usage:
- log('peer {} of {} joined peer relation'
- .format(len(related_units()),
- len(list(expected_peer_units()))))
-
- This function will raise NotImplementedError if used with juju versions
- without goal-state support.
-
- :returns: iterator
- :rtype: types.GeneratorType
- :raises: NotImplementedError
- """
- if not has_juju_version("2.4.0"):
- # goal-state first appeared in 2.4.0.
- raise NotImplementedError("goal-state")
- _goal_state = goal_state()
- return (key for key in _goal_state['units']
- if '/' in key and key != local_unit())
-
-
-def expected_related_units(reltype=None):
- """Get a generator for units we expect to join relation based on
- goal-state.
-
- Note that you cannot use this function for the peer relation; take a look
- at expected_peer_units() for that.
-
- This function will raise KeyError if you request information for a
- relation type for which juju goal-state does not have information. It will
- raise NotImplementedError if used with juju versions without goal-state
- support.
-
- Example usage:
- log('participant {} of {} joined relation {}'
- .format(len(related_units()),
- len(list(expected_related_units())),
- relation_type()))
-
- :param reltype: Relation type to list data for, default is to list data for
- the relation type we are currently executing a hook for.
- :type reltype: str
- :returns: iterator
- :rtype: types.GeneratorType
- :raises: KeyError, NotImplementedError
- """
- if not has_juju_version("2.4.4"):
- # goal-state existed in 2.4.0, but did not list individual units to
- # join a relation in 2.4.1 through 2.4.3. (LP: #1794739)
- raise NotImplementedError("goal-state relation unit count")
- reltype = reltype or relation_type()
- _goal_state = goal_state()
- return (key for key in _goal_state['relations'][reltype] if '/' in key)
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-def _metadata_unit(unit):
- """Given the name of a unit (e.g. apache2/0), get the unit charm's
- metadata.yaml. Very similar to metadata() but allows us to inspect
- other units. Unit needs to be co-located, such as a subordinate or
- principal/primary.
-
- :returns: metadata.yaml as a python object.
-
- """
- basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
- unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
- joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
- if not os.path.exists(joineddir):
- return None
- with open(joineddir) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def peer_relation_id():
- '''Get the peers relation id if a peers relation has been joined, else None.'''
- md = metadata()
- section = md.get('peers')
- if section:
- for key in section:
- relids = relation_ids(key)
- if relids:
- return relids[0]
- return None
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peers'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peers``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peers'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
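-
-# Example: gate work on a relation being fully formed (the relation and
-# key names here are illustrative, not part of this charm's metadata):
-#
-#     if is_relation_made('shared-db', keys=['db_host', 'password']):
-#         configure_database()   # hypothetical helper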
-
-
-def _port_op(op_name, port, protocol="TCP"):
- """Open or close a service network port"""
- _args = [op_name]
- icmp = protocol.upper() == "ICMP"
- if icmp:
- _args.append(protocol)
- else:
- _args.append('{}/{}'.format(port, protocol))
- try:
- subprocess.check_call(_args)
- except subprocess.CalledProcessError:
- # Older Juju pre 2.3 doesn't support ICMP
- # so treat it as a no-op if it fails.
- if not icmp:
- raise
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _port_op('open-port', port, protocol)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _port_op('close-port', port, protocol)
-
-
-def open_ports(start, end, protocol="TCP"):
- """Opens a range of service network ports"""
- _args = ['open-port']
- _args.append('{}-{}/{}'.format(start, end, protocol))
- subprocess.check_call(_args)
-
-
-def close_ports(start, end, protocol="TCP"):
- """Close a range of service network ports"""
- _args = ['close-port']
- _args.append('{}-{}/{}'.format(start, end, protocol))
- subprocess.check_call(_args)
-
-
-def opened_ports():
- """Get the opened ports
-
- *Note that this will only show ports opened in a previous hook*
-
- :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
- """
- _args = ['opened-ports', '--format=json']
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-@cached
-def storage_get(attribute=None, storage_id=None):
- """Get storage attributes"""
- _args = ['storage-get', '--format=json']
- if storage_id:
- _args.extend(('-s', storage_id))
- if attribute:
- _args.append(attribute)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-@cached
-def storage_list(storage_name=None):
- """List the storage IDs for the unit"""
- _args = ['storage-list', '--format=json']
- if storage_name:
- _args.append(storage_name)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except OSError as e:
- import errno
- if e.errno == errno.ENOENT:
- # storage-list does not exist
- return []
- raise
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=None):
- super(Hooks, self).__init__()
- self._hooks = {}
-
- # For unknown reasons, we allow the Hooks constructor to override
- # config().implicit_save.
- if config_save is not None:
- config().implicit_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- _run_atstart()
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- try:
- self._hooks[hook_name]()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- _run_atexit()
- raise
- _run_atexit()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-class NoNetworkBinding(Exception):
- pass
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- d = os.environ.get('JUJU_CHARM_DIR')
- if d is not None:
- return d
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message instead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- @wraps(f)
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
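-
-# translate_exc is applied below to map a missing hook tool (the OSError
-# raised by subprocess when e.g. 'is-leader' does not exist on older
-# juju) into NotImplementedError, so callers can feature-detect:
-#
-#     @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-#     def is_leader():
-#         ...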
-
-
-def application_version_set(version):
- """Charm authors may trigger this command from any hook to output what
- version of the application is running. This could be a package version,
- for instance postgres version 9.5. It could also be a build number or
- version control revision identifier, for instance git sha 6fb7ba68. """
-
- cmd = ['application-version-set']
- cmd.append(version)
- try:
- subprocess.check_call(cmd)
- except OSError:
- log("Application Version: {}".format(version))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-@cached
-def goal_state():
- """Juju goal state values"""
- cmd = ['goal-state', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_register(ptype, klass, pid):
- """ is used while a hook is running to let Juju know that a
- payload has been started."""
- cmd = ['payload-register']
- for x in [ptype, klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_unregister(klass, pid):
- """ is used while a hook is running to let Juju know
- that a payload has been manually stopped. The and provided
- must match a payload that has been previously registered with juju using
- payload-register."""
- cmd = ['payload-unregister']
- for x in [klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_status_set(klass, pid, status):
- """is used to update the current status of a registered payload.
- The and provided must match a payload that has been previously
- registered with juju using payload-register. The must be one of the
- follow: starting, started, stopping, stopped"""
- cmd = ['payload-status-set']
- for x in [klass, pid, status]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def resource_get(name):
- """used to fetch the resource path of the given name.
-
- must match a name of defined resource in metadata.yaml
-
- returns either a path or False if resource not available
- """
- if not name:
- return False
-
- cmd = ['resource-get', name]
- try:
- return subprocess.check_output(cmd).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
-
-
-@cached
-def juju_version():
- """Full version string (eg. '1.23.3.1-trusty-amd64')"""
- # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
-
-
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework has been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
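-
-# A small sketch: Hooks.execute() above wraps the hook body with
-# _run_atstart()/_run_atexit(), so a helper module can do:
-#
-#     atstart(log, 'hook is starting')
-#     atexit(log, 'hook finished cleanly')   # runs only on success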
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def network_get_primary_address(binding):
- '''
- Deprecated since Juju 2.3; use network_get()
-
- Retrieve the primary network address for a named binding
-
- :param binding: string. The name of a relation or extra-binding
- :return: string. The primary IP address for the named binding
- :raise: NotImplementedError if run on Juju < 2.0
- '''
- cmd = ['network-get', '--primary-address', binding]
- try:
- response = subprocess.check_output(
- cmd,
- stderr=subprocess.STDOUT).decode('UTF-8').strip()
- except CalledProcessError as e:
- if 'no network config found for binding' in e.output.decode('UTF-8'):
- raise NoNetworkBinding("No network binding for {}"
- .format(binding))
- else:
- raise
- return response
-
-
-def network_get(endpoint, relation_id=None):
- """
- Retrieve the network details for a relation endpoint
-
- :param endpoint: string. The name of a relation endpoint
- :param relation_id: int. The ID of the relation for the current context.
- :return: dict. The loaded YAML output of the network-get query.
- :raise: NotImplementedError if request not supported by the Juju version.
- """
- if not has_juju_version('2.2'):
- raise NotImplementedError(juju_version()) # earlier versions require --primary-address
- if relation_id and not has_juju_version('2.3'):
- raise NotImplementedError # 2.3 added the -r option
-
- cmd = ['network-get', endpoint, '--format', 'yaml']
- if relation_id:
- cmd.append('-r')
- cmd.append(relation_id)
- response = subprocess.check_output(
- cmd,
- stderr=subprocess.STDOUT).decode('UTF-8').strip()
- return yaml.safe_load(response)
-
-
-def add_metric(*args, **kwargs):
- """Add metric values. Values may be expressed with keyword arguments. For
- metric names containing dashes, these may be expressed as one or more
- 'key=value' positional arguments. May only be called from the collect-metrics
- hook."""
- _args = ['add-metric']
- _kvpairs = []
- _kvpairs.extend(args)
- _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()])
- _args.extend(sorted(_kvpairs))
- try:
- subprocess.check_call(_args)
- return
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs))
- log(log_message, level='INFO')
-
-
-def meter_status():
- """Get the meter status, if running in the meter-status-changed hook."""
- return os.environ.get('JUJU_METER_STATUS')
-
-
-def meter_info():
- """Get the meter status information, if running in the meter-status-changed
- hook."""
- return os.environ.get('JUJU_METER_INFO')
-
-
-def iter_units_for_relation_name(relation_name):
- """Iterate through all units in a relation
-
- Generator that iterates through all the units in a relation and yields
- a named tuple with rid and unit field names.
-
- Usage:
- data = [(u.rid, u.unit)
- for u in iter_units_for_relation_name(relation_name)]
-
- :param relation_name: string relation name
- :yield: Named Tuple with rid and unit field names
- """
- RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
- for rid in relation_ids(relation_name):
- for unit in related_units(rid):
- yield RelatedUnit(rid, unit)
-
-
-def ingress_address(rid=None, unit=None):
- """
- Retrieve the ingress-address from a relation when available.
- Otherwise, return the private-address.
-
- When used on the consuming side of the relation (unit is a remote
- unit), the ingress-address is the IP address that this unit needs
- to use to reach the provided service on the remote unit.
-
- When used on the providing side of the relation (unit == local_unit()),
- the ingress-address is the IP address that is advertised to remote
- units on this relation. Remote units need to use this address to
- reach the local provided service on this unit.
-
- Note that charms may document some other method to use in
- preference to the ingress_address(), such as an address provided
- on a different relation attribute or a service discovery mechanism.
- This allows charms to redirect inbound connections to their peers
- or different applications such as load balancers.
-
- Usage:
- addresses = [ingress_address(rid=u.rid, unit=u.unit)
- for u in iter_units_for_relation_name(relation_name)]
-
- :param rid: string relation id
- :param unit: string unit name
- :side effect: calls relation_get
- :return: string IP address
- """
- settings = relation_get(rid=rid, unit=unit)
- return (settings.get('ingress-address') or
- settings.get('private-address'))
-
-
-def egress_subnets(rid=None, unit=None):
- """
- Retrieve the egress-subnets from a relation.
-
- This function is to be used on the providing side of the
- relation, and provides the ranges of addresses that client
- connections may come from. The result is uninteresting on
- the consuming side of a relation (unit == local_unit()).
-
- Returns a stable list of subnets in CIDR format.
- eg. ['192.168.1.0/24', '2001::F00F/128']
-
- If egress-subnets is not available, falls back to using the published
- ingress-address, or finally private-address.
-
- :param rid: string relation id
- :param unit: string unit name
- :side effect: calls relation_get
- :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
- """
- def _to_range(addr):
- if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
- addr += '/32'
- elif ':' in addr and '/' not in addr: # IPv6
- addr += '/128'
- return addr
-
- settings = relation_get(rid=rid, unit=unit)
- if 'egress-subnets' in settings:
- return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
- if 'ingress-address' in settings:
- return [_to_range(settings['ingress-address'])]
- if 'private-address' in settings:
- return [_to_range(settings['private-address'])]
- return [] # Should never happen
-
-
-def unit_doomed(unit=None):
- """Determines if the unit is being removed from the model
-
- Requires Juju 2.4.1.
-
- :param unit: string unit name, defaults to local_unit
- :side effect: calls goal_state
- :side effect: calls local_unit
- :side effect: calls has_juju_version
- :return: True if the unit is being removed, already gone, or never existed
- """
- if not has_juju_version("2.4.1"):
- # We cannot risk blindly returning False for 'we don't know',
- # because that could cause data loss; if call sites don't
- # need an accurate answer, they likely don't need this helper
- # at all.
- # goal-state existed in 2.4.0, but did not handle removals
- # correctly until 2.4.1.
- raise NotImplementedError("is_doomed")
- if unit is None:
- unit = local_unit()
- gs = goal_state()
- units = gs.get('units', {})
- if unit not in units:
- return True
- # I don't think 'dead' units ever show up in the goal-state, but
- # check anyway in addition to 'dying'.
- return units[unit]['status'] in ('dying', 'dead')
-
-
-def env_proxy_settings(selected_settings=None):
- """Get proxy settings from process environment variables.
-
- Get charm proxy settings from environment variables that correspond to
- juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
- see lp:1782236) in a format suitable for passing to an application that
- reacts to proxy settings passed as environment variables. Some applications
- support lowercase or uppercase notation (e.g. curl), some support only
- lowercase (e.g. wget), and in rarer cases only the uppercase notation
- is supported. no_proxy CIDR and wildcard support also varies
- between runtimes and applications as there is no enforced standard.
-
- Some applications may connect to multiple destinations and expose config
- options that would affect only proxy settings for a specific destination;
- these should be handled in charms in an application-specific manner.
-
- :param selected_settings: format only a subset of possible settings
- :type selected_settings: list
- :rtype: Optional[dict[str, str]]
- """
- SUPPORTED_SETTINGS = {
- 'http': 'HTTP_PROXY',
- 'https': 'HTTPS_PROXY',
- 'no_proxy': 'NO_PROXY',
- 'ftp': 'FTP_PROXY'
- }
- if selected_settings is None:
- selected_settings = SUPPORTED_SETTINGS
-
- selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
- if k in selected_settings]
- proxy_settings = {}
- for var in selected_vars:
- var_val = os.getenv(var)
- if var_val:
- proxy_settings[var] = var_val
- proxy_settings[var.lower()] = var_val
- # Now handle juju-prefixed environment variables. The legacy vs new
- # environment variable usage is mutually exclusive
- charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
- if charm_var_val:
- proxy_settings[var] = charm_var_val
- proxy_settings[var.lower()] = charm_var_val
- if 'no_proxy' in proxy_settings:
- if _contains_range(proxy_settings['no_proxy']):
- log(RANGE_WARNING, level=WARNING)
- return proxy_settings if proxy_settings else None
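-
-# Example sketch: pass only HTTP(S) proxy settings to a subprocess
-# ('url' is hypothetical; values depend on the model's juju-*-proxy
-# configuration):
-#
-#     env = dict(os.environ)
-#     env.update(env_proxy_settings(['http', 'https']) or {})
-#     subprocess.check_call(['wget', url], env=env)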
-
-
-def _contains_range(addresses):
- """Check for cidr or wildcard domain in a string.
-
- Given a string comprising a comma-separated list of IP addresses
- and domain names, determine whether the string contains IP ranges
- or wildcard domains.
-
- :param addresses: comma-separated list of domains and IP addresses.
- :type addresses: str
- """
- return (
- # Test for cidr (e.g. 10.20.20.0/24)
- "/" in addresses or
- # Test for wildcard domains (*.foo.com or .foo.com)
- "*" in addresses or
- addresses.startswith(".") or
- ",." in addresses or
- " ." in addresses)
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
deleted file mode 100644
index 32754ff..0000000
--- a/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,1077 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt
-# Matthew Wedgwood
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-import functools
-import itertools
-import six
-
-from contextlib import contextmanager
-from collections import OrderedDict
-from .hookenv import log, INFO, DEBUG, local_unit, charm_name
-from .fstab import Fstab
-from charmhelpers.osplatform import get_platform
-
-__platform__ = get_platform()
-if __platform__ == "ubuntu":
- from charmhelpers.core.host_factory.ubuntu import ( # NOQA:F401
- service_available,
- add_new_group,
- lsb_release,
- cmp_pkgrevno,
- CompareHostReleases,
- get_distrib_codename,
- arch
- ) # flake8: noqa -- ignore F401 for this import
-elif __platform__ == "centos":
- from charmhelpers.core.host_factory.centos import ( # NOQA:F401
- service_available,
- add_new_group,
- lsb_release,
- cmp_pkgrevno,
- CompareHostReleases,
- ) # flake8: noqa -- ignore F401 for this import
-
-UPDATEDB_PATH = '/etc/updatedb.conf'
-
-
-def service_start(service_name, **kwargs):
- """Start a system service.
-
- The specified service name is managed via the system level init system.
- Some init systems (e.g. upstart) require that additional arguments be
- provided in order to directly control service instances whereas other init
- systems allow for addressing instances of a service directly by name (e.g.
- systemd).
-
- The kwargs allow for the additional parameters to be passed to underlying
- init systems for those systems which require/allow for them. For example,
- the ceph-osd upstart script requires the id parameter to be passed along
- in order to identify which running daemon should be started. The
- following example starts the ceph-osd service for instance id=4:
-
- service_start('ceph-osd', id=4)
-
- :param service_name: the name of the service to start
- :param **kwargs: additional parameters to pass to the init system when
- managing services. These will be passed as key=value
- parameters to the init system's commandline. kwargs
- are ignored for systemd enabled systems.
- """
- return service('start', service_name, **kwargs)
-
-
-def service_stop(service_name, **kwargs):
- """Stop a system service.
-
- The specified service name is managed via the system level init system.
- Some init systems (e.g. upstart) require that additional arguments be
- provided in order to directly control service instances whereas other init
- systems allow for addressing instances of a service directly by name (e.g.
- systemd).
-
- The kwargs allow for the additional parameters to be passed to underlying
- init systems for those systems which require/allow for them. For example,
- the ceph-osd upstart script requires the id parameter to be passed along
- in order to identify which running daemon should be stopped. The
- following example stops the ceph-osd service for instance id=4:
-
- service_stop('ceph-osd', id=4)
-
- :param service_name: the name of the service to stop
- :param **kwargs: additional parameters to pass to the init system when
- managing services. These will be passed as key=value
- parameters to the init system's commandline. kwargs
- are ignored for systemd enabled systems.
- """
- return service('stop', service_name, **kwargs)
-
-
-def service_restart(service_name, **kwargs):
- """Restart a system service.
-
- The specified service name is managed via the system level init system.
- Some init systems (e.g. upstart) require that additional arguments be
- provided in order to directly control service instances whereas other init
- systems allow for addressing instances of a service directly by name (e.g.
- systemd).
-
- The kwargs allow for the additional parameters to be passed to underlying
- init systems for those systems which require/allow for them. For example,
- the ceph-osd upstart script requires the id parameter to be passed along
- in order to identify which running daemon should be restarted. The
- following example restarts the ceph-osd service for instance id=4:
-
- service_restart('ceph-osd', id=4)
-
- :param service_name: the name of the service to restart
- :param **kwargs: additional parameters to pass to the init system when
- managing services. These will be passed as key=value
- parameters to the init system's commandline. kwargs
- are ignored for init systems not allowing additional
- parameters via the commandline (systemd).
- """
- return service('restart', service_name, **kwargs)
-
-
-def service_reload(service_name, restart_on_failure=False, **kwargs):
- """Reload a system service, optionally falling back to restart if
- reload fails.
-
- The specified service name is managed via the system level init system.
- Some init systems (e.g. upstart) require that additional arguments be
- provided in order to directly control service instances whereas other init
- systems allow for addressing instances of a service directly by name (e.g.
- systemd).
-
- The kwargs allow for the additional parameters to be passed to underlying
- init systems for those systems which require/allow for them. For example,
- the ceph-osd upstart script requires the id parameter to be passed along
- in order to identify which running daemon should be reloaded. The
- following example reloads the ceph-osd service for instance id=4:
-
- service_reload('ceph-osd', id=4)
-
- :param service_name: the name of the service to reload
- :param restart_on_failure: boolean indicating whether to fallback to a
- restart if the reload fails.
- :param **kwargs: additional parameters to pass to the init system when
- managing services. These will be passed as key=value
- parameters to the init system's commandline. kwargs
- are ignored for init systems not allowing additional
- parameters via the commandline (systemd).
- """
- service_result = service('reload', service_name, **kwargs)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name, **kwargs)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
- **kwargs):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot.
-
- :param service_name: the name of the service to pause
- :param init_dir: path to the upstart init directory
- :param initd_dir: path to the sysv init directory
- :param **kwargs: additional parameters to pass to the init system when
- managing services. These will be passed as key=value
- parameters to the init system's commandline. kwargs
- are ignored for init systems which do not support
- key=value arguments via the commandline.
- """
- stopped = True
- if service_running(service_name, **kwargs):
- stopped = service_stop(service_name, **kwargs)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('disable', service_name)
- service('mask', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
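-
-# Example: pair with service_resume() below around a maintenance window
-# ('do_maintenance' is a hypothetical step):
-#
-#     if service_pause('nova-compute'):
-#         do_maintenance()
-#         service_resume('nova-compute')
-#
-# On systemd hosts this disables and masks the unit, then unmasks and
-# re-enables it.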
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d", **kwargs):
- """Resume a system service.
-
- Re-enable starting at boot and start the service.
-
- :param service_name: the name of the service to resume
- :param init_dir: the path to the init dir
- :param initd_dir: the path to the sysv init directory
- :param **kwargs: additional parameters to pass to the init system when
- managing services. These will be passed as key=value
- parameters to the init system's commandline. kwargs
- are ignored for systemd enabled systems.
- """
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('unmask', service_name)
- service('enable', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- raise ValueError(
- "Unable to detect {0} as a systemd, Upstart ({1}) or"
- " SysV ({2}) service".format(
- service_name, upstart_file, sysv_file))
- started = service_running(service_name, **kwargs)
-
- if not started:
- started = service_start(service_name, **kwargs)
- return started
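-
-# Illustrative maintenance pattern (editor's sketch): pause a service so
-# it cannot start at boot, do disruptive work, then resume it.
-# do_maintenance() is a hypothetical placeholder.
-#
-#     service_pause('nova-compute')
-#     try:
-#         do_maintenance()
-#     finally:
-#         service_resume('nova-compute')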
-
-
-def service(action, service_name, **kwargs):
- """Control a system service.
-
- :param action: the action to take on the service
- :param service_name: the name of the service to perform the action on
- :param **kwargs: additional params to be passed to the service command in
- the form of key=value.
- """
- if init_is_systemd():
- cmd = ['systemctl', action, service_name]
- else:
- cmd = ['service', service_name, action]
- for key, value in six.iteritems(kwargs):
- parameter = '%s=%s' % (key, value)
- cmd.append(parameter)
- return subprocess.call(cmd) == 0
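-
-# For example (editor's note), on an upstart/sysv host the call
-#     service('restart', 'ceph-osd', id=4)
-# executes "service ceph-osd restart id=4", while on a systemd host it
-# executes "systemctl restart ceph-osd" and the kwargs are dropped.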
-
-
-_UPSTART_CONF = "/etc/init/{}.conf"
-_INIT_D_CONF = "/etc/init.d/{}"
-
-
-def service_running(service_name, **kwargs):
- """Determine whether a system service is running.
-
- :param service_name: the name of the service
- :param **kwargs: additional args to pass to the service command. This is
- used to pass additional key=value arguments to the
- service command line for managing specific instance
- units (e.g. service ceph-osd status id=2). The kwargs
- are ignored in systemd services.
- """
- if init_is_systemd():
- return service('is-active', service_name)
- else:
- if os.path.exists(_UPSTART_CONF.format(service_name)):
- try:
- cmd = ['status', service_name]
- for key, value in six.iteritems(kwargs):
- parameter = '%s=%s' % (key, value)
- cmd.append(parameter)
- output = subprocess.check_output(
- cmd, stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- # This works for upstart scripts where the 'service' command
- # returns a consistent string to represent running
- # 'start/running'
- if ("start/running" in output or
- "is running" in output or
- "up and running" in output):
- return True
- elif os.path.exists(_INIT_D_CONF.format(service_name)):
- # Check System V scripts init script return codes
- return service('status', service_name)
- return False
-
-
-SYSTEMD_SYSTEM = '/run/systemd/system'
-
-
-def init_is_systemd():
- """Return True if the host system uses systemd, False otherwise."""
- if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
- return False
- return os.path.isdir(SYSTEMD_SYSTEM)
-
-
-def adduser(username, password=None, shell='/bin/bash',
- system_user=False, primary_group=None,
- secondary_groups=None, uid=None, home_dir=None):
- """Add a user to the system.
-
- Will log but otherwise succeed if the user already exists.
-
- :param str username: Username to create
- :param str password: Password for user; if ``None``, create a system user
- :param str shell: The default shell for the user
- :param bool system_user: Whether to create a login or system user
- :param str primary_group: Primary group for user; defaults to username
- :param list secondary_groups: Optional list of additional groups
- :param int uid: UID for user being created
- :param str home_dir: Home directory for user
-
- :returns: The password database entry struct, as returned by `pwd.getpwnam`
- """
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- if uid:
- user_info = pwd.getpwuid(int(uid))
- log('user with uid {0} already exists!'.format(uid))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if uid:
- cmd.extend(['--uid', str(uid)])
- if home_dir:
- cmd.extend(['--home', str(home_dir)])
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- if not primary_group:
- try:
- grp.getgrnam(username)
- primary_group = username # avoid "group exists" error
- except KeyError:
- pass
- if primary_group:
- cmd.extend(['-g', primary_group])
- if secondary_groups:
- cmd.extend(['-G', ','.join(secondary_groups)])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
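-
-# Illustrative usage (editor's sketch): a daemon account with a fixed home,
-# and a login user. Note that useradd's --password flag expects a
-# crypt(3)-hashed string; crypted_pw below is a hypothetical pre-hashed
-# value, not plaintext.
-#
-#     adduser('nova', system_user=True, home_dir='/var/lib/nova')
-#     adduser('deploy', password=crypted_pw, secondary_groups=['sudo'])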
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def uid_exists(uid):
- """Check if a uid exists"""
- try:
- pwd.getpwuid(uid)
- uid_exists = True
- except KeyError:
- uid_exists = False
- return uid_exists
-
-
-def group_exists(groupname):
- """Check if a group exists"""
- try:
- grp.getgrnam(groupname)
- group_exists = True
- except KeyError:
- group_exists = False
- return group_exists
-
-
-def gid_exists(gid):
- """Check if a gid exists"""
- try:
- grp.getgrgid(gid)
- gid_exists = True
- except KeyError:
- gid_exists = False
- return gid_exists
-
-
-def add_group(group_name, system_group=False, gid=None):
- """Add a group to the system
-
- Will log but otherwise succeed if the group already exists.
-
- :param str group_name: group to create
- :param bool system_group: Create system group
- :param int gid: GID for user being created
-
- :returns: The group database entry struct, as returned by `grp.getgrnam`
- """
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- if gid:
- group_info = grp.getgrgid(gid)
- log('group with gid {0} already exists!'.format(gid))
- except KeyError:
- log('creating group {0}'.format(group_name))
- add_new_group(group_name, system_group, gid)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def chage(username, lastday=None, expiredate=None, inactive=None,
- mindays=None, maxdays=None, root=None, warndays=None):
- """Change user password expiry information
-
- :param str username: User to update
- :param str lastday: Set when password was changed in YYYY-MM-DD format
- :param str expiredate: Set when user's account will no longer be
- accessible in YYYY-MM-DD format.
- -1 will remove an account expiration date.
- :param str inactive: Set the number of days of inactivity after a password
- has expired before the account is locked.
- -1 will remove an account's inactivity.
- :param str mindays: Set the minimum number of days between password
- changes to MIN_DAYS.
- 0 indicates the password can be changed anytime.
- :param str maxdays: Set the maximum number of days during which a
- password is valid.
- -1 as MAX_DAYS disables the maximum-age check
- :param str root: Apply changes in the CHROOT_DIR directory
- :param str warndays: Set the number of days of warning before a password
- change is required
- :raises subprocess.CalledProcessError: if call to chage fails
- """
- cmd = ['chage']
- if root:
- cmd.extend(['--root', root])
- if lastday:
- cmd.extend(['--lastday', lastday])
- if expiredate:
- cmd.extend(['--expiredate', expiredate])
- if inactive:
- cmd.extend(['--inactive', inactive])
- if mindays:
- cmd.extend(['--mindays', mindays])
- if maxdays:
- cmd.extend(['--maxdays', maxdays])
- if warndays:
- cmd.extend(['--warndays', warndays])
- cmd.append(username)
- subprocess.check_call(cmd)
-
-
-remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
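-
-# The partial above bundles the chage flags that disable password aging
-# checks for an account; e.g. (editor's sketch):
-#
-#     remove_password_expiry('nova')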
-
-
-def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- if timeout:
- cmd = ['timeout', str(timeout)] + cmd
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
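-
-# Illustrative usage (editor's sketch): mirror a tree (the default options
-# already prune deleted files via --delete) and give up after five minutes.
-# The paths are hypothetical.
-#
-#     rsync('/srv/app/', '/backup/app', flags='-az', timeout=300)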
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- # let's see if we can grab the file and compare the content, to avoid
- # doing a write.
- existing_content = None
- existing_uid, existing_gid, existing_perms = None, None, None
- try:
- with open(path, 'rb') as target:
- existing_content = target.read()
- stat = os.stat(path)
- existing_uid, existing_gid, existing_perms = (
- stat.st_uid, stat.st_gid, stat.st_mode
- )
- except Exception:
- pass
- if content != existing_content:
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
- level=DEBUG)
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- if six.PY3 and isinstance(content, six.string_types):
- content = content.encode('UTF-8')
- target.write(content)
- return
- # the contents were the same, but we might still need to change the
- # ownership or permissions.
- if existing_uid != uid:
- log("Changing uid on already existing content: {} -> {}"
- .format(existing_uid, uid), level=DEBUG)
- os.chown(path, uid, -1)
- if existing_gid != gid:
- log("Changing gid on already existing content: {} -> {}"
- .format(existing_gid, gid), level=DEBUG)
- os.chown(path, -1, gid)
- if existing_perms != perms:
- log("Changing permissions on existing content: {} -> {}"
- .format(existing_perms, perms), level=DEBUG)
- os.chmod(path, perms)
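-
-# Illustrative usage (editor's sketch): because unchanged content is
-# detected and skipped, this is safe to call on every hook invocation.
-# rendered_bytes is a hypothetical placeholder for the config as bytes.
-#
-#     write_file('/etc/nova/nova.conf', rendered_bytes, owner='nova',
-#                group='nova', perms=0o640)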
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab"""
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file"""
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """Generate a hash checksum of all files matching 'path'. Standard
- wildcards like '*' and '?' are supported, see documentation for the 'glob'
- module for more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash algorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- """A class derived from Value error to indicate the checksum failed."""
- pass
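-
-# Illustrative usage (editor's sketch), with EXPECTED standing in for a
-# known-good digest obtained elsewhere:
-#
-#     try:
-#         check_hash('/tmp/pkg.tar.gz', EXPECTED, hash_type='sha256')
-#     except ChecksumError:
-#         log('downloaded archive is corrupt')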
-
-
-def restart_on_change(restart_map, stopstart=False, restart_functions=None):
- """Restart services based on configuration files changing
-
- This function is used as a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ],
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- decorated config_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
-
- @param restart_map: {path_file_name: [service_name, ...]}
- @param stopstart: DEFAULT false; whether to stop and start the service
- rather than restart it
- @param restart_functions: nonstandard functions to use to restart services
- {svc: func, ...}
- @returns result from decorated function
- """
- def wrap(f):
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- return restart_on_change_helper(
- (lambda: f(*args, **kwargs)), restart_map, stopstart,
- restart_functions)
- return wrapped_f
- return wrap
-
-
-def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
- restart_functions=None):
- """Helper function to perform the restart_on_change function.
-
- This is provided for decorators to restart services if files described
- in the restart_map have changed after an invocation of lambda_f().
-
- @param lambda_f: function to call.
- @param restart_map: {file: [service, ...]}
- @param stopstart: whether to stop, start or restart a service
- @param restart_functions: nonstandard functions to use to restart services
- {svc: func, ...}
- @returns result of lambda_f()
- """
- if restart_functions is None:
- restart_functions = {}
- checksums = {path: path_hash(path) for path in restart_map}
- r = lambda_f()
- # create a list of lists of the services to restart
- restarts = [restart_map[path]
- for path in restart_map
- if path_hash(path) != checksums[path]]
- # create a flat list of ordered services without duplicates from lists
- services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
- if services_list:
- actions = ('stop', 'start') if stopstart else ('restart',)
- for service_name in services_list:
- if service_name in restart_functions:
- restart_functions[service_name](service_name)
- else:
- for action in actions:
- service(action, service_name)
- return r
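-
-# Illustrative direct usage (editor's sketch), e.g. when a decorator is
-# awkward; rendered_bytes is a hypothetical placeholder:
-#
-#     restart_on_change_helper(
-#         lambda: write_file('/etc/ceph/ceph.conf', rendered_bytes),
-#         {'/etc/ceph/ceph.conf': ['ceph-osd']},
-#         stopstart=True)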
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- """Return a list of nics of given type(s)"""
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile(r'^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- """Set the Maximum Transmission Unit (MTU) on a network interface."""
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- """Return the Maximum Transmission Unit (MTU) for a network interface."""
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- """Return the Media Access Control (MAC) for a network interface."""
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-@contextmanager
-def chdir(directory):
- """Change the current working directory to a different directory for a code
- block and return the previous directory after the block exits. Useful to
- run commands from a specificed directory.
-
- :param str directory: The directory path to change to for this context.
- """
- cur = os.getcwd()
- try:
- yield os.chdir(directory)
- finally:
- os.chdir(cur)
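-
-# Illustrative usage (editor's sketch; the path and archive name are
-# hypothetical):
-#
-#     with chdir('/var/lib/charm'):
-#         subprocess.check_call(['tar', 'xf', 'payload.tar'])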
-
-
-def chownr(path, owner, group, follow_links=True, chowntopdir=False):
- """Recursively change user and group ownership of files and directories
- in given path. Doesn't chown path itself by default, only its children.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- :param bool follow_links: Also follow and chown links if True
- :param bool chowntopdir: Also chown path itself if True
- """
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- if chowntopdir:
- broken_symlink = os.path.lexists(path) and not os.path.exists(path)
- if not broken_symlink:
- chown(path, uid, gid)
- for root, dirs, files in os.walk(path, followlinks=follow_links):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- """Recursively change user and group ownership of files and directories
- in a given path, not following symbolic links. See the documentation for
- 'os.lchown' for more information.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- """
- chownr(path, owner, group, follow_links=False)
-
-
-def owner(path):
- """Returns a tuple containing the username & groupname owning the path.
-
- :param str path: the string path to retrieve the ownership
- :return tuple(str, str): A (username, groupname) tuple containing the
- name of the user and group owning the path.
- :raises OSError: if the specified path does not exist
- """
- stat = os.stat(path)
- username = pwd.getpwuid(stat.st_uid)[0]
- groupname = grp.getgrgid(stat.st_gid)[0]
- return username, groupname
-
-
-def get_total_ram():
- """The total amount of system RAM in bytes.
-
- This is what is reported by the OS, and may be overcommitted when
- there are multiple containers hosted on the same machine.
- """
- with open('/proc/meminfo', 'r') as f:
- for line in f.readlines():
- if line:
- key, value, unit = line.split()
- if key == 'MemTotal:':
- assert unit == 'kB', 'Unknown unit'
- return int(value) * 1024 # meminfo's 'kB' is really KiB.
- raise NotImplementedError()
-
-
-UPSTART_CONTAINER_TYPE = '/run/container_type'
-
-
-def is_container():
- """Determine whether unit is running in a container
-
- @return: boolean indicating if unit is in a container
- """
- if init_is_systemd():
- # Detect using systemd-detect-virt
- return subprocess.call(['systemd-detect-virt',
- '--container']) == 0
- else:
- # Detect using upstart container file marker
- return os.path.exists(UPSTART_CONTAINER_TYPE)
-
-
-def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
- """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list.
-
- This method has no effect if the path specified by updatedb_path does not
- exist or is not a file.
-
- @param path: string the path to add to the updatedb.conf PRUNEPATHS value
- @param updatedb_path: the path to the updatedb.conf file
- """
- if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
- # If the updatedb.conf file doesn't exist then don't attempt to update
- # the file as the package providing mlocate may not be installed on
- # the local system
- return
-
- with open(updatedb_path, 'r+') as f_id:
- updatedb_text = f_id.read()
- output = updatedb(updatedb_text, path)
- f_id.seek(0)
- f_id.write(output)
- f_id.truncate()
-
-
-def updatedb(updatedb_text, new_path):
- lines = updatedb_text.split("\n")
- for i, line in enumerate(lines):
- if line.startswith("PRUNEPATHS="):
- paths_line = line.split("=")[1].replace('"', '')
- paths = paths_line.split(" ")
- if new_path not in paths:
- paths.append(new_path)
- lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
- output = "\n".join(lines)
- return output
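-
-# For example (editor's note), given
-#     'PRUNE_BIND_MOUNTS="yes"\nPRUNEPATHS="/tmp /var/spool"\n'
-# updatedb(text, '/srv/ceph') returns
-#     'PRUNE_BIND_MOUNTS="yes"\nPRUNEPATHS="/tmp /var/spool /srv/ceph"\n'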
-
-
-def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
- """ Modulo distribution
-
- This helper uses the unit number, a modulo value and a constant wait time
- to produce a calculated wait time distribution. This is useful in large
- scale deployments to distribute load during an expensive operation such as
- service restarts.
-
- If you have 1000 nodes that need to restart in groups of 100, one
- minute apart:
-
- time.sleep(modulo_distribution(modulo=100, wait=60))
- restart()
-
- If you need restarts to happen serially, set modulo to the exact number
- of nodes and use a high constant wait time:
-
- time.sleep(modulo_distribution(modulo=10, wait=120))
- restart()
-
- @param modulo: int The modulo number creates the group distribution
- @param wait: int The constant time wait value
- @param non_zero_wait: boolean Override unit % modulo == 0,
- return modulo * wait. Used to avoid collisions with
- leader nodes which are often given priority.
- @return: int Calculated time to wait for unit operation
- """
- unit_number = int(local_unit().split('/')[1])
- calculated_wait_time = (unit_number % modulo) * wait
- if non_zero_wait and calculated_wait_time == 0:
- return modulo * wait
- else:
- return calculated_wait_time
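-
-# Illustrative usage (editor's sketch): stagger restarts across units,
-# with unit N sleeping (N % 5) * 30 seconds, or 150 seconds when
-# N % 5 == 0 (non_zero_wait keeps the leader-ish unit from going first):
-#
-#     import time
-#     time.sleep(modulo_distribution(modulo=5, wait=30, non_zero_wait=True))
-#     service('restart', 'nova-compute')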
-
-
-def install_ca_cert(ca_cert, name=None):
- """
- Install the given cert as a trusted CA.
-
- The ``name`` is the stem of the filename where the cert is written, and if
- not provided, it will default to ``juju-{charm_name}``.
-
- If the cert is empty or None, or is unchanged, nothing is done.
- """
- if not ca_cert:
- return
- if not isinstance(ca_cert, bytes):
- ca_cert = ca_cert.encode('utf8')
- if not name:
- name = 'juju-{}'.format(charm_name())
- cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
- new_hash = hashlib.md5(ca_cert).hexdigest()
- if file_hash(cert_file) == new_hash:
- return
- log("Installing new CA cert at: {}".format(cert_file), level=INFO)
- write_file(cert_file, ca_cert)
- subprocess.check_call(['update-ca-certificates', '--fresh'])
diff --git a/hooks/charmhelpers/core/host_factory/__init__.py b/hooks/charmhelpers/core/host_factory/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/hooks/charmhelpers/core/host_factory/centos.py b/hooks/charmhelpers/core/host_factory/centos.py
deleted file mode 100644
index 7781a39..0000000
--- a/hooks/charmhelpers/core/host_factory/centos.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import subprocess
-import yum
-import os
-
-from charmhelpers.core.strutils import BasicStringComparator
-
-
-class CompareHostReleases(BasicStringComparator):
- """Provide comparisons of Host releases.
-
- Use in the form of
-
- if CompareHostReleases(release) > 'trusty':
- # do something for releases newer than trusty
- """
-
- def __init__(self, item):
- raise NotImplementedError(
- "CompareHostReleases() is not implemented for CentOS")
-
-
-def service_available(service_name):
- # """Determine whether a system service is available."""
- if os.path.isdir('/run/systemd/system'):
- cmd = ['systemctl', 'is-enabled', service_name]
- else:
- cmd = ['service', service_name, 'is-enabled']
- return subprocess.call(cmd) == 0
-
-
-def add_new_group(group_name, system_group=False, gid=None):
- cmd = ['groupadd']
- if gid:
- cmd.extend(['--gid', str(gid)])
- if system_group:
- cmd.append('-r')
- cmd.append(group_name)
- subprocess.check_call(cmd)
-
-
-def lsb_release():
- """Return /etc/os-release in a dict."""
- d = {}
- with open('/etc/os-release', 'r') as lsb:
- for l in lsb:
- s = l.split('=')
- if len(s) != 2:
- continue
- d[s[0].strip()] = s[1].strip()
- return d
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- """Compare supplied revno with the revno of the installed package.
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function builds a package cache with yum.YumBase if the
- pkgcache argument is None.
- """
- if not pkgcache:
- y = yum.YumBase()
- packages = y.doPackageLists()
- pkgcache = {i.Name: i.version for i in packages['installed']}
- pkg = pkgcache[package]
- # NOTE: plain string comparison; only reliable for versions that
- # compare correctly as strings.
- if pkg > revno:
- return 1
- if pkg < revno:
- return -1
- return 0
diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py
deleted file mode 100644
index 0ee2b66..0000000
--- a/hooks/charmhelpers/core/host_factory/ubuntu.py
+++ /dev/null
@@ -1,114 +0,0 @@
-import subprocess
-
-from charmhelpers.core.hookenv import cached
-from charmhelpers.core.strutils import BasicStringComparator
-
-
-UBUNTU_RELEASES = (
- 'lucid',
- 'maverick',
- 'natty',
- 'oneiric',
- 'precise',
- 'quantal',
- 'raring',
- 'saucy',
- 'trusty',
- 'utopic',
- 'vivid',
- 'wily',
- 'xenial',
- 'yakkety',
- 'zesty',
- 'artful',
- 'bionic',
- 'cosmic',
- 'disco',
-)
-
-
-class CompareHostReleases(BasicStringComparator):
- """Provide comparisons of Ubuntu releases.
-
- Use in the form of
-
- if CompareHostReleases(release) > 'trusty':
- # do something for releases newer than trusty
- """
- _list = UBUNTU_RELEASES
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-def add_new_group(group_name, system_group=False, gid=None):
- cmd = ['addgroup']
- if gid:
- cmd.extend(['--gid', str(gid)])
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def get_distrib_codename():
- """Return the codename of the distribution
- :returns: The codename
- :rtype: str
- """
- return lsb_release()['DISTRIB_CODENAME'].lower()
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- """Compare supplied revno with the revno of the installed package.
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- """
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
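-
-# Illustrative usage (editor's sketch): gate a feature on the installed
-# package version; enable_dpdk() is a hypothetical helper.
-#
-#     if cmp_pkgrevno('openvswitch-switch', '2.5.0') >= 0:
-#         enable_dpdk()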
-
-
-@cached
-def arch():
- """Return the package architecture as a string.
-
- :returns: the architecture
- :rtype: str
- :raises: subprocess.CalledProcessError if dpkg command fails
- """
- return subprocess.check_output(
- ['dpkg', '--print-architecture']
- ).rstrip().decode('UTF-8')
diff --git a/hooks/charmhelpers/core/hugepage.py b/hooks/charmhelpers/core/hugepage.py
deleted file mode 100644
index 54b5b5e..0000000
--- a/hooks/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to mount hugepages now
- set_shmmax (bool) -- Whether to raise kernel.shmmax to cover the
- reserved pages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- if max_map_count < 2 * nr_hugepages:
- max_map_count = 2 * nr_hugepages
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/hooks/charmhelpers/core/kernel.py b/hooks/charmhelpers/core/kernel.py
deleted file mode 100644
index e01f4f8..0000000
--- a/hooks/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import re
-import subprocess
-
-from charmhelpers.osplatform import get_platform
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-__platform__ = get_platform()
-if __platform__ == "ubuntu":
- from charmhelpers.core.kernel_factory.ubuntu import ( # NOQA:F401
- persistent_modprobe,
- update_initramfs,
- ) # flake8: noqa -- ignore F401 for this import
-elif __platform__ == "centos":
- from charmhelpers.core.kernel_factory.centos import ( # NOQA:F401
- persistent_modprobe,
- update_initramfs,
- ) # flake8: noqa -- ignore F401 for this import
-
-__author__ = "Jorge Niedbalski "
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- subprocess.check_call(cmd)
- if persist:
- persistent_modprobe(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return subprocess.check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return subprocess.check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
diff --git a/hooks/charmhelpers/core/kernel_factory/__init__.py b/hooks/charmhelpers/core/kernel_factory/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/hooks/charmhelpers/core/kernel_factory/centos.py b/hooks/charmhelpers/core/kernel_factory/centos.py
deleted file mode 100644
index 1c402c1..0000000
--- a/hooks/charmhelpers/core/kernel_factory/centos.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import subprocess
-import os
-
-
-def persistent_modprobe(module):
- """Load a kernel module and configure for auto-load on reboot."""
- if not os.path.exists('/etc/rc.modules'):
- open('/etc/rc.modules', 'a')
- os.chmod('/etc/rc.modules', 0o111)
- with open('/etc/rc.modules', 'r+') as modules:
- if module not in modules.read():
- modules.write('modprobe %s\n' % module)
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image."""
- return subprocess.check_call(["dracut", "-f", version])
diff --git a/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/hooks/charmhelpers/core/kernel_factory/ubuntu.py
deleted file mode 100644
index 3de372f..0000000
--- a/hooks/charmhelpers/core/kernel_factory/ubuntu.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import subprocess
-
-
-def persistent_modprobe(module):
- """Load a kernel module and configure for auto-load on reboot."""
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module + "\n")
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image."""
- return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 61fd074..0000000
--- a/hooks/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py
deleted file mode 100644
index 179ad4f..0000000
--- a/hooks/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": ,
- "required_data": ,
- "provided_data": ,
- "data_ready": ,
- "data_lost": ,
- "start": ,
- "stop": ,
- "ports": ,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary to provide to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- # turn this generator into a list,
- # as we'll be going over it multiple times
- new_ports = list(service.get('ports', []))
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port) and not self.ports_contains(old_port, new_ports):
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- # A port is either a number or 'ICMP'
- protocol = 'TCP'
- if str(port).upper() == 'ICMP':
- protocol = 'ICMP'
- if event_name == 'start':
- hookenv.open_port(port, protocol)
- elif event_name == 'stop':
- hookenv.close_port(port, protocol)
-
- def ports_contains(self, port, ports):
- if not bool(port):
- return False
- if str(port).upper() != 'ICMP':
- port = int(port)
- return port in ports
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 3e6e30d..0000000
--- a/hooks/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,290 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will be used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexicographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.safe_load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
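A short sketch of RequiredConfig acting as a boolean guard; the option names are hypothetical and would need matching entries in config.yaml:

    from charmhelpers.core.services.helpers import RequiredConfig

    # Truthy only once both options have been changed from the defaults
    # declared in config.yaml.
    required = RequiredConfig('admin-password', 'data-dir')
    if required:
        password = required['config']['admin-password']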
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.safe_load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
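A sketch of the one-time-generation pattern this enables, assuming `pwgen` from charmhelpers.core.host; the file name is illustrative:

    from charmhelpers.core.host import pwgen
    from charmhelpers.core.services.helpers import StoredContext

    # First run: generates a password and persists it with mode 0600.
    # Subsequent runs: reloads the stored value; the fresh pwgen()
    # result is discarded.
    creds = StoredContext('db-credentials.yaml', {'db_password': pwgen(24)})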
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to (or None)
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- :param jinja2 loader template_loader: A jinja2 template loader
-
- :return str: The rendered template
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None, template_loader=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
- self.template_loader = template_loader
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {'ctx': {}}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- context['ctx'].update(ctx)
-
- result = templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms,
- template_loader=self.template_loader)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
- return result
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
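A sketch of wiring the alias into a ServiceManager service definition; the service name, template, and target path are illustrative:

    from charmhelpers.core.services.base import ServiceManager
    from charmhelpers.core.services.helpers import render_template

    manager = ServiceManager([{
        'service': 'myapp',  # hypothetical system service
        'required_data': [{'port': 8080}],
        'data_ready': [
            render_template(source='myapp.conf.j2',
                            target='/etc/myapp/myapp.conf'),
        ],
    }])
    manager.manage()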
diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py
deleted file mode 100644
index e8df045..0000000
--- a/hooks/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as bytes" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if matches:
- size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
- else:
- # Assume that value passed in is bytes
- try:
- size = int(value)
- except ValueError:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return size
-
-
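Both helpers above raise ValueError on input they cannot interpret; for example:

    from charmhelpers.core.strutils import bool_from_string, bytes_from_string

    assert bool_from_string('Yes') is True
    assert bool_from_string(' off ') is False
    assert bytes_from_string('10M') == 10 * 1024 ** 2
    assert bytes_from_string('4096') == 4096  # bare numbers pass through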
-class BasicStringComparator(object):
- """Provides a class that will compare strings from an iterator type object.
- Used to provide > and < comparisons on strings that may not necessarily be
- alphanumerically ordered, e.g. OpenStack or Ubuntu releases after the
- 'z' wrap-around.
- """
-
- _list = None
-
- def __init__(self, item):
- if self._list is None:
- raise Exception("Must define the _list in the class definition!")
- try:
- self.index = self._list.index(item)
- except Exception:
- raise KeyError("Item '{}' is not in list '{}'"
- .format(item, self._list))
-
- def __eq__(self, other):
- assert isinstance(other, str) or isinstance(other, self.__class__)
- return self.index == self._list.index(other)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __lt__(self, other):
- assert isinstance(other, str) or isinstance(other, self.__class__)
- return self.index < self._list.index(other)
-
- def __ge__(self, other):
- return not self.__lt__(other)
-
- def __gt__(self, other):
- assert isinstance(other, str) or isinstance(other, self.__class__)
- return self.index > self._list.index(other)
-
- def __le__(self, other):
- return not self.__gt__(other)
-
- def __str__(self):
- """Always give back the item at the index so it can be used in
- comparisons like:
-
- s_mitaka = CompareOpenStack('mitaka')
- s_newton = CompareOpenStack('newton')
-
- assert s_newton > s_mitaka
-
- @returns: the string item at this comparator's index
- """
- return self._list[self.index]
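A minimal sketch of a concrete comparator; the release list is a truncated illustration, not the full set charmhelpers ships:

    from charmhelpers.core.strutils import BasicStringComparator

    class CompareOpenStack(BasicStringComparator):
        # Order matters: earlier releases sort lower.
        _list = ['mitaka', 'newton', 'ocata', 'pike', 'queens']

    assert CompareOpenStack('newton') > CompareOpenStack('mitaka')
    assert CompareOpenStack('pike') >= 'ocata'  # plain strings compare too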
diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py
deleted file mode 100644
index f1f4a28..0000000
--- a/hooks/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. '
-
-
-def create(sysctl_dict, sysctl_file, ignore=False):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a dict or YAML-formatted string of sysctl
- options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :param ignore: If True, ignore "unknown variable" errors.
- :type ignore: bool
- :returns: None
- """
- if type(sysctl_dict) is not dict:
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
- else:
- sysctl_dict_parsed = sysctl_dict
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: {} values: {}".format(sysctl_file,
- sysctl_dict_parsed),
- level=DEBUG)
-
- call = ["sysctl", "-p", sysctl_file]
- if ignore:
- call.append("-e")
-
- check_call(call)
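A sketch of both accepted input forms; the keys and target file are illustrative:

    from charmhelpers.core.sysctl import create

    # Dict form.
    create({'net.ipv4.ip_forward': 1}, '/etc/sysctl.d/50-charm.conf')

    # YAML-string form, passing -e so unknown variables are ignored.
    create("{vm.swappiness: 10}", '/etc/sysctl.d/50-charm.conf', ignore=True)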
diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py
deleted file mode 100644
index 9014015..0000000
--- a/hooks/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8',
- template_loader=None, config_template=None):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute. It can also be `None`, in which
- case no file will be written.
-
- The context should be a dict containing the values used to render the
- template.
-
- `config_template` may be provided to render from the given template string
- instead of loading it from a file.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- The rendered template will be written to the file as well as being returned
- as a string.
-
- Note: Using this requires python-jinja2 or python3-jinja2; if it is not
- installed, calling this will attempt to use charmhelpers.fetch.apt_install
- to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- if sys.version_info.major == 2:
- apt_install('python-jinja2', fatal=True)
- else:
- apt_install('python3-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if template_loader:
- template_env = Environment(loader=template_loader)
- else:
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- template_env = Environment(loader=FileSystemLoader(templates_dir))
-
- # load from a string if provided explicitly
- if config_template is not None:
- template = template_env.from_string(config_template)
- else:
- try:
- template = template_env.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- if target is not None:
- target_dir = os.path.dirname(target)
- if not os.path.exists(target_dir):
- # This is a terrible default directory permission, as the file
- # or its siblings will often contain secrets.
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
- return content
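A sketch of the common call pattern; template and target names are illustrative:

    from charmhelpers.core.templating import render

    # Renders $CHARM_DIR/templates/haproxy.cfg.j2, writes it root:root 0444,
    # and also returns the rendered string.
    content = render('haproxy.cfg.j2', '/etc/haproxy/haproxy.cfg',
                     {'backends': ['10.0.0.2', '10.0.0.3']})

    # With target=None the template is rendered but no file is written.
    preview = render('haproxy.cfg.j2', None, {'backends': []})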
diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py
deleted file mode 100644
index ab55432..0000000
--- a/hooks/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,525 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Authors:
-# Kapil Thangavelu
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic; one simple integration is
-via the HookData context manager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelpers.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit-specific bookkeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hooks.execute()
-
-
-A more basic integration is via the hook_scope context manager, which simply
-manages transaction scope (and records the hook name and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data structure capabilities (dicts, lists, ints, booleans, etc.).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, it's very helpful to understand which values
-have actually changed and how they have changed. The storage
-provides a delta method for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note that the delta method does not persist the actual change; it needs to
-be explicitly saved via the 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated with the hook name::
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu '
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
-
- Note: to facilitate unit testing, ':memory:' can be passed as the
- path parameter which causes sqlite3 to only build the db in memory.
- This should only be used for testing purposes.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- if self.db_path != ':memory:':
- with open(self.db_path, 'a') as f:
- os.fchmod(f.fileno(), 0o600)
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- Return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except Exception:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelpers.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit-specific bookkeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hooks.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions. Charm revisions are meaningless to charm
- # authors, as they don't control the revision; logic that depends
- # on revision is therefore not particularly useful. It is,
- # however, useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
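A self-contained sketch using the ':memory:' backend noted in the Storage docstring, exercising update, delta, and hook_scope:

    from charmhelpers.core.unitdata import Storage

    db = Storage(':memory:')  # testing-only backend
    with db.hook_scope('install'):
        db.update({'debug': False, 'port': 80}, prefix='config.')

    # Compare incoming values against what is stored under the prefix.
    delta = db.delta({'debug': True, 'port': 80}, 'config.')
    assert delta.debug.previous is False
    assert delta.debug.current is True
    assert 'port' not in delta  # unchanged keys are omitted

    db.update({'debug': True}, prefix='config.')  # persist the change
    db.flush()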
diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 8572d34..0000000
--- a/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import importlib
-from charmhelpers.osplatform import get_platform
-from yaml import safe_load
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class GPGKeyError(Exception):
- """Exception occurs when a GPG key cannot be fetched or used. The message
- indicates what the problem is.
- """
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-__platform__ = get_platform()
-module = "charmhelpers.fetch.%s" % __platform__
-fetch = importlib.import_module(module)
-
-filter_installed_packages = fetch.filter_installed_packages
-filter_missing_packages = fetch.filter_missing_packages
-install = fetch.apt_install
-upgrade = fetch.apt_upgrade
-update = _fetch_update = fetch.apt_update
-purge = fetch.apt_purge
-add_source = fetch.add_source
-
-if __platform__ == "ubuntu":
- apt_cache = fetch.apt_cache
- apt_install = fetch.apt_install
- apt_update = fetch.apt_update
- apt_upgrade = fetch.apt_upgrade
- apt_purge = fetch.apt_purge
- apt_autoremove = fetch.apt_autoremove
- apt_mark = fetch.apt_mark
- apt_hold = fetch.apt_hold
- apt_unhold = fetch.apt_unhold
- import_key = fetch.import_key
- get_upstream_version = fetch.get_upstream_version
-elif __platform__ == "centos":
- yum_search = fetch.yum_search
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The fragment needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- _fetch_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """Install a file tree from a remote source.
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this module's submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- for handler in handlers:
- try:
- return handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- raise UnhandledSource("No handler found for source {}".format(source))
-
-
-def install_from_config(config_var_name):
- """Install a file from config."""
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except NotImplementedError:
- # Skip missing plugins so that they can be omitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
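A sketch of how the handler chain resolves a source URL; install_remote() above uses the same pattern internally:

    from charmhelpers.fetch import plugins

    source = 'http://example.com/archive.tgz'  # illustrative URL
    for handler in plugins():
        # can_handle() returns True or an explanatory string, so the
        # comparison must be against True itself.
        if handler.can_handle(source) is True:
            print('would use', type(handler).__name__)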
diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index d25587a..0000000
--- a/hooks/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
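For example, the two helpers together decompose a netloc carrying basic-auth credentials:

    auth, host = splituser('user:secret@example.com')
    # auth == 'user:secret', host == 'example.com'
    username, password = splitpasswd(auth)
    # username == 'user', password == 'secret'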
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- # XXX: Why is this returning a boolean and a string? It's
- # doomed to fail since "bool(can_handle('foo://'))" will be True.
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propagate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'wb') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash algorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
- expected = value[0]
- check_hash(dld_file, expected, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index c4ab3ff..0000000
--- a/hooks/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-from subprocess import STDOUT, check_output
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource,
- filter_installed_packages,
- install,
-)
-from charmhelpers.core.host import mkdir
-
-
-if filter_installed_packages(['bzr']) != []:
- install(['bzr'])
- if filter_installed_packages(['bzr']) != []:
- raise NotImplementedError('Unable to install bzr')
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs."""
-
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
- return False
- elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.bzr'))
- else:
- return True
-
- def branch(self, source, dest, revno=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- cmd_opts = []
- if revno:
- cmd_opts += ['-r', str(revno)]
- if os.path.exists(dest):
- cmd = ['bzr', 'pull']
- cmd += cmd_opts
- cmd += ['--overwrite', '-d', dest, source]
- else:
- cmd = ['bzr', 'branch']
- cmd += cmd_opts
- cmd += [source, dest]
- check_output(cmd, stderr=STDOUT)
-
- def install(self, source, dest=None, revno=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
-
- if dest and not os.path.exists(dest):
- mkdir(dest, perms=0o755)
-
- try:
- self.branch(source, dest_dir, revno)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/hooks/charmhelpers/fetch/centos.py b/hooks/charmhelpers/fetch/centos.py
deleted file mode 100644
index a91dcff..0000000
--- a/hooks/charmhelpers/fetch/centos.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import subprocess
-import os
-import time
-import six
-import yum
-
-from tempfile import NamedTemporaryFile
-from charmhelpers.core.hookenv import log
-
-YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM.
-YUM_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between YUM lock checks.
-YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-def filter_installed_packages(packages):
- """Return a list of packages that require installation."""
- yb = yum.YumBase()
- package_list = yb.doPackageLists()
- temp_cache = {p.base_package_name: 1 for p in package_list['installed']}
-
- _pkgs = [p for p in packages if not temp_cache.get(p, False)]
- return _pkgs
-
-
-def install(packages, options=None, fatal=False):
- """Install one or more packages."""
- cmd = ['yum', '--assumeyes']
- if options is not None:
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_yum_command(cmd, fatal)
-
-
-def upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages."""
- cmd = ['yum', '--assumeyes']
- if options is not None:
- cmd.extend(options)
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_yum_command(cmd, fatal)
-
-
-def update(fatal=False):
- """Update local yum cache."""
- cmd = ['yum', '--assumeyes', 'update']
- log("Update with fatal: {}".format(fatal))
- _run_yum_command(cmd, fatal)
-
-
-def purge(packages, fatal=False):
- """Purge one or more packages."""
- cmd = ['yum', '--assumeyes', 'remove']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_yum_command(cmd, fatal)
-
-
-def yum_search(packages):
- """Search for a package."""
- output = {}
- cmd = ['yum', 'search']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Searching for {}".format(packages))
- result = subprocess.check_output(cmd)
- for package in list(packages):
- output[package] = package in result
- return output
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL with a rpm package
-
- @param key: A key to be added to the system's keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if source.startswith('http'):
- directory = '/etc/yum.repos.d/'
- for filename in os.listdir(directory):
- with open(directory + filename, 'r') as rpm_file:
- if source in rpm_file.read():
- break
- else:
- log("Add source: {!r}".format(source))
- # write in the charms.repo
- with open(directory + 'Charms.repo', 'a') as rpm_file:
- rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
- rpm_file.write('name=%s\n' % source[7:])
- rpm_file.write('baseurl=%s\n\n' % source)
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['rpm', '--import', key_file.name])
- else:
- subprocess.check_call(['rpm', '--import', key])
-
-
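A sketch of both key forms add_source() accepts; the URL and key values are placeholders:

    # ASCII-armored key material is written to a temp file and imported.
    add_source('http://mirror.example.com/centos-extras',
               key='-----BEGIN PGP PUBLIC KEY BLOCK-----\n...')

    # Anything else is handed to `rpm --import` as-is (path or URL).
    add_source('http://mirror.example.com/centos-extras',
               key='/tmp/RPM-GPG-KEY-example')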
-def _run_yum_command(cmd, fatal=False):
- """Run an YUM command.
-
- Checks the output and retry if the fatal flag is set to True.
-
- :param cmd: str: The yum command to run.
- :param fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the yum
- # lock was not acquired.
-
- while result is None or result == YUM_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > YUM_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire YUM lock. Will retry in {} seconds."
- "".format(YUM_NO_LOCK_RETRY_DELAY))
- time.sleep(YUM_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py
deleted file mode 100644
index 070ca9b..0000000
--- a/hooks/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-from subprocess import check_output, CalledProcessError, STDOUT
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource,
- filter_installed_packages,
- install,
-)
-
-if filter_installed_packages(['git']) != []:
- install(['git'])
- if filter_installed_packages(['git']) != []:
- raise NotImplementedError('Unable to install git')
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs."""
-
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git', ''):
- return False
- elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.git'))
- else:
- return True
-
- def clone(self, source, dest, branch="master", depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if os.path.exists(dest):
- cmd = ['git', '-C', dest, 'pull', source, branch]
- else:
- cmd = ['git', 'clone', source, dest, '--branch', branch]
- if depth:
- cmd.extend(['--depth', str(depth)])
- check_output(cmd, stderr=STDOUT)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- try:
- self.clone(source, dest_dir, branch, depth)
- except CalledProcessError as e:
- raise UnhandledSource(e)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/hooks/charmhelpers/fetch/python/__init__.py b/hooks/charmhelpers/fetch/python/__init__.py
deleted file mode 100644
index bff99dc..0000000
--- a/hooks/charmhelpers/fetch/python/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2019 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/fetch/python/debug.py b/hooks/charmhelpers/fetch/python/debug.py
deleted file mode 100644
index 757135e..0000000
--- a/hooks/charmhelpers/fetch/python/debug.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-
-import atexit
-import sys
-
-from charmhelpers.fetch.python.rpdb import Rpdb
-from charmhelpers.core.hookenv import (
- open_port,
- close_port,
- ERROR,
- log
-)
-
-__author__ = "Jorge Niedbalski "
-
-DEFAULT_ADDR = "0.0.0.0"
-DEFAULT_PORT = 4444
-
-
-def _error(message):
- log(message, level=ERROR)
-
-
-def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
- """
- Set a trace point using the remote debugger
- """
- atexit.register(close_port, port)
- try:
- log("Starting a remote python debugger session on %s:%s" % (addr,
- port))
- open_port(port)
- debugger = Rpdb(addr=addr, port=port)
- debugger.set_trace(sys._getframe().f_back)
- except Exception:
- _error("Cannot start a remote debug session on %s:%s" % (addr,
- port))
diff --git a/hooks/charmhelpers/fetch/python/packages.py b/hooks/charmhelpers/fetch/python/packages.py
deleted file mode 100644
index 6e95028..0000000
--- a/hooks/charmhelpers/fetch/python/packages.py
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import six
-import subprocess
-import sys
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import charm_dir, log
-
-__author__ = "Jorge Niedbalski "
-
-
-def pip_execute(*args, **kwargs):
- """Overriden pip_execute() to stop sys.path being changed.
-
- The act of importing main from the pip module seems to add wheels
- from /usr/share/python-wheels (installed by various tools) to sys.path.
- This function ensures that sys.path remains the same after the call is
- executed.
- """
- try:
- _path = sys.path
- try:
- from pip import main as _pip_execute
- except ImportError:
- apt_update()
- if six.PY2:
- apt_install('python-pip')
- else:
- apt_install('python3-pip')
- from pip import main as _pip_execute
- _pip_execute(*args, **kwargs)
- finally:
- sys.path = _path
-
-
-def parse_options(given, available):
- """Given a set of options, check if available"""
- for key, value in sorted(given.items()):
- if not value:
- continue
- if key in available:
- yield "--{0}={1}".format(key, value)
-
-
-def pip_install_requirements(requirements, constraints=None, **options):
- """Install a requirements file.
-
- :param constraints: Path to pip constraints file.
- http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
- """
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- command.append("-r {0}".format(requirements))
- if constraints:
- command.append("-c {0}".format(constraints))
- log("Installing from file: {} with constraints {} "
- "and options: {}".format(requirements, constraints, command))
- else:
- log("Installing from file: {} with options: {}".format(requirements,
- command))
- pip_execute(command)
-
-
-def pip_install(package, fatal=False, upgrade=False, venv=None,
- constraints=None, **options):
- """Install a python package"""
- if venv:
- venv_python = os.path.join(venv, 'bin/pip')
- command = [venv_python, "install"]
- else:
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', 'index-url', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if upgrade:
- command.append('--upgrade')
-
- if constraints:
- command.extend(['-c', constraints])
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Installing {} package with options: {}".format(package,
- command))
- if venv:
- subprocess.check_call(command)
- else:
- pip_execute(command)
-
-
-def pip_uninstall(package, **options):
- """Uninstall a python package"""
- command = ["uninstall", "-q", "-y"]
-
- available_options = ('proxy', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Uninstalling {} package with options: {}".format(package,
- command))
- pip_execute(command)
-
-
-def pip_list():
- """Returns the list of current python installed packages
- """
- return pip_execute(["list"])
-
-
-def pip_create_virtualenv(path=None):
- """Create an isolated Python environment."""
- if six.PY2:
- apt_install('python-virtualenv')
- else:
- apt_install('python3-virtualenv')
-
- if path:
- venv_path = path
- else:
- venv_path = os.path.join(charm_dir(), 'venv')
-
- if not os.path.exists(venv_path):
- subprocess.check_call(['virtualenv', venv_path])
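A sketch tying these helpers together; package names, proxy, and the venv path are illustrative:

    # System Python, routed through a proxy.
    pip_install('requests', upgrade=True, proxy='http://proxy:3128')

    # Or target an isolated environment.
    pip_create_virtualenv('/srv/app/venv')
    pip_install(['flask', 'gunicorn'], venv='/srv/app/venv')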
diff --git a/hooks/charmhelpers/fetch/python/rpdb.py b/hooks/charmhelpers/fetch/python/rpdb.py
deleted file mode 100644
index 9b31610..0000000
--- a/hooks/charmhelpers/fetch/python/rpdb.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Remote Python Debugger (pdb wrapper)."""
-
-import pdb
-import socket
-import sys
-
-__author__ = "Bertrand Janin "
-__version__ = "0.1.3"
-
-
-class Rpdb(pdb.Pdb):
-
- def __init__(self, addr="127.0.0.1", port=4444):
- """Initialize the socket and initialize pdb."""
-
- # Backup stdin and stdout before replacing them by the socket handle
- self.old_stdout = sys.stdout
- self.old_stdin = sys.stdin
-
- # Open a 'reusable' socket to let the webapp reload on the same port
- self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
- self.skt.bind((addr, port))
- self.skt.listen(1)
- (clientsocket, address) = self.skt.accept()
- handle = clientsocket.makefile('rw')
- pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
- sys.stdout = sys.stdin = handle
-
- def shutdown(self):
- """Revert stdin and stdout, close the socket."""
- sys.stdout = self.old_stdout
- sys.stdin = self.old_stdin
- self.skt.close()
- self.set_continue()
-
- def do_continue(self, arg):
- """Stop all operation on ``continue``."""
- self.shutdown()
- return 1
-
- do_EOF = do_quit = do_exit = do_c = do_cont = do_continue
diff --git a/hooks/charmhelpers/fetch/python/version.py b/hooks/charmhelpers/fetch/python/version.py
deleted file mode 100644
index 3eb4210..0000000
--- a/hooks/charmhelpers/fetch/python/version.py
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-
-__author__ = "Jorge Niedbalski "
-
-
-def current_version():
- """Current system python version"""
- return sys.version_info
-
-
-def current_version_string():
- """Current system python version as string major.minor.micro"""
- return "{0}.{1}.{2}".format(sys.version_info.major,
- sys.version_info.minor,
- sys.version_info.micro)
diff --git a/hooks/charmhelpers/fetch/snap.py b/hooks/charmhelpers/fetch/snap.py
deleted file mode 100644
index 395836c..0000000
--- a/hooks/charmhelpers/fetch/snap.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Copyright 2014-2017 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Charm helpers snap for classic charms.
-
-If writing reactive charms, use the snap layer:
-https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
-"""
-import subprocess
-import os
-from time import sleep
-from charmhelpers.core.hookenv import log
-
-__author__ = 'Joseph Borg '
-
-# The return code for "couldn't acquire lock" in Snap
-# (hopefully this will be improved).
-SNAP_NO_LOCK = 1
-SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks.
-SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-SNAP_CHANNELS = [
- 'edge',
- 'beta',
- 'candidate',
- 'stable',
-]
-
-
-class CouldNotAcquireLockException(Exception):
- pass
-
-
-class InvalidSnapChannel(Exception):
- pass
-
-
-def _snap_exec(commands):
- """
- Execute snap commands.
-
- :param commands: List of command arguments to pass to snap
- :return: Integer exit code
- """
- assert type(commands) == list
-
- retry_count = 0
- return_code = None
-
- while return_code is None or return_code == SNAP_NO_LOCK:
- try:
- return_code = subprocess.check_call(['snap'] + commands,
- env=os.environ)
- except subprocess.CalledProcessError as e:
- retry_count += 1
- if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
- raise CouldNotAcquireLockException(
- 'Could not acquire lock after {} attempts'
- .format(SNAP_NO_LOCK_RETRY_COUNT))
- return_code = e.returncode
- log('Snap failed to acquire lock, trying again in {} seconds.'
- .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
- sleep(SNAP_NO_LOCK_RETRY_DELAY)
-
- return return_code
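-
-# Usage sketch (hypothetical call): while snap exits with SNAP_NO_LOCK the
-# command is retried every SNAP_NO_LOCK_RETRY_DELAY seconds, up to
-# SNAP_NO_LOCK_RETRY_COUNT attempts:
-#     _snap_exec(['install', '--classic', 'go'])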
-
-
-def snap_install(packages, *flags):
- """
- Install a snap package.
-
- :param packages: String or list of strings: package name(s)
- :param flags: List of string flags to pass to the install command
- :return: Integer return code from snap
- """
- if type(packages) is not list:
- packages = [packages]
-
- flags = list(flags)
-
- message = 'Installing snap(s) "%s"' % ', '.join(packages)
- if flags:
- message += ' with option(s) "%s"' % ', '.join(flags)
-
- log(message, level='INFO')
- return _snap_exec(['install'] + flags + packages)
-
-
-def snap_remove(packages, *flags):
- """
- Remove a snap package.
-
- :param packages: String or list of strings: package name(s)
- :param flags: List of string flags to pass to the remove command
- :return: Integer return code from snap
- """
- if type(packages) is not list:
- packages = [packages]
-
- flags = list(flags)
-
- message = 'Removing snap(s) "%s"' % ', '.join(packages)
- if flags:
- message += ' with options "%s"' % ', '.join(flags)
-
- log(message, level='INFO')
- return _snap_exec(['remove'] + flags + packages)
-
-
-def snap_refresh(packages, *flags):
- """
- Refresh / Update snap package.
-
- :param packages: String or list of strings: package name(s)
- :param flags: List of string flags to pass to the refresh command
- :return: Integer return code from snap
- """
- if type(packages) is not list:
- packages = [packages]
-
- flags = list(flags)
-
- message = 'Refreshing snap(s) "%s"' % ', '.join(packages)
- if flags:
- message += ' with options "%s"' % ', '.join(flags)
-
- log(message, level='INFO')
- return _snap_exec(['refresh'] + flags + packages)
-
-
-def valid_snap_channel(channel):
- """ Validate snap channel exists
-
- :raises InvalidSnapChannel: When channel does not exist
- :return: Boolean
- """
- if channel.lower() in SNAP_CHANNELS:
- return True
- else:
- raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel))
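-
-# Usage sketch (channel names are illustrative):
-#     valid_snap_channel('stable')    # True
-#     valid_snap_channel('nightly')   # raises InvalidSnapChannel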
diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py
deleted file mode 100644
index c6d9341..0000000
--- a/hooks/charmhelpers/fetch/ubuntu.py
+++ /dev/null
@@ -1,728 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from collections import OrderedDict
-import os
-import platform
-import re
-import six
-import time
-import subprocess
-
-from charmhelpers.core.host import get_distrib_codename
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- WARNING,
- env_proxy_settings,
-)
-from charmhelpers.fetch import SourceConfigError, GPGKeyError
-
-PROPOSED_POCKET = (
- "# Proposed\n"
- "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe "
- "multiverse restricted\n")
-PROPOSED_PORTS_POCKET = (
- "# Proposed\n"
- "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe "
- "multiverse restricted\n")
-# Only x86_64, ppc64le, aarch64 and s390x are supported at the moment.
-ARCH_TO_PROPOSED_POCKET = {
- 'x86_64': PROPOSED_POCKET,
- 'ppc64le': PROPOSED_PORTS_POCKET,
- 'aarch64': PROPOSED_PORTS_POCKET,
- 's390x': PROPOSED_PORTS_POCKET,
-}
-CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
-CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'folsom/updates': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'grizzly/updates': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'havana/updates': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'icehouse/updates': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'juno/updates': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'kilo/updates': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'liberty/updates': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
- # Newton
- 'newton': 'xenial-updates/newton',
- 'newton/updates': 'xenial-updates/newton',
- 'xenial-newton': 'xenial-updates/newton',
- 'xenial-newton/updates': 'xenial-updates/newton',
- 'xenial-updates/newton': 'xenial-updates/newton',
- 'newton/proposed': 'xenial-proposed/newton',
- 'xenial-newton/proposed': 'xenial-proposed/newton',
- 'xenial-proposed/newton': 'xenial-proposed/newton',
- # Ocata
- 'ocata': 'xenial-updates/ocata',
- 'ocata/updates': 'xenial-updates/ocata',
- 'xenial-ocata': 'xenial-updates/ocata',
- 'xenial-ocata/updates': 'xenial-updates/ocata',
- 'xenial-updates/ocata': 'xenial-updates/ocata',
- 'ocata/proposed': 'xenial-proposed/ocata',
- 'xenial-ocata/proposed': 'xenial-proposed/ocata',
- 'xenial-proposed/ocata': 'xenial-proposed/ocata',
- # Pike
- 'pike': 'xenial-updates/pike',
- 'xenial-pike': 'xenial-updates/pike',
- 'xenial-pike/updates': 'xenial-updates/pike',
- 'xenial-updates/pike': 'xenial-updates/pike',
- 'pike/proposed': 'xenial-proposed/pike',
- 'xenial-pike/proposed': 'xenial-proposed/pike',
- 'xenial-proposed/pike': 'xenial-proposed/pike',
- # Queens
- 'queens': 'xenial-updates/queens',
- 'xenial-queens': 'xenial-updates/queens',
- 'xenial-queens/updates': 'xenial-updates/queens',
- 'xenial-updates/queens': 'xenial-updates/queens',
- 'queens/proposed': 'xenial-proposed/queens',
- 'xenial-queens/proposed': 'xenial-proposed/queens',
- 'xenial-proposed/queens': 'xenial-proposed/queens',
- # Rocky
- 'rocky': 'bionic-updates/rocky',
- 'bionic-rocky': 'bionic-updates/rocky',
- 'bionic-rocky/updates': 'bionic-updates/rocky',
- 'bionic-updates/rocky': 'bionic-updates/rocky',
- 'rocky/proposed': 'bionic-proposed/rocky',
- 'bionic-rocky/proposed': 'bionic-proposed/rocky',
- 'bionic-proposed/rocky': 'bionic-proposed/rocky',
- # Stein
- 'stein': 'bionic-updates/stein',
- 'bionic-stein': 'bionic-updates/stein',
- 'bionic-stein/updates': 'bionic-updates/stein',
- 'bionic-updates/stein': 'bionic-updates/stein',
- 'stein/proposed': 'bionic-proposed/stein',
- 'bionic-stein/proposed': 'bionic-proposed/stein',
- 'bionic-proposed/stein': 'bionic-proposed/stein',
-}
-
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries.
-CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times.
-
-
-def filter_installed_packages(packages):
- """Return a list of packages that require installation."""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def filter_missing_packages(packages):
- """Return a list of packages that are installed.
-
- :param packages: list of packages to evaluate.
- :returns list: Packages that are installed.
- """
- return list(
- set(packages) -
- set(filter_installed_packages(packages))
- )
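-
-# Usage sketch (package names are illustrative assumptions); despite the
-# name, this returns the subset of packages that are already installed:
-#     filter_missing_packages(['lxd', 'no-such-pkg'])   # -> ['lxd']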
-
-
-def apt_cache(in_memory=True, progress=None):
- """Build and return an apt cache."""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache(progress)
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages."""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages."""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache."""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages."""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_autoremove(purge=True, fatal=False):
- """Purge one or more packages."""
- cmd = ['apt-get', '--assume-yes', 'autoremove']
- if purge:
- cmd.append('--purge')
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark."""
- log("Marking {} as {}".format(packages, mark))
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def import_key(key):
- """Import an ASCII Armor key.
-
- A Radix64 format keyid is also supported for backwards
- compatibility. In this case Ubuntu keyserver will be
- queried for a key via HTTPS by its keyid. This method
- is less preferable because https proxy servers may
- require traffic decryption which is equivalent to a
- man-in-the-middle attack (a proxy server impersonates
- keyserver TLS certificates and has to be explicitly
- trusted by the system).
-
- :param key: A GPG key in ASCII armor format,
- including BEGIN and END markers or a keyid.
- :type key: (bytes, str)
- :raises: GPGKeyError if the key could not be imported
- """
- key = key.strip()
- if '-' in key or '\n' in key:
- # Send everything not obviously a keyid to GPG to import, as
- # we trust its validation better than our own. eg. handling
- # comments before the key.
- log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
- if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
- '-----END PGP PUBLIC KEY BLOCK-----' in key):
- log("Writing provided PGP key in the binary format", level=DEBUG)
- if six.PY3:
- key_bytes = key.encode('utf-8')
- else:
- key_bytes = key
- key_name = _get_keyid_by_gpg_key(key_bytes)
- key_gpg = _dearmor_gpg_key(key_bytes)
- _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
- else:
- raise GPGKeyError("ASCII armor markers missing from GPG key")
- else:
- log("PGP key found (looks like Radix64 format)", level=WARNING)
- log("SECURELY importing PGP key from keyserver; "
- "full key not provided.", level=WARNING)
- # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL
- # to retrieve GPG keys. `apt-key adv` command is deprecated as is
- # apt-key in general as noted in its manpage. See lp:1433761 for more
- # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop
- # gpg
- key_asc = _get_key_by_keyid(key)
- # write the key in GPG format so that apt-key list shows it
- key_gpg = _dearmor_gpg_key(key_asc)
- _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
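-
-# Usage sketch (hypothetical keys, for illustration only); either form is
-# accepted, but a full ASCII-armored key avoids the keyserver round trip:
-#     import_key('5EDB1B62EC4926EA')  # Radix64 keyid, fetched via HTTPS
-#     import_key('-----BEGIN PGP PUBLIC KEY BLOCK-----\n...')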
-
-
-def _get_keyid_by_gpg_key(key_material):
- """Get a GPG key fingerprint by GPG key material.
- Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
- or binary GPG key material. Can be used, for example, to generate file
- names for keys passed via charm options.
-
- :param key_material: ASCII armor-encoded or binary GPG key material
- :type key_material: bytes
- :raises: GPGKeyError if invalid key material has been provided
- :returns: A GPG key fingerprint
- :rtype: str
- """
- # Use the same gpg command for both Xenial and Bionic
- cmd = 'gpg --with-colons --with-fingerprint'
- ps = subprocess.Popen(cmd.split(),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- stdin=subprocess.PIPE)
- out, err = ps.communicate(input=key_material)
- if six.PY3:
- out = out.decode('utf-8')
- err = err.decode('utf-8')
- if 'gpg: no valid OpenPGP data found.' in err:
- raise GPGKeyError('Invalid GPG key material provided')
- # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
- return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1)
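-
-# The regex above matches gnupg's machine-readable "fpr" record, which
-# carries the fingerprint in field 10, e.g. (illustrative line):
-#     fpr:::::::::35F77D63B5CEC106C577ED856E85A86E4652B4E6: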
-
-
-def _get_key_by_keyid(keyid):
- """Get a key via HTTPS from the Ubuntu keyserver.
- Different key ID formats are supported by SKS keyservers (the longer ones
- are more secure, see "dead beef attack" and https://evil32.com/). Since
- HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will
- impersonate keyserver.ubuntu.com and generate a certificate with
- keyserver.ubuntu.com in the CN field or in SubjAltName fields of a
- certificate. If such proxy behavior is expected it is necessary to add the
- CA certificate chain containing the intermediate CA of the SSLBump proxy to
- every machine that this code runs on via ca-certs cloud-init directive (via
- cloudinit-userdata model-config) or via other means (such as through a
- custom charm option). Also note that DNS resolution for the hostname in a
- URL is done at a proxy server - not at the client side.
-
- 8-digit (32 bit) key ID
- https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6
- 16-digit (64 bit) key ID
- https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6
- 40-digit key ID:
- https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6
-
- :param keyid: An 8, 16 or 40 hex digit keyid to find a key for
- :type keyid: (bytes, str)
- :returns: A key material for the specified GPG key id
- :rtype: (str, bytes)
- :raises: subprocess.CalledProcessError
- """
- # options=mr - machine-readable output (disables html wrappers)
- keyserver_url = ('https://keyserver.ubuntu.com'
- '/pks/lookup?op=get&options=mr&exact=on&search=0x{}')
- curl_cmd = ['curl', keyserver_url.format(keyid)]
- # use proxy server settings in order to retrieve the key
- return subprocess.check_output(curl_cmd,
- env=env_proxy_settings(['https']))
-
-
-def _dearmor_gpg_key(key_asc):
- """Converts a GPG key in the ASCII armor format to the binary format.
-
- :param key_asc: A GPG key in ASCII armor format.
- :type key_asc: (str, bytes)
- :returns: A GPG key in binary format
- :rtype: (str, bytes)
- :raises: GPGKeyError
- """
- ps = subprocess.Popen(['gpg', '--dearmor'],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- stdin=subprocess.PIPE)
- out, err = ps.communicate(input=key_asc)
- # no need to decode output as it is binary (invalid utf-8), only error
- if six.PY3:
- err = err.decode('utf-8')
- if 'gpg: no valid OpenPGP data found.' in err:
- raise GPGKeyError('Invalid GPG key material. Check your network setup'
- ' (MTU, routing, DNS) and/or proxy server settings'
- ' as well as destination keyserver status.')
- else:
- return out
-
-
-def _write_apt_gpg_keyfile(key_name, key_material):
- """Writes GPG key material into a file at a provided path.
-
- :param key_name: A key name to use for a key file (could be a fingerprint)
- :type key_name: str
- :param key_material: A GPG key material (binary)
- :type key_material: (str, bytes)
- """
- with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
- 'wb') as keyf:
- keyf.write(key_material)
-
-
-def add_source(source, key=None, fail_invalid=False):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- The full list of source specifications supported by the function is:
-
- 'distro': A NOP; i.e. it has no effect.
- 'proposed': the proposed deb spec [2] is written to
- /etc/apt/sources.list.d/proposed.list
- 'distro-proposed': adds <version>-proposed to the debs [2]
- 'ppa:<ppa-name>': add-apt-repository --yes <ppa-name>
- 'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
- 'http://....': add-apt-repository --yes http://...
- 'cloud-archive:<spec>': add-apt-repository --yes cloud-archive:<spec>
- 'cloud:<release>[-staging]': specify a Cloud Archive pocket with an
- optional staging version. If staging is used then the staging PPA [2]
- will be used. If staging is NOT used then the cloud archive [3] will be
- added, and the 'ubuntu-cloud-keyring' package will be added for the
- current distro.
-
- Otherwise the source is not recognised and this is logged to the juju log.
- However, no error is raised, unless fail_invalid is True.
-
- [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
- where {} is replaced with the derived pocket name.
- [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
- main universe multiverse restricted
- where {} is replaced with the lsb_release codename (e.g. xenial)
- [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu
- to /etc/apt/sources.list.d/cloud-archive-list
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. PPA and cloud archive keys
- are securely added automatically, so should not be provided.
-
- @param fail_invalid: (boolean) if True, then the function raises a
- SourceConfigError if there is no matching installation source.
-
- @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
- valid pocket in CLOUD_ARCHIVE_POCKETS
- """
- _mapping = OrderedDict([
- (r"^distro$", lambda: None), # This is a NOP
- (r"^(?:proposed|distro-proposed)$", _add_proposed),
- (r"^cloud-archive:(.*)$", _add_apt_repository),
- (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
- (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
- (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
- (r"^cloud:(.*)$", _add_cloud_pocket),
- (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
- ])
- if source is None:
- source = ''
- for r, fn in six.iteritems(_mapping):
- m = re.match(r, source)
- if m:
- # call the associated function with the captured groups
- # raises SourceConfigError on error.
- fn(*m.groups())
- if key:
- try:
- import_key(key)
- except GPGKeyError as e:
- raise SourceConfigError(str(e))
- break
- else:
- # nothing matched. log an error and maybe sys.exit
- err = "Unknown source: {!r}".format(source)
- log(err)
- if fail_invalid:
- raise SourceConfigError(err)
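-
-# Usage sketch (sources are illustrative; see the docstring above):
-#     add_source('cloud:bionic-stein')
-#     add_source('ppa:charmers/example')
-#     add_source('deb https://private.example.com/ubuntu trusty main',
-#                key='5EDB1B62EC4926EA')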
-
-
-def _add_proposed():
- """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
-
- Uses get_distrib_codename to determine the correct stanza for
- the deb line.
-
- For Intel architectures PROPOSED_POCKET is used for the release, but for
- other architectures PROPOSED_PORTS_POCKET is used for the release.
- """
- release = get_distrib_codename()
- arch = platform.machine()
- if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
- raise SourceConfigError("Arch {} not supported for (distro-)proposed"
- .format(arch))
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
-
-
-def _add_apt_repository(spec):
- """Add the spec using add_apt_repository
-
- :param spec: the parameter to pass to add_apt_repository
- :type spec: str
- """
- if '{series}' in spec:
- series = get_distrib_codename()
- spec = spec.replace('{series}', series)
- # software-properties package for bionic properly reacts to proxy settings
- # passed as environment variables (See lp:1433761). This is not the case
- # for LTS and non-LTS releases below bionic.
- _run_with_retries(['add-apt-repository', '--yes', spec],
- cmd_env=env_proxy_settings(['https']))
-
-
-def _add_cloud_pocket(pocket):
- """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
-
- Note that this overwrites the existing file if there is one.
-
- This function also converts the simple pocket into the actual pocket using
- the CLOUD_ARCHIVE_POCKETS mapping.
-
- :param pocket: string representing the pocket to add a deb spec for.
- :raises: SourceConfigError if the cloud pocket doesn't exist or the
- requested release doesn't match the current distro version.
- """
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
-
-
-def _add_cloud_staging(cloud_archive_release, openstack_release):
- """Add the cloud staging repository which is in
- ppa:ubuntu-cloud-archive/-staging
-
- This function checks that the cloud_archive_release matches the current
- codename for the distro that charm is being installed on.
-
- :param cloud_archive_release: string, codename for the release.
- :param openstack_release: String, codename for the openstack release.
- :raises: SourceConfigError if the cloud_archive_release doesn't match the
- current version of the os.
- """
- _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
- ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release)
- cmd = 'add-apt-repository -y {}'.format(ppa)
- _run_with_retries(cmd.split(' '))
-
-
-def _add_cloud_distro_check(cloud_archive_release, openstack_release):
- """Add the cloud pocket, but also check the cloud_archive_release against
- the current distro, and use the openstack_release as the full lookup.
-
- This just calls _add_cloud_pocket() with the openstack_release as pocket
- to get the correct cloud-archive.list for dpkg to work with.
-
- :param cloud_archive_release: String, codename for the distro release.
- :param openstack_release: String, spec for the release to look up in the
- CLOUD_ARCHIVE_POCKETS
- :raises: SourceConfigError if this is the wrong distro, or the pocket spec
- doesn't exist.
- """
- _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
- _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release))
-
-
-def _verify_is_ubuntu_rel(release, os_release):
- """Verify that the release is in the same as the current ubuntu release.
-
- :param release: String, lowercase for the release.
- :param os_release: String, the os_release being asked for
- :raises: SourceConfigError if the release is not the same as the ubuntu
- release.
- """
- ubuntu_rel = get_distrib_codename()
- if release != ubuntu_rel:
- raise SourceConfigError(
- 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu '
- 'version ({})'.format(release, os_release, ubuntu_rel))
-
-
-def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
- retry_message="", cmd_env=None):
- """Run a command and retry until success or max_retries is reached.
-
- :param cmd: list: The apt command to run.
- :param max_retries: int: The number of retries to attempt on a fatal
- command. Defaults to CMD_RETRY_COUNT.
- :param retry_exitcodes: tuple: Optional additional exit codes to retry.
- Defaults to retry on exit code 1.
- :param retry_message: str: Optional log prefix emitted during retries.
- :param cmd_env: dict: Environment variables to add to the command run.
- """
-
- env = None
- kwargs = {}
- if cmd_env:
- env = os.environ.copy()
- env.update(cmd_env)
- kwargs['env'] = env
-
- if not retry_message:
- retry_message = "Failed executing '{}'".format(" ".join(cmd))
- retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)
-
- retry_count = 0
- result = None
-
- retry_results = (None,) + retry_exitcodes
- while result in retry_results:
- try:
- result = subprocess.check_call(cmd, **kwargs)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > max_retries:
- raise
- result = e.returncode
- log(retry_message)
- time.sleep(CMD_RETRY_DELAY)
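-
-# Usage sketch (hypothetical invocation): retry 'apt-get update' on the
-# APT "couldn't acquire lock" exit code as well as the default code 1:
-#     _run_with_retries(['apt-get', 'update'],
-#                       retry_exitcodes=(1, APT_NO_LOCK))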
-
-
-def _run_apt_command(cmd, fatal=False):
- """Run an apt command with optional retries.
-
- :param cmd: list: The apt command to run.
- :param fatal: bool: Whether the command's exit code should be checked
- and the command retried on failure.
- """
- # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment.
- cmd_env = {
- 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
-
- if fatal:
- _run_with_retries(
- cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
- retry_message="Couldn't acquire DPKG lock")
- else:
- env = os.environ.copy()
- env.update(cmd_env)
- subprocess.call(cmd, env=env)
-
-
-def get_upstream_version(package):
- """Determine upstream version based on installed package
-
- @returns None (if not installed) or the upstream version
- """
- import apt_pkg
- cache = apt_cache()
- try:
- pkg = cache[package]
- except Exception:
- # the package is unknown to the current apt cache.
- return None
-
- if not pkg.current_ver:
- # package is known, but no version is currently installed.
- return None
-
- return apt_pkg.upstream_version(pkg.current_ver.ver_str)
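-
-# Usage sketch (version string is an illustrative assumption):
-#     get_upstream_version('lxd')   # e.g. '3.0.3', or None if not installed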
diff --git a/hooks/charmhelpers/osplatform.py b/hooks/charmhelpers/osplatform.py
deleted file mode 100644
index d9a4d5c..0000000
--- a/hooks/charmhelpers/osplatform.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import platform
-
-
-def get_platform():
- """Return the current OS platform.
-
- For example: if current os platform is Ubuntu then a string "ubuntu"
- will be returned (which is the name of the module).
- This string is used to decide which platform module should be imported.
- """
- # linux_distribution is deprecated and was removed in Python 3.8.
- # Warnings *not* disabled, as we certainly need to fix this.
- tuple_platform = platform.linux_distribution()
- current_platform = tuple_platform[0]
- if "Ubuntu" in current_platform:
- return "ubuntu"
- elif "CentOS" in current_platform:
- return "centos"
- elif "debian" in current_platform:
- # Stock Python does not detect Ubuntu and instead returns debian.
- # Or at least it does in some build environments like Travis CI
- return "ubuntu"
- else:
- raise RuntimeError("This module is not supported on {}."
- .format(current_platform))
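-
-# Usage sketch: on an Ubuntu host this returns the platform module name:
-#     get_platform()   # -> 'ubuntu'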
diff --git a/hooks/charmhelpers/payload/__init__.py b/hooks/charmhelpers/payload/__init__.py
deleted file mode 100644
index ee55cb3..0000000
--- a/hooks/charmhelpers/payload/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"Tools for working with files injected into a charm just before deployment."
diff --git a/hooks/charmhelpers/payload/execd.py b/hooks/charmhelpers/payload/execd.py
deleted file mode 100644
index 1502aa0..0000000
--- a/hooks/charmhelpers/payload/execd.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-import subprocess
-from charmhelpers.core import hookenv
-
-
-def default_execd_dir():
- return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
-
-
-def execd_module_paths(execd_dir=None):
- """Generate a list of full paths to modules within execd_dir."""
- if not execd_dir:
- execd_dir = default_execd_dir()
-
- if not os.path.exists(execd_dir):
- return
-
- for subpath in os.listdir(execd_dir):
- module = os.path.join(execd_dir, subpath)
- if os.path.isdir(module):
- yield module
-
-
-def execd_submodule_paths(command, execd_dir=None):
- """Generate a list of full paths to the specified command within exec_dir.
- """
- for module_path in execd_module_paths(execd_dir):
- path = os.path.join(module_path, command)
- if os.access(path, os.X_OK) and os.path.isfile(path):
- yield path
-
-
-def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT):
- """Run command for each module within execd_dir which defines it."""
- for submodule_path in execd_submodule_paths(command, execd_dir):
- try:
- subprocess.check_output(submodule_path, stderr=stderr,
- universal_newlines=True)
- except subprocess.CalledProcessError as e:
- hookenv.log("Error ({}) running {}. Output: {}".format(
- e.returncode, e.cmd, e.output))
- if die_on_error:
- sys.exit(e.returncode)
-
-
-def execd_preinstall(execd_dir=None):
- """Run charm-pre-install for each module within execd_dir."""
- execd_run('charm-pre-install', execd_dir=execd_dir)
diff --git a/hooks/config-changed b/hooks/config-changed
deleted file mode 120000
index 4a63a15..0000000
--- a/hooks/config-changed
+++ /dev/null
@@ -1 +0,0 @@
-lxd_hooks.py
\ No newline at end of file
diff --git a/hooks/install b/hooks/install
deleted file mode 100755
index 26d9b25..0000000
--- a/hooks/install
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash -e
-# Wrapper to deal with newer Ubuntu versions that don't have py2 installed
-# by default.
-
-declare -a DEPS=('apt')
-
-check_and_install() {
- pkg="${1}-${2}"
- if ! dpkg -s ${pkg} > /dev/null 2>&1; then
- apt-get -y install ${pkg}
- fi
-}
-
-PYTHON="python"
-
-for dep in ${DEPS[@]}; do
- check_and_install ${PYTHON} ${dep}
-done
-
-exec ./hooks/install.real
diff --git a/hooks/install.real b/hooks/install.real
deleted file mode 120000
index 4a63a15..0000000
--- a/hooks/install.real
+++ /dev/null
@@ -1 +0,0 @@
-lxd_hooks.py
\ No newline at end of file
diff --git a/hooks/lxd-migration-relation-changed b/hooks/lxd-migration-relation-changed
deleted file mode 120000
index 4a63a15..0000000
--- a/hooks/lxd-migration-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-lxd_hooks.py
\ No newline at end of file
diff --git a/hooks/lxd-migration-relation-joined b/hooks/lxd-migration-relation-joined
deleted file mode 120000
index 4a63a15..0000000
--- a/hooks/lxd-migration-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-lxd_hooks.py
\ No newline at end of file
diff --git a/hooks/lxd-relation-changed b/hooks/lxd-relation-changed
deleted file mode 120000
index 4a63a15..0000000
--- a/hooks/lxd-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-lxd_hooks.py
\ No newline at end of file
diff --git a/hooks/lxd-relation-joined b/hooks/lxd-relation-joined
deleted file mode 120000
index 4a63a15..0000000
--- a/hooks/lxd-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-lxd_hooks.py
\ No newline at end of file
diff --git a/hooks/lxd_hooks.py b/hooks/lxd_hooks.py
deleted file mode 100755
index b65d58c..0000000
--- a/hooks/lxd_hooks.py
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from socket import gethostname
-import sys
-import uuid
-
-from charmhelpers.core.hookenv import (
- Hooks,
- UnregisteredHookError,
- config,
- log,
- unit_get,
- relation_set,
- relation_get,
- relation_ids,
- related_units,
- status_set,
-)
-
-from charmhelpers.core.host import (
- umount,
- add_user_to_group,
-)
-
-from lxd_utils import (
- filesystem_mounted,
- determine_packages,
- install_lxd_source,
- configure_lxd_source,
- configure_lxd_block,
- lxd_trust_password,
- configure_lxd_remote,
- configure_lxd_host,
- assess_status,
- has_storage,
- LXD_POOL,
-)
-
-from charmhelpers.fetch import (
- apt_update,
- apt_install,
- add_source,
-)
-
-from charmhelpers.contrib.openstack.utils import (
- clear_unit_paused,
- clear_unit_upgrading,
- set_unit_paused,
- set_unit_upgrading,
-)
-
-hooks = Hooks()
-
-
-@hooks.hook('install.real')
-def install():
- status_set('maintenance', 'Installing LXD packages')
- if config('source'):
- add_source(config('source'))
- apt_update(fatal=True)
- apt_install(determine_packages(), fatal=True)
- if config('use-source'):
- install_lxd_source()
- configure_lxd_source()
-
-
-@hooks.hook()
-def config_changed():
- e_mountpoint = config('ephemeral-unmount')
- if e_mountpoint and filesystem_mounted(e_mountpoint):
- umount(e_mountpoint)
- configure_lxd_block()
- configure_lxd_host()
-
-
-@hooks.hook('lxd-migration-relation-joined')
-def lxd_relation_joined(rid=None):
- settings = {}
- settings['password'] = lxd_trust_password()
- settings['hostname'] = gethostname()
- settings['address'] = unit_get('private-address')
- if has_storage():
- settings['pool'] = LXD_POOL
- relation_set(relation_id=rid,
- relation_settings=settings)
-
-
-@hooks.hook('lxd-relation-changed')
-def lxd_relation_changed():
- user = relation_get('user')
- if user:
- add_user_to_group(user, 'lxd')
- for rid in relation_ids('lxd'):
- relation_set(relation_id=rid,
- nonce=uuid.uuid4())
- # Re-fire lxd-migration relation to ensure that
- # remotes have been setup for the user
- for rid in relation_ids('lxd-migration'):
- for unit in related_units(rid):
- lxd_migration_relation_changed(rid, unit)
-
-
-@hooks.hook('lxd-migration-relation-changed')
-def lxd_migration_relation_changed(rid=None, unit=None):
- settings = {
- 'password': relation_get('password',
- rid=rid,
- unit=unit),
- 'hostname': relation_get('hostname',
- rid=rid,
- unit=unit),
- 'address': relation_get('address',
- rid=rid,
- unit=unit),
- }
- if all(settings.values()):
- users = ['root']
- for rid in relation_ids('lxd'):
- for unit in related_units(rid):
- user = relation_get(attribute='user',
- rid=rid,
- unit=unit)
- if user:
- users.append(user)
- users = list(set(users))
- for u in users:
- configure_lxd_remote(settings, u)
-
-
-@hooks.hook('pre-series-upgrade')
-def pre_series_upgrade():
- log("Running prepare series upgrade hook", "INFO")
- # NOTE: The LXD packages handle the series upgrade gracefully.
- # In order to indicate the step of the series upgrade process for
- # administrators and automated scripts, the charm sets the paused and
- # upgrading states.
- set_unit_paused()
- set_unit_upgrading()
-
-
-@hooks.hook('post-series-upgrade')
-def post_series_upgrade():
- log("Running complete series upgrade hook", "INFO")
- # In order to indicate the step of the series upgrade process for
- # administrators and automated scripts, the charm clears the paused and
- # upgrading states.
- clear_unit_paused()
- clear_unit_upgrading()
-
-
-def main():
- try:
- hooks.execute(sys.argv)
- except UnregisteredHookError as e:
- log("Unknown hook {} - skipping.".format(e))
- assess_status()
-
-
-if __name__ == "__main__":
- main()
diff --git a/hooks/lxd_utils.py b/hooks/lxd_utils.py
deleted file mode 100644
index aadf2ff..0000000
--- a/hooks/lxd_utils.py
+++ /dev/null
@@ -1,621 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import glob
-import io
-import json
-import pwd
-import os
-import platform
-import shutil
-from subprocess import call, check_call, check_output, CalledProcessError
-import subprocess
-import tarfile
-import tempfile
-import uuid
-
-from charmhelpers.core.templating import render
-from charmhelpers.core.hookenv import (
- log,
- config,
- ERROR,
- INFO,
- status_set,
- application_version_set,
-)
-from charmhelpers.core.unitdata import kv
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- mkdir,
- mount,
- mounts,
- umount,
- service_stop,
- service_start,
- service_restart,
- pwgen,
- lsb_release,
- is_container,
- CompareHostReleases,
-)
-from charmhelpers.contrib.openstack.utils import (
- is_unit_upgrading_set,
-)
-from charmhelpers.contrib.storage.linux.utils import (
- is_block_device,
- zap_disk,
-)
-from charmhelpers.contrib.storage.linux.loopback import (
- ensure_loopback_device
-)
-from charmhelpers.contrib.storage.linux.lvm import (
- create_lvm_volume_group,
- create_lvm_physical_volume,
- list_lvm_volume_group,
- is_lvm_physical_volume,
- deactivate_lvm_volume_group,
- remove_lvm_physical_volume,
-)
-from charmhelpers.core.decorators import retry_on_exception
-from charmhelpers.core.kernel import modprobe
-from charmhelpers.fetch import (
- apt_install,
- get_upstream_version
-)
-
-BASE_PACKAGES = [
- 'btrfs-tools',
- 'lvm2',
- 'thin-provisioning-tools',
- 'criu',
- 'zfsutils-linux'
-]
-LXD_PACKAGES = ['lxd', 'lxd-client']
-LXD_SOURCE_PACKAGES = [
- 'lxc',
- 'lxc-dev',
- 'mercurial',
- 'git',
- 'pkg-config',
- 'protobuf-compiler',
- 'golang-goprotobuf-dev',
- 'build-essential',
- 'golang',
- 'xz-utils',
- 'tar',
- 'acl',
-]
-
-VERSION_PACKAGE = 'lxd'
-
-LXD_GIT = 'github.com/lxc/lxd'
-DEFAULT_LOOPBACK_SIZE = '10G'
-PW_LENGTH = 16
-ZFS_POOL_NAME = 'lxd'
-EXT4_USERNS_MOUNTS = "/sys/module/ext4/parameters/userns_mounts"
-# Due to bug lp:1793137 we have to make this the same as ZFS_POOL_NAME.
-LXD_POOL = 'lxd'
-VG_NAME = 'lxd_vg'
-
-
-def install_lxd():
- '''Install LXD'''
-
-
-def install_lxd_source(user='ubuntu'):
- '''Install LXD from source repositories; installs toolchain first'''
- log('Installing LXD from source')
-
- home = pwd.getpwnam(user).pw_dir
- GOPATH = os.path.join(home, 'go')
- LXD_SRC = os.path.join(GOPATH, 'src', 'github.com/lxc/lxd')
-
- if not os.path.exists(GOPATH):
- mkdir(GOPATH)
-
- env = os.environ.copy()
- env['GOPATH'] = GOPATH
- env['HTTP_PROXY'] = 'http://squid.internal:3128'
- env['HTTPS_PROXY'] = 'https://squid.internal:3128'
- cmd = 'go get -v %s' % LXD_GIT
- log('Installing LXD: %s' % (cmd))
- check_call(cmd, env=env, shell=True)
-
- if not os.path.exists(LXD_SRC):
- log('Failed to go get %s' % LXD_GIT, level=ERROR)
- raise RuntimeError('Failed to go get %s' % LXD_GIT)
-
- cwd = os.getcwd()
- try:
- os.chdir(LXD_SRC)
- cmd = 'go get -v -d ./...'
- log('Downloading LXD deps: %s' % (cmd))
- call(cmd, env=env, shell=True)
-
- # build deps
- cmd = 'make'
- log('Building LXD deps: %s' % (cmd))
- call(cmd, env=env, shell=True)
- except Exception:
- log("failed to install lxd")
- raise
- finally:
- os.chdir(cwd)
-
-
-def configure_lxd_source(user='ubuntu'):
- '''Add required configuration and files when deploying LXD from source'''
- log('Configuring LXD Source')
- home = pwd.getpwnam(user).pw_dir
- GOPATH = os.path.join(home, 'go')
-
- templates_dir = 'templates'
- render('lxd_upstart', '/etc/init/lxd.conf', {},
- perms=0o644, templates_dir=templates_dir)
- render('lxd_service', '/lib/systemd/system/lxd.service', {},
- perms=0o644, templates_dir=templates_dir)
- add_group('lxd', system_group=True)
- add_user_to_group(user, 'lxd')
-
- service_stop('lxd')
- files = glob.glob('%s/bin/*' % GOPATH)
- for i in files:
- cmd = ['cp', i, '/usr/bin']
- check_call(cmd)
- service_start('lxd')
-
-
-def get_block_devices():
- """Returns a list of block devices provided by the config."""
- lxd_block_devices = config('block-devices')
- if lxd_block_devices is None:
- return []
- else:
- return lxd_block_devices.split(' ')
-
-
-def configure_lxd_block():
- '''Configure a block device for use by LXD for containers'''
- log('Configuring LXD container storage if not configured')
- # determine if the lxd block has already been configured
- if has_storage(LXD_POOL):
- log("LXD storage pool {} already configured".format(LXD_POOL))
- return
- elif filesystem_mounted('/var/lib/lxd'):
- log('/var/lib/lxd already configured, skipping')
- return
-
- lxd_block_devices = get_block_devices()
- if len(lxd_block_devices) < 1:
- log('block devices not provided - skipping')
- return
- if len(lxd_block_devices) > 1:
- log("More than one block device is not supported yet, only"
- " using the first")
- lxd_block_device = lxd_block_devices[0]
-
- dev = None
- if lxd_block_device.startswith('/dev/'):
- dev = lxd_block_device
- elif lxd_block_device.startswith('/'):
- log('Configuring loopback device for use with LXD')
- _bd = lxd_block_device.split('|')
- if len(_bd) == 2:
- dev, size = _bd
- else:
- dev = lxd_block_device
- size = DEFAULT_LOOPBACK_SIZE
- dev = ensure_loopback_device(dev, size)
-
- if not dev or not is_block_device(dev):
- log('Invalid block device provided: %s' % lxd_block_device)
- return
-
- # NOTE: check overwrite and ensure it's only executed once.
- db = kv()
- if config('overwrite') and not db.get('scrubbed', False):
- clean_storage(dev)
- db.set('scrubbed', True)
- db.flush()
-
- if not os.path.exists('/var/lib/lxd'):
- mkdir('/var/lib/lxd')
-
- if config('storage-type') == 'btrfs':
- config_btrfs(dev)
- elif config('storage-type') == 'lvm':
- config_lvm(dev)
- elif config('storage-type') == 'zfs':
- config_zfs(dev)
-
-
-def config_btrfs(dev):
- status_set('maintenance',
- 'Configuring btrfs container storage')
- if has_storage():
- cmd = ['lxc', 'storage', 'create', LXD_POOL, 'btrfs',
- 'source={}'.format(dev)]
- check_call(cmd)
- else:
- lxd_stop()
- cmd = ['mkfs.btrfs', '-f', dev]
- check_call(cmd)
- mount(dev,
- '/var/lib/lxd',
- options='user_subvol_rm_allowed',
- persist=True,
- filesystem='btrfs')
- cmd = ['btrfs', 'quota', 'enable', '/var/lib/lxd']
- check_call(cmd)
- lxd_start()
-
-
-def config_lvm(dev):
- if (is_lvm_physical_volume(dev) and
- list_lvm_volume_group(dev) == VG_NAME):
- log('Device already configured for LVM/LXD, skipping')
- return
- status_set('maintenance',
- 'Configuring LVM container storage')
-
- cmd = ['systemctl', 'enable', 'lvm2-lvmetad']
- check_call(cmd)
- cmd = ['systemctl', 'start', 'lvm2-lvmetad']
- check_call(cmd)
- if has_storage():
- cmd = ['lxc', 'storage', 'create', LXD_POOL, 'lvm',
- 'source={}'.format(dev), 'lvm.vg_name={}'.format(VG_NAME)]
- check_call(cmd)
- else:
- create_lvm_physical_volume(dev)
- create_lvm_volume_group(VG_NAME, dev)
- cmd = ['lxc', 'config', 'set', 'storage.lvm_vg_name', VG_NAME]
- check_call(cmd)
-
- # The LVM thinpool logical volume is lazily created, either on
- # image import or container creation. This will force LV creation.
- create_and_import_busybox_image()
-
-
-def config_zfs(dev):
- status_set('maintenance',
- 'Configuring zfs container storage')
- if ZFS_POOL_NAME in zpools():
- log('ZFS pool already exists; skipping zfs configuration')
- return
-
- if config('overwrite'):
- cmd = ['zpool', 'create', '-f', ZFS_POOL_NAME, dev]
- else:
- cmd = ['zpool', 'create', ZFS_POOL_NAME, dev]
- check_call(cmd)
-
- if has_storage():
- cmd = ["lxc", "storage", "create", LXD_POOL, "zfs",
- "source={}".format(ZFS_POOL_NAME)]
- else:
- cmd = ['lxc', 'config', 'set', 'storage.zfs_pool_name',
- ZFS_POOL_NAME]
-
- check_call(cmd)
-
-
-def has_storage(search_for_pool=None):
- try:
- pools = (check_output(['lxc', 'storage', 'list'])
- .decode('utf-8')
- .splitlines())
- if search_for_pool is not None:
- for pool in pools:
- try:
- name = pool.split(' ')[1]
- if search_for_pool == name:
- return True
- except IndexError:
- pass
- return False
- return True
- except subprocess.CalledProcessError:
- return False
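-
-# 'lxc storage list' prints an ASCII table; splitting a row on single
-# spaces leaves the pool name at index 1, e.g. (illustrative row):
-#     | lxd | | zfs | 1 |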
-
-
-def create_and_import_busybox_image():
- """Create a busybox image for lxd.
-
- This creates a busybox image without reaching out to
- the network.
-
- This function is, for the most part, heavily based on
- the busybox image generation in the pylxd integration
- tests.
- """
- workdir = tempfile.mkdtemp()
- xz = "xz"
-
- destination_tar = os.path.join(workdir, "busybox.tar")
- target_tarball = tarfile.open(destination_tar, "w:")
-
- metadata = {'architecture': os.uname()[4],
- 'creation_date': int(os.stat("/bin/busybox").st_ctime),
- 'properties': {
- 'os': "Busybox",
- 'architecture': os.uname()[4],
- 'description': "Busybox %s" % os.uname()[4],
- 'name': "busybox-%s" % os.uname()[4],
- # Don't overwrite actual busybox images.
- 'obfuscate': str(uuid.uuid4())}}
-
- # Add busybox
- with open("/bin/busybox", "rb") as fd:
- busybox_file = tarfile.TarInfo()
- busybox_file.size = os.stat("/bin/busybox").st_size
- busybox_file.mode = 0o755
- busybox_file.name = "rootfs/bin/busybox"
- target_tarball.addfile(busybox_file, fd)
-
- # Add symlinks
- busybox = subprocess.Popen(["/bin/busybox", "--list-full"],
- stdout=subprocess.PIPE,
- universal_newlines=True)
- busybox.wait()
-
- for path in busybox.stdout.read().split("\n"):
- if not path.strip():
- continue
-
- symlink_file = tarfile.TarInfo()
- symlink_file.type = tarfile.SYMTYPE
- symlink_file.linkname = "/bin/busybox"
- symlink_file.name = "rootfs/%s" % path.strip()
- target_tarball.addfile(symlink_file)
-
- # Add directories
- for path in ("dev", "mnt", "proc", "root", "sys", "tmp"):
- directory_file = tarfile.TarInfo()
- directory_file.type = tarfile.DIRTYPE
- directory_file.name = "rootfs/%s" % path
- target_tarball.addfile(directory_file)
-
- # Add the metadata file
- metadata_yaml = json.dumps(metadata, sort_keys=True,
- indent=4, separators=(',', ': '),
- ensure_ascii=False).encode('utf-8') + b"\n"
-
- metadata_file = tarfile.TarInfo()
- metadata_file.size = len(metadata_yaml)
- metadata_file.name = "metadata.yaml"
- target_tarball.addfile(metadata_file,
- io.BytesIO(metadata_yaml))
-
- inittab = tarfile.TarInfo()
- inittab.size = 1
- inittab.name = "/rootfs/etc/inittab"
- target_tarball.addfile(inittab, io.BytesIO(b"\n"))
-
- target_tarball.close()
-
- # Compress the tarball
- r = subprocess.call([xz, "-9", destination_tar])
- if r:
- raise Exception("Failed to compress: %s" % destination_tar)
-
- image_file = destination_tar+".xz"
-
- cmd = ['lxc', 'image', 'import', image_file, '--alias', 'busybox']
- check_call(cmd)
-
- shutil.rmtree(workdir)
-
-
-def determine_packages():
- packages = [] + BASE_PACKAGES
- packages = list(set(packages))
-
- # criu package doesn't exist for arm64/s390x prior to artful
- machine = platform.machine()
- if (CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'artful' and
- (machine == 'arm64' or machine == 's390x')):
- packages.remove('criu')
-
- if config('use-source'):
- packages.extend(LXD_SOURCE_PACKAGES)
- else:
- packages.extend(LXD_PACKAGES)
- return packages
-
-
-def filesystem_mounted(fs):
- return fs in [f for f, m in mounts()]
-
-
-def lxd_trust_password():
- db = kv()
- password = db.get('lxd-password')
- if not password:
- password = db.set('lxd-password', pwgen(PW_LENGTH))
- db.flush()
- return password
-
-
-def configure_lxd_remote(settings, user='root'):
- cmd = ['sudo', '-u', user,
- 'lxc', 'remote', 'list']
- output = check_output(cmd).decode('utf-8')
- if settings['hostname'] not in output:
- log('Adding new remote {hostname}:{address}'.format(**settings))
- cmd = ['sudo', '-u', user,
- 'lxc', 'remote', 'add',
- settings['hostname'],
- 'https://{}:8443'.format(settings['address']),
- '--accept-certificate',
- '--password={}'.format(settings['password'])]
- check_call(cmd)
- else:
- log('Updating remote {hostname}:{address}'.format(**settings))
- cmd = ['sudo', '-u', user,
- 'lxc', 'remote', 'set-url',
- settings['hostname'],
- 'https://{}:8443'.format(settings['address'])]
- check_call(cmd)
-
-
-@retry_on_exception(5, base_delay=2, exc_type=CalledProcessError)
-def configure_lxd_host():
- ubuntu_release = lsb_release()['DISTRIB_CODENAME'].lower()
- cmp_ubuntu_release = CompareHostReleases(ubuntu_release)
- if cmp_ubuntu_release > "vivid":
- log('>= Wily deployment - configuring LXD trust password and address',
- level=INFO)
- cmd = ['lxc', 'config', 'set',
- 'core.trust_password', lxd_trust_password()]
- check_call(cmd)
- cmd = ['lxc', 'config', 'set',
- 'core.https_address', '[::]']
- check_call(cmd)
-
- if not is_container():
- # NOTE(jamespage): None of the below is worth doing when running
- # within a container on an all-in-one install
-
- # Configure live migration
- if cmp_ubuntu_release == 'xenial':
- uname = os.uname()[2]
- if uname > '4.4.0-122-generic':
- pkg = "linux-modules-extra-{}"
- else:
- pkg = "linux-image-extra-{}"
- apt_install(pkg.format(uname), fatal=True)
-
- if cmp_ubuntu_release >= 'xenial':
- modprobe('netlink_diag')
-
- # Enable/disable use of ext4 within nova-lxd containers
- if os.path.exists(EXT4_USERNS_MOUNTS):
- with open(EXT4_USERNS_MOUNTS, 'w') as userns_mounts:
- userns_mounts.write(
- 'Y\n' if config('enable-ext4-userns') else 'N\n'
- )
-
- configure_uid_mapping()
- elif cmp_ubuntu_release == "vivid":
- log('Vivid deployment - loading overlay kernel module', level=INFO)
- cmd = ['modprobe', 'overlay']
- check_call(cmd)
- with open('/etc/modules', 'r+') as modules:
- if 'overlay' not in modules.read():
- modules.write('overlay\n')
-
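For anything newer than vivid, configure_lxd_host() reduces to two `lxc config set` calls, with the kernel and migration work skipped inside containers. Two hedged caveats: the lexicographic uname comparison above misorders revisions with different digit counts (e.g. '4.4.0-21-generic' sorts above '4.4.0-122-generic'), and '[::]' binds both IPv4 and IPv6 on LXD's default port 8443, matching the remote URLs used earlier. A sketch of just the API-exposure step, assuming the lxc client is on PATH:

    from subprocess import check_call

    def expose_lxd_api(trust_password):
        # Let remote clients register by presenting the trust password...
        check_call(['lxc', 'config', 'set',
                    'core.trust_password', trust_password])
        # ...and listen on every address (IPv4 and IPv6, default port 8443).
        check_call(['lxc', 'config', 'set',
                    'core.https_address', '[::]'])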
-
-def clean_storage(block_device):
- '''Ensures a block device is clean. That is:
- - unmounted
- - any lvm volume groups are deactivated
- - any lvm physical device signatures removed
- - partition table wiped
-
- :param block_device: str: Full path to block device to clean.
- '''
- for mp, d in mounts():
- if d == block_device:
- log('clean_storage(): Found %s mounted @ %s, unmounting.' %
- (d, mp))
- umount(mp, persist=True)
-
- if is_lvm_physical_volume(block_device):
- deactivate_lvm_volume_group(block_device)
- remove_lvm_physical_volume(block_device)
-
- zap_disk(block_device)
-
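clean_storage() tears things down in dependency order: unmount anything backed by the device, deactivate and remove LVM state, then wipe the partition table, so each step operates on a device the previous one has released. A stdlib sketch of the unmount scan (the LVM and zap steps come from charmhelpers in the charm itself):

    import subprocess

    def unmount_block_device(block_device):
        """Unmount every mountpoint backed by block_device (sketch)."""
        with open('/proc/mounts') as mounts:
            for line in mounts:
                # /proc/mounts rows start with: device mountpoint fstype ...
                device, mountpoint = line.split()[:2]
                if device == block_device:
                    subprocess.check_call(['umount', mountpoint])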
-
-def lxd_running():
- '''Check whether LXD is running or not'''
- cmd = ['pgrep', 'lxd']
- try:
- check_call(cmd)
- return True
- except CalledProcessError:
- return False
-
-
-def lxd_stop():
- '''Stop lxd.socket and lxd.service'''
- cmd = ['systemctl', 'stop', 'lxd.socket']
- check_call(cmd)
- cmd = ['systemctl', 'stop', 'lxd']
- check_call(cmd)
-
-
-def lxd_start():
- cmd = ['systemctl', 'start', 'lxd']
- check_call(cmd)
-
-
-def assess_status():
- '''Determine status of current unit'''
- if is_unit_upgrading_set():
- status_set('blocked',
- 'Ready for do-release-upgrade and reboot. '
- 'Set complete when finished.')
- elif lxd_running():
- status_set('active', 'Unit is ready')
- else:
- status_set('blocked', 'LXD is not running')
- application_version_set(get_upstream_version(VERSION_PACKAGE))
-
-
-def zpools():
- '''
- Query the currently configured ZFS pools
-
- @return: list of strings of pool names
- '''
- try:
- zpools = check_output(['zpool', 'list', '-H']).splitlines()
- pools = []
- for l in zpools:
- l = l.decode('UTF-8')
- pools.append(l.split()[0])
- return pools
- except CalledProcessError:
- return []
-
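`zpool list -H` emits one tab-separated row per pool with the name in column 0, which is all the parser above relies on; the ZFS_SINGLE_POOL/ZFS_MULTIPLE_POOLS fixtures in the unit tests further down show the full row shape. For example:

    sample = b"testpool\t232G\t976M\t231G\t-\t7%\t0%\t1.04x\tONLINE\t-\n"
    pools = [line.decode('UTF-8').split()[0] for line in sample.splitlines()]
    assert pools == ['testpool']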
-
-SUBUID = '/etc/subuid'
-SUBGID = '/etc/subgid'
-DEFAULT_COUNT = '327680000' # 5000 containers
-ROOT_USER = 'root'
-
-
-def configure_uid_mapping():
- '''Extend root user /etc/{subuid,subgid} mapping for LXD use'''
- restart_lxd = False
- for uidfile in (SUBUID, SUBGID):
- with open(uidfile, 'r+') as f_id:
- ids = []
- for s_id in f_id.readlines():
- _id = s_id.strip().split(':')
- if (_id[0] == ROOT_USER and
- _id[2] != DEFAULT_COUNT):
- _id[2] = DEFAULT_COUNT
- restart_lxd = True
- ids.append(_id)
- f_id.seek(0)
- for _id in ids:
- f_id.write('{}:{}:{}\n'.format(*_id))
- f_id.truncate()
- if restart_lxd:
- # NOTE: restart LXD to pickup changes in id map config
- service_restart('lxd')
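Each /etc/subuid and /etc/subgid line is user:first_id:count; configure_uid_mapping() widens root's count to 327680000 (65536 ids per container times 5000 containers, per the DEFAULT_COUNT comment) and rewrites the file in place via seek(0)/truncate(), restarting LXD only when something actually changed. The line transformation on its own, as a sketch:

    def widen_mapping(lines, user='root', count='327680000'):
        """Rewrite subuid/subgid-style lines, widening one user's range."""
        out = []
        for line in lines:
            name, first, span = line.strip().split(':')
            if name == user:
                span = count
            out.append('{}:{}:{}'.format(name, first, span))
        return out

    # widen_mapping(['root:100000:65536']) == ['root:100000:327680000']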
diff --git a/hooks/post-series-upgrade b/hooks/post-series-upgrade
deleted file mode 120000
index 4a63a15..0000000
--- a/hooks/post-series-upgrade
+++ /dev/null
@@ -1 +0,0 @@
-lxd_hooks.py
\ No newline at end of file
diff --git a/hooks/pre-series-upgrade b/hooks/pre-series-upgrade
deleted file mode 120000
index 4a63a15..0000000
--- a/hooks/pre-series-upgrade
+++ /dev/null
@@ -1 +0,0 @@
-lxd_hooks.py
\ No newline at end of file
diff --git a/hooks/start b/hooks/start
deleted file mode 120000
index 4a63a15..0000000
--- a/hooks/start
+++ /dev/null
@@ -1 +0,0 @@
-lxd_hooks.py
\ No newline at end of file
diff --git a/hooks/stop b/hooks/stop
deleted file mode 120000
index 4a63a15..0000000
--- a/hooks/stop
+++ /dev/null
@@ -1 +0,0 @@
-lxd_hooks.py
\ No newline at end of file
diff --git a/hooks/update-status b/hooks/update-status
deleted file mode 120000
index 4a63a15..0000000
--- a/hooks/update-status
+++ /dev/null
@@ -1 +0,0 @@
-lxd_hooks.py
\ No newline at end of file
diff --git a/hooks/upgrade-charm b/hooks/upgrade-charm
deleted file mode 120000
index 4a63a15..0000000
--- a/hooks/upgrade-charm
+++ /dev/null
@@ -1 +0,0 @@
-lxd_hooks.py
\ No newline at end of file
diff --git a/icon.svg b/icon.svg
deleted file mode 100644
index 15cfd60..0000000
--- a/icon.svg
+++ /dev/null
@@ -1,321 +0,0 @@
diff --git a/lib/.keep b/lib/.keep
deleted file mode 100644
index f49b91a..0000000
--- a/lib/.keep
+++ /dev/null
@@ -1,3 +0,0 @@
- This file was created by release-tools to ensure that this empty
- directory is preserved in vcs re: lint check definitions in global
- tox.ini files. This file can be removed if/when this dir is actually in use.
diff --git a/metadata.yaml b/metadata.yaml
deleted file mode 100644
index 5a0bd6b..0000000
--- a/metadata.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-name: lxd
-summary: A hypervisor for LXC containers
-maintainer: OpenStack Charmers
-description: |
- A hypervisor for LXC containers
-tags:
- - misc
- - openstack
-series:
- - xenial
- - bionic
- - cosmic
- - disco
-subordinate: true
-peers:
- lxd-migration:
- interface: lxd-migration
-provides:
- lxd:
- interface: containers
- scope: container
-requires:
- container:
- interface: juju-info
- scope: container
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index b8fec1e..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-pbr>=1.8.0,<1.9.0
-simplejson>=2.2.0
-netifaces>=0.10.4
-netaddr>=0.7.12,!=0.7.16
-Jinja2>=2.6 # BSD License (3 clause)
-six>=1.9.0
-dnspython>=1.12.0
-psutil>=1.1.1,<2.0.0
diff --git a/revision b/revision
deleted file mode 100644
index d00491f..0000000
--- a/revision
+++ /dev/null
@@ -1 +0,0 @@
-1
diff --git a/templates/lxd_service b/templates/lxd_service
deleted file mode 100644
index de70e2b..0000000
--- a/templates/lxd_service
+++ /dev/null
@@ -1,12 +0,0 @@
-[Unit]
-Description=Container hypervisor based on LXC
-After=cgmanager.service lxc.service
-Requires=cgmanager.service lxc.service
-
-[Service]
-ExecStart=/usr/bin/lxd --group lxd --debug
-KillMode=process
-Restart=on-failure
-
-[Install]
-WantedBy=multi-user.target
diff --git a/templates/lxd_upstart b/templates/lxd_upstart
deleted file mode 100644
index 95e6aba..0000000
--- a/templates/lxd_upstart
+++ /dev/null
@@ -1,9 +0,0 @@
-description "Container hypervisor based on LXC"
-author "Stéphane Graber "
-
-start on started lxc
-stop on runlevel [06]
-
-respawn
-
-exec /usr/bin/lxd --group lxd --debug
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644
index 272ce1d..0000000
--- a/test-requirements.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-charm-tools>=2.4.4
-coverage>=3.6
-mock>=1.2
-flake8>=2.2.4,<=2.4.1
-stestr>=2.2.0
-requests>=2.18.4
-# BEGIN: Amulet OpenStack Charm Helper Requirements
-# Liberty client lower constraints
-amulet>=1.14.3,<2.0;python_version=='2.7'
-bundletester>=0.6.1,<1.0;python_version=='2.7'
-python-ceilometerclient>=1.5.0
-python-cinderclient>=1.4.0
-python-glanceclient>=1.1.0
-python-heatclient>=0.8.0
-python-keystoneclient>=1.7.1
-python-neutronclient>=3.1.0
-python-novaclient>=2.30.1
-python-openstackclient>=1.7.0
-python-swiftclient>=2.6.0
-pika>=0.10.0,<1.0
-distro-info
-git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
-# END: Amulet OpenStack Charm Helper Requirements
-# NOTE: workaround for 14.04 pip/tox
-pytz
-pyudev # for ceph-* charm unit tests (not mocked?)
diff --git a/tests/README.md b/tests/README.md
deleted file mode 100644
index 046be7f..0000000
--- a/tests/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Overview
-
-This directory provides Amulet tests to verify basic deployment functionality
-from the perspective of this charm, its requirements and its features, as
-exercised in a subset of the full OpenStack deployment test bundle topology.
-
-For full details on functional testing of OpenStack charms please refer to
-the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing)
-section of the OpenStack Charm Guide.
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
deleted file mode 100644
index eebb359..0000000
--- a/tests/basic_deployment.py
+++ /dev/null
@@ -1,430 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Basic functional black-box test
-# See tests/README before modifying or adding new tests.
-
-import amulet
-import time
-
-import keystoneclient
-from keystoneclient.v3 import client as keystone_client_v3
-from novaclient import client as nova_client
-
-from charmhelpers.contrib.openstack.amulet.deployment import (
- OpenStackAmuletDeployment
-)
-
-from charmhelpers.contrib.openstack.amulet.utils import (
- OpenStackAmuletUtils,
- DEBUG,
-)
-
-# Use DEBUG to turn on debug logging
-u = OpenStackAmuletUtils(DEBUG)
-
-
-LXD_IMAGE_URL = 'http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-root.tar.xz' # noqa
-LXD_IMAGE_NAME = 'trusty-server-cloudimg-amd64-root.tar.xz'
-
-
-class LXDBasicDeployment(OpenStackAmuletDeployment):
- """Amulet tests on a basic nova compute deployment."""
-
- def __init__(self, series=None, openstack=None, source=None,
- stable=False):
- """Deploy the test environment."""
- super(LXDBasicDeployment, self).__init__(series, openstack,
- source, stable)
- self._add_services()
- self._add_relations()
- self._configure_services()
- self._deploy()
-
- u.log.info('Waiting on extended status checks...')
- exclude_services = []
- self._auto_wait_for_status(exclude_services=exclude_services)
-
- self.d.sentry.wait()
- self._initialize_tests()
-
- def _add_services(self):
- """Add services
-
- Add the services that we're testing, where lxd is local,
- and the rest of the services are from lp branches that are
- compatible with the local charm (e.g. stable or next).
- """
- this_service = {'name': 'lxd'}
-
- other_services = [
- {'name': 'percona-cluster'},
- {'name': 'nova-compute', 'units': 2},
- {'name': 'rabbitmq-server'},
- {'name': 'nova-cloud-controller'},
- {'name': 'keystone'},
- {'name': 'glance'}
- ]
- if self._get_openstack_release() >= self.xenial_ocata:
- other_ocata_services = [
- {'name': 'neutron-gateway'},
- {'name': 'neutron-api'},
- {'name': 'neutron-openvswitch'},
- ]
- other_services += other_ocata_services
-
- super(LXDBasicDeployment, self)._add_services(this_service,
- other_services)
-
- def _add_relations(self):
- """Add all of the relations for the services."""
- relations = {
- 'lxd:lxd': 'nova-compute:lxd',
- 'nova-compute:image-service': 'glance:image-service',
- 'nova-compute:amqp': 'rabbitmq-server:amqp',
- 'nova-cloud-controller:shared-db': 'percona-cluster:shared-db',
- 'nova-cloud-controller:identity-service': 'keystone:'
- 'identity-service',
- 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
- 'nova-cloud-controller:cloud-compute': 'nova-compute:'
- 'cloud-compute',
- 'nova-cloud-controller:image-service': 'glance:image-service',
- 'keystone:shared-db': 'percona-cluster:shared-db',
- 'glance:identity-service': 'keystone:identity-service',
- 'glance:shared-db': 'percona-cluster:shared-db',
- 'glance:amqp': 'rabbitmq-server:amqp'
- }
- if self._get_openstack_release() >= self.xenial_ocata:
- ocata_relations = {
- 'neutron-gateway:amqp': 'rabbitmq-server:amqp',
- 'nova-cloud-controller:quantum-network-service':
- 'neutron-gateway:quantum-network-service',
- 'neutron-api:shared-db': 'percona-cluster:shared-db',
- 'neutron-api:amqp': 'rabbitmq-server:amqp',
- 'neutron-api:neutron-api': 'nova-cloud-controller:neutron-api',
- 'neutron-api:identity-service': 'keystone:identity-service',
- 'nova-compute:neutron-plugin': 'neutron-openvswitch:'
- 'neutron-plugin',
- 'rabbitmq-server:amqp': 'neutron-openvswitch:amqp',
- }
- relations.update(ocata_relations)
-
- super(LXDBasicDeployment, self)._add_relations(relations)
-
- def _configure_services(self):
- """Configure all of the services."""
- nova_cc_config = {
- 'ram-allocation-ratio': '5.0'
- }
- if self._get_openstack_release() >= self.xenial_ocata:
- nova_cc_config['network-manager'] = 'Neutron'
-
- lxd_config = {
- 'block-devices': '/dev/vdb',
- 'ephemeral-unmount': '/mnt',
- 'storage-type': 'zfs',
- 'overwrite': True
- }
-
- nova_config = {
- 'enable-live-migration': True,
- 'enable-resize': True,
- 'migration-auth-type': 'ssh',
- 'virt-type': 'lxd'
- }
-
- keystone_config = {
- 'admin-password': 'openstack',
- 'admin-token': 'ubuntutesting'
- }
-
- pxc_config = {
- 'max-connections': 1000,
- }
-
- configs = {
- 'nova-compute': nova_config,
- 'lxd': lxd_config,
- 'keystone': keystone_config,
- 'nova-cloud-controller': nova_cc_config,
- 'percona-cluster': pxc_config,
- }
-
- super(LXDBasicDeployment, self)._configure_services(configs)
-
- def _initialize_tests(self):
- """Perform final initialization before tests get run."""
-
- u.log.debug(self.d.sentry['lxd'])
- # Access the sentries for inspecting service units
- self.lxd0_sentry = self.d.sentry['lxd'][0]
- # XXX: rockstar (6 Mar 2016) - Due to what might be an amulet
- # bug, it's possible that we only detect a single lxd instance.
- # Either that, or something drastically more nefarious is going
- # on. In order to move ahead, this hack is put in place.
- # See https://github.com/juju/amulet/issues/122
- try:
- self.lxd1_sentry = self.d.sentry['lxd'][1]
- except IndexError:
- self.lxd1_sentry = None
- self.compute0_sentry = self.d.sentry['nova-compute'][0]
- self.compute1_sentry = self.d.sentry['nova-compute'][1]
-
- self.pxc_sentry = self.d.sentry['percona-cluster'][0]
- self.keystone_sentry = self.d.sentry['keystone'][0]
- self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
- self.nova_cc_sentry = self.d.sentry['nova-cloud-controller'][0]
- self.glance_sentry = self.d.sentry['glance'][0]
-
- u.log.debug('openstack release val: {}'.format(
- self._get_openstack_release()))
- u.log.debug('openstack release str: {}'.format(
- self._get_openstack_release_string()))
-
- # Authenticate admin with keystone
- self.keystone_session, self.keystone = (
- u.get_default_keystone_session(self.keystone_sentry,
- self._get_openstack_release())
- )
-
- # Authenticate admin with glance endpoint
- self.glance = u.authenticate_glance_admin(self.keystone)
-
- self.nova_admin = nova_client.Client(2, session=self.keystone_session)
-
- u.create_flavor(nova=self.nova_admin,
- name='m1.tiny', ram=512, vcpus=1, disk=1)
-
- keystone_ip = self.keystone_sentry.info['public-address']
-
- # Create a demo tenant/role/user
- self.demo_tenant = 'demoTenant'
- self.demo_role = 'demoRole'
- self.demo_user = 'demoUser'
- self.demo_project = 'demoProject'
- self.demo_domain = 'demoDomain'
-
- if self._get_openstack_release() >= self.xenial_queens:
- self.create_users_v3()
- self.demo_user_session, _ = u.get_keystone_session(
- keystone_ip,
- self.demo_user,
- 'password',
- api_version=3,
- user_domain_name=self.demo_domain,
- project_domain_name=self.demo_domain,
- project_name=self.demo_project
- )
- self.keystone_demo = keystone_client_v3.Client(
- session=self.demo_user_session)
- self.nova_demo = nova_client.Client(
- 2,
- session=self.demo_user_session)
- else:
- self.create_users_v2()
- # Authenticate demo user with keystone
- self.keystone_demo = \
- u.authenticate_keystone_user(
- self.keystone, user=self.demo_user,
- password='password',
- tenant=self.demo_tenant)
- # Authenticate demo user with nova-api
- self.nova_demo = u.authenticate_nova_user(self.keystone,
- user=self.demo_user,
- password='password',
- tenant=self.demo_tenant)
-
- def create_users_v3(self):
- try:
- self.keystone.projects.find(name=self.demo_project)
- except keystoneclient.exceptions.NotFound:
- domain = self.keystone.domains.create(
- self.demo_domain,
- description='Demo Domain',
- enabled=True
- )
- project = self.keystone.projects.create(
- self.demo_project,
- domain,
- description='Demo Project',
- enabled=True,
- )
- user = self.keystone.users.create(
- self.demo_user,
- domain=domain.id,
- project=self.demo_project,
- password='password',
- email='demov3@demo.com',
- description='Demo',
- enabled=True)
- role = self.keystone.roles.find(name='Admin')
- self.keystone.roles.grant(
- role.id,
- user=user.id,
- project=project.id)
-
- def create_users_v2(self):
- if not u.tenant_exists(self.keystone, self.demo_tenant):
- tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
- description='demo tenant',
- enabled=True)
-
- self.keystone.roles.create(name=self.demo_role)
- self.keystone.users.create(name=self.demo_user,
- password='password',
- tenant_id=tenant.id,
- email='demo@demo.com')
-
- def test_100_services(self):
- """Verify the expected services are running on the corresponding
- service units."""
- u.log.debug('Checking system services on units...')
-
- services = {
- self.lxd0_sentry: ['lxd']
- }
- # XXX: rockstar (6 Mar 2016) - See related XXX comment
- # above.
- if self.lxd1_sentry is not None:
- services[self.lxd1_sentry] = ['lxd']
-
- ret = u.validate_services_by_name(services)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('Ok')
-
- def test_104_openstack_compute_api_endpoint(self):
- """Verify the openstack compute api (osapi) endpoint data."""
- u.log.debug('Checking compute endpoint data...')
-
- endpoints = self.keystone.endpoints.list()
- admin_port = internal_port = public_port = '8774'
- expected = {
- 'id': u.not_null,
- 'region': 'RegionOne',
- 'adminurl': u.valid_url,
- 'internalurl': u.valid_url,
- 'publicurl': u.valid_url,
- 'service_id': u.not_null
- }
-
- ret = u.validate_endpoint_data(
- endpoints, admin_port, internal_port,
- public_port, expected,
- openstack_release=self._get_openstack_release()
- )
- if ret:
- message = 'osapi endpoint: {}'.format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('Ok')
-
- # TODO: Add bi-directional lxd service relation introspection
-
- def test_402_lxc_config_validate(self):
- """Inspect and validate lxc running config on all lxd units."""
- u.log.debug('Checking lxc config on lxd units...')
-
- cmd = 'sudo lxc config show'
- expected = [
- 'core.https_address: \'[::]\'',
- 'core.trust_password: true',
- ]
- invalid = []
- for sentry_unit in self.d.sentry['lxd']:
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- output, _ = u.run_cmd_unit(sentry_unit, cmd)
- for expected_content in expected:
- version, _ = u.run_cmd_unit(sentry_unit, 'sudo lxc --version')
- if expected_content not in output:
- invalid.append('{} {} lxc config does not contain '
- '{}'.format(unit_name, host,
- expected_content))
-
- if invalid:
- u.log.error('lxc config check failed')
- amulet.raise_status(amulet.FAIL, msg='; '.join(invalid))
-
- u.log.debug('Ok')
-
- def test_410_image_instance_create(self):
- """Create an image/instance, verify they exist, and delete them."""
- u.log.debug('Create glance image, nova LXD instance...')
-
- # Add nova key pair
- # TODO: Nova keypair create
-
- # Add glance image
- # XXX: rockstar (11 Apr 2016) - It is awkward that we are uploading
- # a rootfs image as raw in glance. This is an issue with nova-lxd
- # itself, and should be fixed soon.
- image = u.glance_create_image(self.glance,
- LXD_IMAGE_NAME,
- LXD_IMAGE_URL,
- disk_format='raw',
- hypervisor_type='lxc')
- if not image:
- amulet.raise_status(amulet.FAIL, msg='Image create failed')
-
- # Create nova instance
- instance_name = 'lxd-instance-{}'.format(time.time())
- instance = u.create_instance(self.nova_demo, LXD_IMAGE_NAME,
- instance_name, 'm1.tiny')
- if not instance:
- amulet.raise_status(amulet.FAIL, msg='Nova instance create failed')
-
- found = False
- for instance in self.nova_demo.servers.list():
- if instance.name == instance_name:
- found = True
- # TODO: Get instance IP address
- if instance.status != 'ACTIVE':
- msg = 'Nova instance is not active'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- if not found:
- message = 'Nova instance does not exist'
- amulet.raise_status(amulet.FAIL, msg=message)
-
- # TODO: Confirm nova instance: TCP port knock
- # NOTE(beisner):
- # This will require additional environment configuration
- # and post-deployment operation such as network creation
- # before it can be tested. The instance has no IP address.
- #
- # host = '1.2.3.4'
- # port = 22
- # timeout = 30
- # connected = u.port_knock_tcp(host, port, timeout)
- # if connected:
- # u.log.debug('Socket connect OK: {}:{}'.format(host, port))
- # else:
- # msg = 'Socket connect failed: {}:{}'.format(host, port)
- # amulet.raise_status(amulet.FAIL, msg)
-
- # TODO: ICMP instance ping
- # TODO: SSH instance login
-
- # Cleanup
- u.delete_resource(self.glance.images, image.id,
- msg='glance image')
-
- u.delete_resource(self.nova_demo.servers, instance.id,
- msg='nova instance')
- # TODO: Delete nova keypair
-
- u.log.debug('Ok')
diff --git a/tests/dev-basic-cosmic-rocky b/tests/dev-basic-cosmic-rocky
deleted file mode 100755
index 88b570b..0000000
--- a/tests/dev-basic-cosmic-rocky
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic lxd deployment on cosmic-rocky."""
-
-from basic_deployment import LXDBasicDeployment
-
-if __name__ == '__main__':
- deployment = LXDBasicDeployment(
- series='cosmic', openstack=None, source=None,
- stable=False)
- deployment.run_tests()
diff --git a/tests/dev-basic-disco-stein b/tests/dev-basic-disco-stein
deleted file mode 100755
index 9b4261e..0000000
--- a/tests/dev-basic-disco-stein
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic lxd deployment on disco-stein."""
-
-from basic_deployment import LXDBasicDeployment
-
-if __name__ == '__main__':
- deployment = LXDBasicDeployment(
- series='disco', openstack=None, source=None,
- stable=False)
- deployment.run_tests()
diff --git a/tests/gate-basic-bionic-queens b/tests/gate-basic-bionic-queens
deleted file mode 100755
index 68db7d1..0000000
--- a/tests/gate-basic-bionic-queens
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic lxd deployment on bionic-queens."""
-
-from basic_deployment import LXDBasicDeployment
-
-if __name__ == '__main__':
- deployment = LXDBasicDeployment(
- series='bionic', openstack=None, source=None,
- stable=False)
- deployment.run_tests()
diff --git a/tests/gate-basic-bionic-rocky b/tests/gate-basic-bionic-rocky
deleted file mode 100755
index e7079f3..0000000
--- a/tests/gate-basic-bionic-rocky
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic lxd deployment on bionic-rocky."""
-
-from basic_deployment import LXDBasicDeployment
-
-if __name__ == '__main__':
- deployment = LXDBasicDeployment(series='bionic',
- openstack='cloud:bionic-rocky',
- source='cloud:bionic-updates/rocky')
- deployment.run_tests()
diff --git a/tests/gate-basic-bionic-stein b/tests/gate-basic-bionic-stein
deleted file mode 100755
index 52c2b89..0000000
--- a/tests/gate-basic-bionic-stein
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic lxd deployment on bionic-stein."""
-
-from basic_deployment import LXDBasicDeployment
-
-if __name__ == '__main__':
- deployment = LXDBasicDeployment(series='bionic',
- openstack='cloud:bionic-stein',
- source='cloud:bionic-stein')
- deployment.run_tests()
diff --git a/tests/gate-basic-xenial-mitaka b/tests/gate-basic-xenial-mitaka
deleted file mode 100755
index 2bd7f34..0000000
--- a/tests/gate-basic-xenial-mitaka
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic lxd deployment on xenial-mitaka."""
-
-from basic_deployment import LXDBasicDeployment
-
-if __name__ == '__main__':
- deployment = LXDBasicDeployment(
- series='xenial', openstack=None, source=None,
- stable=False)
- deployment.run_tests()
diff --git a/tests/gate-basic-xenial-ocata b/tests/gate-basic-xenial-ocata
deleted file mode 100755
index a5553f7..0000000
--- a/tests/gate-basic-xenial-ocata
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic lxd deployment on xenial-ocata."""
-
-from basic_deployment import LXDBasicDeployment
-
-if __name__ == '__main__':
- deployment = LXDBasicDeployment(series='xenial',
- openstack='cloud:xenial-ocata',
- source='cloud:xenial-updates/ocata')
- deployment.run_tests()
diff --git a/tests/gate-basic-xenial-pike b/tests/gate-basic-xenial-pike
deleted file mode 100755
index 2d8e25c..0000000
--- a/tests/gate-basic-xenial-pike
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic lxd deployment on xenial-pike."""
-
-from basic_deployment import LXDBasicDeployment
-
-if __name__ == '__main__':
- deployment = LXDBasicDeployment(series='xenial',
- openstack='cloud:xenial-pike',
- source='cloud:xenial-updates/pike')
- deployment.run_tests()
diff --git a/tests/gate-basic-xenial-queens b/tests/gate-basic-xenial-queens
deleted file mode 100755
index 57cd766..0000000
--- a/tests/gate-basic-xenial-queens
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic lxd deployment on xenial-queens."""
-
-from basic_deployment import LXDBasicDeployment
-
-if __name__ == '__main__':
- deployment = LXDBasicDeployment(series='xenial',
- openstack='cloud:xenial-queens',
- source='cloud:xenial-updates/queens')
- deployment.run_tests()
diff --git a/tests/tests.yaml b/tests/tests.yaml
deleted file mode 100644
index a03e7ba..0000000
--- a/tests/tests.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Bootstrap the model if necessary.
-bootstrap: True
-# Re-use bootstrap node.
-reset: True
-# Use tox/requirements to drive the venv instead of bundletester's venv feature.
-virtualenv: False
-# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet.
-makefile: []
-# Do not specify juju PPA sources. Juju is presumed to be pre-installed
-# and configured in all test runner environments.
-#sources:
-# Do not specify or rely on system packages.
-#packages:
-# Do not specify python packages here. Use test-requirements.txt
-# and tox instead, i.e. the venv is constructed before bundletester
-# is invoked.
-#python-packages:
-reset_timeout: 600
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index d9a94e9..0000000
--- a/tox.ini
+++ /dev/null
@@ -1,91 +0,0 @@
-# Classic charm: ./tox.ini
-# This file is managed centrally by release-tools and should not be modified
-# within individual charm repos.
-[tox]
-envlist = pep8,py27
-skipsdist = True
-
-[testenv]
-setenv = VIRTUAL_ENV={envdir}
- PYTHONHASHSEED=0
- CHARM_DIR={envdir}
- AMULET_SETUP_TIMEOUT=5400
-install_command =
- pip install {opts} {packages}
-commands = stestr run {posargs}
-whitelist_externals = juju
-passenv = HOME TERM AMULET_* CS_API_*
-
-[testenv:py27]
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-
-[testenv:py35]
-basepython = python3.5
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-
-[testenv:py36]
-basepython = python3.6
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-
-[testenv:pep8]
-basepython = python3
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands = flake8 {posargs} hooks unit_tests tests actions lib
- charm-proof
-
-[testenv:venv]
-basepython = python3
-commands = {posargs}
-
-[testenv:func27-noop]
-# DRY RUN - For Debug
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands =
- bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy
-
-[testenv:func27]
-# Charm Functional Test
-# Run all gate tests which are +x (expected to always pass)
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands =
- bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy
-
-[testenv:func27-smoke]
-# Charm Functional Test
-# Run a specific test as an Amulet smoke test (expected to always pass)
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands =
- bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-rocky --no-destroy
-
-[testenv:func27-dfs]
-# Charm Functional Test
-# Run all deploy-from-source tests which are +x (may not always pass!)
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands =
- bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy
-
-[testenv:func27-dev]
-# Charm Functional Test
-# Run all development test targets which are +x (may not always pass!)
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands =
- bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy
-
-[flake8]
-ignore = E402,E226,W504
-exclude = */charmhelpers
diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py
deleted file mode 100644
index 8c9a6a7..0000000
--- a/unit_tests/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-
-sys.path.append('hooks/')
diff --git a/unit_tests/test_lxd_utils.py b/unit_tests/test_lxd_utils.py
deleted file mode 100644
index ecfda00..0000000
--- a/unit_tests/test_lxd_utils.py
+++ /dev/null
@@ -1,290 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tests for hooks.lxd_utils."""
-import mock
-import textwrap
-
-import lxd_utils
-import testing
-
-
-class TestLXDUtilsDeterminePackages(testing.CharmTestCase):
- """Tests for hooks.lxd_utils.determine_packages."""
-
- TO_PATCH = [
- 'config',
- ]
-
- def setUp(self):
- super(TestLXDUtilsDeterminePackages, self).setUp(
- lxd_utils, self.TO_PATCH)
- self.config.side_effect = self.test_config.get
-
- def test_determine_packages(self):
- """A list of LXD packages should be returned."""
- expected = [
- 'btrfs-tools',
- 'criu',
- 'lvm2',
- 'lxd',
- 'lxd-client',
- 'thin-provisioning-tools',
- 'zfsutils-linux',
- ]
-
- packages = lxd_utils.determine_packages()
-
- self.assertEqual(expected, sorted(packages))
-
-
-class TestLXDUtilsCreateAndImportBusyboxImage(testing.CharmTestCase):
- """Tests for hooks.lxd_utils.create_and_import_busybox_image."""
-
- TO_PATCH = []
-
- def setUp(self):
- super(TestLXDUtilsCreateAndImportBusyboxImage, self).setUp(
- lxd_utils, self.TO_PATCH)
-
- @mock.patch('lxd_utils.open')
- @mock.patch('lxd_utils.os.stat')
- @mock.patch('lxd_utils.subprocess.Popen')
- @mock.patch('lxd_utils.shutil.rmtree')
- @mock.patch('lxd_utils.subprocess.call')
- @mock.patch('lxd_utils.tarfile.open')
- @mock.patch('lxd_utils.tempfile.mkdtemp')
- @mock.patch('lxd_utils.check_call')
- def test_create_and_import_busybox_image(
- self, check_call, mkdtemp, tarfile_open, subprocess_call,
- rmtree, Popen, stat, mock_open):
- """A busybox image is imported into lxd."""
- mkdtemp.return_value = '/not/a/real/path'
- tarfile_open.return_value = mock.Mock()
- subprocess_call.return_value = False
- Popen_rv = mock.Mock()
- Popen_rv.stdout.read.return_value = '\n'
- Popen.return_value = Popen_rv
- stat_rv = mock.Mock()
- stat_rv.st_ctime = 0
- stat_rv.st_size = 0
- stat.return_value = stat_rv
-
- lxd_utils.create_and_import_busybox_image()
-
- self.assertTrue(check_call.called)
- args = check_call.call_args[0][0]
- self.assertEqual(['lxc', 'image', 'import'], args[:3])
- self.assertEqual(['--alias', 'busybox'], args[4:])
-
- # Assert all other mocks *would* have been called.
- mkdtemp.assert_called_once_with()
- tarfile_open.assert_called_once_with(
- '/not/a/real/path/busybox.tar', 'w:')
- subprocess_call.assert_called_once_with(
- ['xz', '-9', '/not/a/real/path/busybox.tar'])
- Popen.assert_called_once_with(
- ['/bin/busybox', '--list-full'], stdout=-1,
- universal_newlines=True)
- Popen_rv.stdout.read.assert_called_once_with()
- stat.assert_called_with('/bin/busybox')
- mock_open.assert_called_once_with('/bin/busybox', 'rb')
-
-
-class TestGetBlockDevices(testing.CharmTestCase):
- """Tests for hooks.lxd_utils.get_block_devices."""
-
- TO_PATCH = [
- 'config',
- ]
-
- def setUp(self):
- super(TestGetBlockDevices, self).setUp(
- lxd_utils, self.TO_PATCH)
- self.config.side_effect = self.test_config.get
-
- def testEmpty(self):
- """When no config is specified, an empty list is returned."""
- devices = lxd_utils.get_block_devices()
-
- self.assertEqual([], devices)
-
- def testSingleDevice(self):
- """Return a list with the single device."""
- self.test_config.set('block-devices', '/dev/vdb')
- devices = lxd_utils.get_block_devices()
-
- self.assertEqual(['/dev/vdb'], devices)
-
- def testMultipleDevices(self):
- """Return a list with all devices."""
- self.test_config.set('block-devices', '/dev/vdb /dev/vdc')
-
- devices = lxd_utils.get_block_devices()
-
- self.assertEqual(['/dev/vdb', '/dev/vdc'], devices)
-
-
-ZFS_SINGLE_POOL = """testpool 232G 976M 231G - 7% 0% 1.04x ONLINE -
-"""
-
-ZFS_MULTIPLE_POOLS = """testpool 232G 976M 231G - 7% 0% 1.04x ONLINE -
-testpool2 232G 976M 231G - 7% 0% 1.04x ONLINE -
-"""
-
-
-class TestZFSPool(testing.CharmTestCase):
- """Tests for hooks.lxd_utils.zpools"""
- TO_PATCH = [
- 'check_output',
- ]
-
- def setUp(self):
- super(TestZFSPool, self).setUp(lxd_utils, self.TO_PATCH)
-
- def test_no_pools(self):
- """When no pools are configured, an empty list is returned"""
- self.check_output.return_value = ""
- self.assertEqual(lxd_utils.zpools(), [])
-
- def test_single_pool(self):
- """Return a list with a single pool"""
- self.check_output.return_value = ZFS_SINGLE_POOL
- self.assertEqual(lxd_utils.zpools(), ['testpool'])
-
- def test_multiple_pools(self):
- """Return a list with a multiple pools"""
- self.check_output.return_value = ZFS_MULTIPLE_POOLS
- self.assertEqual(lxd_utils.zpools(), ['testpool', 'testpool2'])
-
-
-class TestLXDUtilsAssessStatus(testing.CharmTestCase):
- """Tests for hooks.lxd_utils.assess_status."""
-
- TO_PATCH = [
- 'application_version_set',
- 'get_upstream_version',
- 'status_set',
- 'lxd_running',
- ]
-
- def setUp(self):
- super(TestLXDUtilsAssessStatus, self).setUp(
- lxd_utils, self.TO_PATCH)
- self.get_upstream_version.return_value = '2.0.1'
-
- def test_assess_status_active(self):
- '''When LXD is running, ensure active is set'''
- self.lxd_running.return_value = True
- lxd_utils.assess_status()
- self.status_set.assert_called_with('active',
- 'Unit is ready')
- self.application_version_set.assert_called_with('2.0.1')
- self.get_upstream_version.assert_called_with(
- lxd_utils.VERSION_PACKAGE
- )
-
- def test_assess_status_blocked(self):
- '''When LXD is not running, ensure blocked is set'''
- self.lxd_running.return_value = False
- lxd_utils.assess_status()
- self.status_set.assert_called_with('blocked',
- 'LXD is not running')
- self.application_version_set.assert_called_with('2.0.1')
- self.get_upstream_version.assert_called_with(
- lxd_utils.VERSION_PACKAGE
- )
-
-
-class TestConfigureUIDGID(testing.CharmTestCase):
- """Tests for hooks.lxd_utils.configure_uid_mapping."""
-
- TO_PATCH = [
- 'check_call',
- 'service_restart'
- ]
-
- UIDMAP = [
- 'lxd:100000:65536',
- 'root:100000:65536',
- 'ubuntu:165536:65536',
- ]
-
- def setUp(self):
- super(TestConfigureUIDGID, self).setUp(
- lxd_utils, self.TO_PATCH)
-
- def test_configure_uid_mapping(self):
- with testing.patch_open() as (_open, _file):
- _file.readlines.return_value = self.UIDMAP
- lxd_utils.configure_uid_mapping()
- _open.assert_has_calls([
- mock.call('/etc/subuid', 'r+'),
- mock.call('/etc/subgid', 'r+')
- ])
- _file.write.assert_has_calls([
- mock.call('lxd:100000:65536\n'),
- mock.call('root:100000:327680000\n'),
- mock.call('ubuntu:165536:65536\n')
- ])
- self.service_restart.assert_called_with('lxd')
-
-
-class MyProcessError(Exception):
- pass
-
-
-class TestHasStorage(testing.CharmTestCase):
- """Tests for hooks.lxd_utils.has_storage"""
-
- TO_PATCH = [
- 'check_output',
- ]
-
- def setUp(self):
- super(TestHasStorage, self).setUp(lxd_utils, self.TO_PATCH)
-
- def test_has_storage_default(self):
- self.check_output.return_value = b""
- self.assertTrue(lxd_utils.has_storage())
-
- @mock.patch('subprocess.CalledProcessError', new=MyProcessError)
- def test_has_storage_default_error(self):
- def raise_error(*args, **kwargs):
- raise MyProcessError()
-
- self.check_output.side_effect = raise_error
- self.assertFalse(lxd_utils.has_storage())
-
- def test_has_storage_by_pool(self):
- self.check_output.return_value = textwrap.dedent(
- b"""
- +---------+-------------+--------+--------------------------------+---------+
- | NAME | DESCRIPTION | DRIVER | SOURCE | USED BY |
- +---------+-------------+--------+--------------------------------+---------+
- | default | | btrfs | /var/lib/lxd/disks/default.img | 1 |
- +---------+-------------+--------+--------------------------------+---------+
- """) # NOQA W501
- self.assertTrue(lxd_utils.has_storage('default'))
-
- def test_has_storage_missing_pool(self):
- self.check_output.return_value = textwrap.dedent(
- b"""
- +---------+-------------+--------+--------------------------------+---------+
- | NAME | DESCRIPTION | DRIVER | SOURCE | USED BY |
- +---------+-------------+--------+--------------------------------+---------+
- | default | | btrfs | /var/lib/lxd/disks/default.img | 1 |
- +---------+-------------+--------+--------------------------------+---------+
- """) # NOQA W501
- self.assertFalse(lxd_utils.has_storage('btrfs'))
diff --git a/unit_tests/testing.py b/unit_tests/testing.py
deleted file mode 100644
index 08c6db7..0000000
--- a/unit_tests/testing.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import unittest
-import os
-import yaml
-
-from contextlib import contextmanager
-from mock import patch, MagicMock
-
-patch('charmhelpers.contrib.openstack.utils.set_os_workload_status').start()
-patch('charmhelpers.core.hookenv.status_set').start()
-
-
-def load_config():
- '''
- Walk backwards from __file__ looking for config.yaml, load and return the
- 'options' section.
- '''
- config = None
- f = __file__
- while config is None:
- d = os.path.dirname(f)
- if os.path.isfile(os.path.join(d, 'config.yaml')):
- config = os.path.join(d, 'config.yaml')
- break
- f = d
-
- if not config:
- logging.error('Could not find config.yaml in any parent directory '
- 'of %s. ' % __file__)
- raise Exception
-
- return yaml.safe_load(open(config).read())['options']
-
-
-def get_default_config():
- '''
- Load default charm config from config.yaml and return it as a dict.
- If no default is set in config.yaml, its value is None.
- '''
- default_config = {}
- config = load_config()
- for k, v in config.iteritems():
- if 'default' in v:
- default_config[k] = v['default']
- else:
- default_config[k] = None
- return default_config
-
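The two helpers above walk upward from the test module until a config.yaml appears, then flatten each declared option to its default (None when no default is given), which is what TestConfig later serves back through get(). Note that iteritems() pins get_default_config() to Python 2; the flattening step itself, in the Python 3 spelling, as a sketch:

    def defaults_from_options(options):
        """Map each charm config option to its declared default, or None."""
        return {key: spec.get('default') for key, spec in options.items()}

    # defaults_from_options({
    #     'use-source': {'type': 'boolean', 'default': False},
    #     'block-devices': {'type': 'string'},
    # }) == {'use-source': False, 'block-devices': None}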
-
-class CharmTestCase(unittest.TestCase):
-
- def setUp(self, obj, patches):
- super(CharmTestCase, self).setUp()
- self.patches = patches
- self.obj = obj
- self.test_config = TestConfig()
- self.test_relation = TestRelation()
- self.patch_all()
-
- def patch(self, method):
- _m = patch.object(self.obj, method)
- mock = _m.start()
- self.addCleanup(_m.stop)
- return mock
-
- def patch_all(self):
- for method in self.patches:
- setattr(self, method, self.patch(method))
-
-
-class TestConfig(object):
-
- def __init__(self):
- self.config = get_default_config()
-
- def get(self, attr=None):
- if not attr:
- return self.get_all()
- try:
- return self.config[attr]
- except KeyError:
- return None
-
- def get_all(self):
- return self.config
-
- def set(self, attr, value):
- if attr not in self.config:
- raise KeyError
- self.config[attr] = value
-
-
-class TestRelation(object):
-
- def __init__(self, relation_data={}):
- self.relation_data = relation_data
-
- def set(self, relation_data):
- self.relation_data = relation_data
-
- def get(self, attr=None, unit=None, rid=None):
- if attr is None:
- return self.relation_data
- elif attr in self.relation_data:
- return self.relation_data[attr]
- return None
-
-
-@contextmanager
-def patch_open():
- '''Patch open() to allow mocking both open() itself and the file that is
- yielded.
-
- Yields the mocks for "open" and "file", respectively.'''
- mock_open = MagicMock(spec=open)
- mock_file = MagicMock(spec=file) # noqa - transitional py2 py3 lint work-around
-
- @contextmanager
- def stub_open(*args, **kwargs):
- mock_open(*args, **kwargs)
- yield mock_file
-
- with patch('__builtin__.open', stub_open):
- yield mock_open, mock_file
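patch_open() swaps the builtin open for a stub that records the call and always yields the same MagicMock file, which is how test_configure_uid_mapping above can assert on both the paths opened and the lines written. The `__builtin__` module and the `file` type are Python 2 spellings; a hedged Python 3 equivalent:

    import io
    from contextlib import contextmanager
    from unittest import mock

    @contextmanager
    def patch_open_py3():
        """Patch builtins.open; yield the mocks for open and the file."""
        mock_open = mock.MagicMock(spec=open)
        mock_file = mock.MagicMock(spec=io.IOBase)  # py3 has no `file` type

        @contextmanager
        def stub_open(*args, **kwargs):
            mock_open(*args, **kwargs)
            yield mock_file

        with mock.patch('builtins.open', stub_open):
            yield mock_open, mock_file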