diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 98435fd..0000000 --- a/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -bin -.testrepository -.coverage -.tox -*.sw[nop] -*.pyc diff --git a/.project b/.project deleted file mode 100644 index 435dd5a..0000000 --- a/.project +++ /dev/null @@ -1,17 +0,0 @@ - - - odl-controller - - - - - - org.python.pydev.PyDevBuilder - - - - - - org.python.pydev.pythonNature - - diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 801646b..0000000 --- a/.testr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION - -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/.zuul.yaml b/.zuul.yaml index aa9c508..e7c200a 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,4 +1,3 @@ - project: templates: - - python-charm-jobs - - openstack-python35-jobs-nonvoting + - noop-jobs diff --git a/LICENSE b/LICENSE deleted file mode 100644 index d645695..0000000 --- a/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Makefile b/Makefile deleted file mode 100644 index 158f74c..0000000 --- a/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/make -PYTHON := /usr/bin/env python - -lint: - @tox -e pep8 - -test: - @echo Starting unit tests... - @tox -e py27 - -functional_test: - @echo Starting amulet tests... - @tox -e func27 - -bin/charm_helpers_sync.py: - @mkdir -p bin - @curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py - - -sync: bin/charm_helpers_sync.py - @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml diff --git a/README.md b/README.md index a239e77..b72ee4f 100644 --- a/README.md +++ b/README.md @@ -1,42 +1,6 @@ -# Overview +This project is no longer maintained. -OpenDaylight (www.opendaylight.org) is a fully featured Software Defined Networking (SDN) solution for private clouds. It provides a Neutron plugin to -integrate with OpenStack. 
- -This charm is designed to be used in conjunction with the rest of the OpenStack related charms in the charm store to virtualize the network that Nova Compute instances plug into. - -This charm provides the controller component of an OpenDayLight installation. - -Only OpenStack Icehouse or newer is supported. - -# Usage - -To deploy the OpenDayLight controller: - - juju deploy odl-controller - -To integrate OpenDayLight into an OpenStack Cloud (subset of commands): - - juju deploy neutron-api-odl - juju deploy openvswitch-odl - -The neutron-gateway charm must also be deployed with 'ovs-odl' as the plugin configuration option: - - cat > config.yaml << EOF - neutron-gateway: - plugin: ovs-odl - EOF - juju deploy --config config.yaml neutron-gateway - -And then add relations between services to complete the deployment: - - juju add-relation neutron-api neutron-api-odl - juju add-relation neutron-api-odl odl-controller - - juju add-relation openvswitch-odl nova-compute - juju add-relation openvswitch-odl neutron-gateway - juju add-relation openvswitch-odl odl-controller - -# Contact Information - -Report bugs on [Launchpad](http://bugs.launchpad.net/charms/+source/odl-controller/+filebug) +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". diff --git a/actions/.keep b/actions/.keep deleted file mode 100644 index f49b91a..0000000 --- a/actions/.keep +++ /dev/null @@ -1,3 +0,0 @@ - This file was created by release-tools to ensure that this empty - directory is preserved in vcs re: lint check definitions in global - tox.ini files. This file can be removed if/when this dir is actually in use. diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml deleted file mode 100644 index 4b24474..0000000 --- a/charm-helpers-sync.yaml +++ /dev/null @@ -1,11 +0,0 @@ -repo: https://github.com/juju/charm-helpers -destination: hooks/charmhelpers -include: - - core - - fetch - - payload - - osplatform - - contrib.openstack|inc=* - - contrib.storage - - contrib.network.ip - - contrib.python.packages diff --git a/config.yaml b/config.yaml deleted file mode 100644 index 3f2c691..0000000 --- a/config.yaml +++ /dev/null @@ -1,40 +0,0 @@ -options: - profile: - type: string - default: default - description: | - SDN controller profile to configure OpenDayLight for; supported values include - - cisco-vpp: Cisco VPP for OpenStack - openvswitch-odl: Open vSwitch OpenDayLight for OpenStack - Helium release - openvswitch-odl-lithium: Open vSwitch OpenDayLight for OpenStack - Lithium release - openvswitch-odl-beryllium: Open vSwitch OpenDayLight for OpenStack - Beryllium release - openvswitch-odl-boron: Open vSwitch OpenDayLight for OpenStack - Boron release - - Only a single profile is supported at any one time. - install-url: - type: string - default: "https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz" - description: | - Web addressable location of OpenDayLight binaries to install - - If unset, the charm will install binaries from the opendaylight-karaf - package. - install-sources: - type: string - default: '' - description: | - Package sources to install. Can be used to specify where to install the - opendaylight-karaf package from. 
- install-keys: - type: string - default: '' - description: Apt keys for package install sources - http-proxy: - type: string - default: '' - description: Proxy to use for http connections for OpenDayLight - https-proxy: - type: string - default: '' - description: Proxy to use for https connections for OpenDayLight diff --git a/copyright b/copyright deleted file mode 100644 index 6c92060..0000000 --- a/copyright +++ /dev/null @@ -1,16 +0,0 @@ -Format: http://dep.debian.net/deps/dep5/ - -Files: * -Copyright: Copyright 2015, Canonical Ltd., All Rights Reserved. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. diff --git a/files/odl-controller.conf b/files/odl-controller.conf deleted file mode 100644 index 9995abb..0000000 --- a/files/odl-controller.conf +++ /dev/null @@ -1,22 +0,0 @@ -description "OpenDaylight Controller" -author "Robert Ayres " - -start on runlevel [2345] -stop on runlevel [!2345] - -chdir /opt/opendaylight-karaf -setuid opendaylight - -env ODL_HOME=/opt/opendaylight-karaf -env ODL_LOG=/var/log/opendaylight/odl-controller.log - -pre-start script - [ -e "$ODL_HOME" ] || { stop; exit 0; } -end script - -exec "$ODL_HOME/bin/karaf" server >> "$ODL_LOG" 2>&1 < /dev/null - -pre-stop script - "$ODL_HOME/bin/karaf" stop - sleep 10 -end script diff --git a/files/odl-controller.service b/files/odl-controller.service deleted file mode 100644 index 1cf22bb..0000000 --- a/files/odl-controller.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=OpenDayLight SDN Controller -After=network.target - -[Service] -Type=forking -User=opendaylight -Group=opendaylight -ExecStart=/opt/opendaylight-karaf/bin/start - -[Install] -WantedBy=multi-user.target diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py deleted file mode 100644 index e7aa471..0000000 --- a/hooks/charmhelpers/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. 
-from __future__ import print_function
-from __future__ import absolute_import
-
-import functools
-import inspect
-import subprocess
-import sys
-
-try:
-    import six  # flake8: noqa
-except ImportError:
-    if sys.version_info.major == 2:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
-    else:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
-    import six  # flake8: noqa
-
-try:
-    import yaml  # flake8: noqa
-except ImportError:
-    if sys.version_info.major == 2:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
-    else:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
-    import yaml  # flake8: noqa
-
-
-# Holds a mapping of mangled function names that have been deprecated
-# using the @deprecate decorator below. This is so that the warning is only
-# printed once for each usage of the function.
-__deprecated_functions = {}
-
-
-def deprecate(warning, date=None, log=None):
-    """Add a deprecation warning the first time the function is used.
-
-    The date, which is a string in semi-ISO8601 format, indicates the
-    year-month that the function is officially going to be removed.
-
-    usage:
-
-    @deprecate('use core/fetch/add_source() instead', '2017-04')
-    def contributed_add_source_thing(...):
-        ...
-
-    And it then prints to the log ONCE that the function is deprecated.
-    The reason for passing the logging function (log) is so that hookenv.log
-    can be used for a charm if needed.
-
-    :param warning: String to indicate where the function has moved to.
-    :param date: optional string, in YYYY-MM format, to indicate when the
-                 function will definitely (probably) be removed.
-    :param log: The log function to call in order to log. If None, logs to
-                stdout.
-    """
-    def wrap(f):
-
-        @functools.wraps(f)
-        def wrapped_f(*args, **kwargs):
-            try:
-                module = inspect.getmodule(f)
-                file = inspect.getsourcefile(f)
-                lines = inspect.getsourcelines(f)
-                f_name = "{}-{}-{}..{}-{}".format(
-                    module.__name__, file, lines[0], lines[-1], f.__name__)
-            except (IOError, TypeError):
-                # assume it was local, so just use the name of the function
-                f_name = f.__name__
-            if f_name not in __deprecated_functions:
-                __deprecated_functions[f_name] = True
-                s = "DEPRECATION WARNING: Function {} is being removed".format(
-                    f.__name__)
-                if date:
-                    s = "{} on/around {}".format(s, date)
-                if warning:
-                    s = "{} : {}".format(s, warning)
-                if log:
-                    log(s)
-                else:
-                    print(s)
-            return f(*args, **kwargs)
-        return wrapped_f
-    return wrap
diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
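For reference, a minimal usage sketch of the deprecate() decorator removed in the hunk above — assuming the vendored charmhelpers package is importable; the decorated helper below is hypothetical and exists only for illustration:

# Minimal sketch: one-time deprecation warning via charmhelpers.deprecate.
# Assumes the vendored charmhelpers package shown in this diff is on the
# Python path; legacy_add_source() is a hypothetical helper.
from charmhelpers import deprecate


@deprecate('use core/fetch/add_source() instead', date='2017-04')
def legacy_add_source(source):
    # Hypothetical legacy helper kept only to demonstrate the decorator.
    return source


# The first call prints a single line resembling:
#   DEPRECATION WARNING: Function legacy_add_source is being removed
#   on/around 2017-04 : use core/fetch/add_source() instead
# Later calls run silently, because the warning is recorded once per
# function in the module-level __deprecated_functions mapping.
legacy_add_source('ppa:example/ppa')
legacy_add_source('ppa:example/ppa')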
diff --git a/hooks/charmhelpers/contrib/network/__init__.py b/hooks/charmhelpers/contrib/network/__init__.py deleted file mode 100644 index d7567b8..0000000 --- a/hooks/charmhelpers/contrib/network/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py deleted file mode 100644 index b13277b..0000000 --- a/hooks/charmhelpers/contrib/network/ip.py +++ /dev/null @@ -1,602 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import glob -import re -import subprocess -import six -import socket - -from functools import partial - -from charmhelpers.fetch import apt_install, apt_update -from charmhelpers.core.hookenv import ( - config, - log, - network_get_primary_address, - unit_get, - WARNING, - NoNetworkBinding, -) - -from charmhelpers.core.host import ( - lsb_release, - CompareHostReleases, -) - -try: - import netifaces -except ImportError: - apt_update(fatal=True) - if six.PY2: - apt_install('python-netifaces', fatal=True) - else: - apt_install('python3-netifaces', fatal=True) - import netifaces - -try: - import netaddr -except ImportError: - apt_update(fatal=True) - if six.PY2: - apt_install('python-netaddr', fatal=True) - else: - apt_install('python3-netaddr', fatal=True) - import netaddr - - -def _validate_cidr(network): - try: - netaddr.IPNetwork(network) - except (netaddr.core.AddrFormatError, ValueError): - raise ValueError("Network (%s) is not in CIDR presentation format" % - network) - - -def no_ip_found_error_out(network): - errmsg = ("No IP address found in network(s): %s" % network) - raise ValueError(errmsg) - - -def _get_ipv6_network_from_address(address): - """Get an netaddr.IPNetwork for the given IPv6 address - :param address: a dict as returned by netifaces.ifaddresses - :returns netaddr.IPNetwork: None if the address is a link local or loopback - address - """ - if address['addr'].startswith('fe80') or address['addr'] == "::1": - return None - - prefix = address['netmask'].split("/") - if len(prefix) > 1: - netmask = prefix[1] - else: - netmask = address['netmask'] - return netaddr.IPNetwork("%s/%s" % (address['addr'], - netmask)) - - -def get_address_in_network(network, fallback=None, fatal=False): - """Get an IPv4 or IPv6 address within the network from the host. - - :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. 
Supports multiple networks as a space-delimited list. - :param fallback (str): If no address is found, return fallback. - :param fatal (boolean): If no address is found, fallback is not - set and fatal is True then exit(1). - """ - if network is None: - if fallback is not None: - return fallback - - if fatal: - no_ip_found_error_out(network) - else: - return None - - networks = network.split() or [network] - for network in networks: - _validate_cidr(network) - network = netaddr.IPNetwork(network) - for iface in netifaces.interfaces(): - try: - addresses = netifaces.ifaddresses(iface) - except ValueError: - # If an instance was deleted between - # netifaces.interfaces() run and now, its interfaces are gone - continue - if network.version == 4 and netifaces.AF_INET in addresses: - for addr in addresses[netifaces.AF_INET]: - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) - - if network.version == 6 and netifaces.AF_INET6 in addresses: - for addr in addresses[netifaces.AF_INET6]: - cidr = _get_ipv6_network_from_address(addr) - if cidr and cidr in network: - return str(cidr.ip) - - if fallback is not None: - return fallback - - if fatal: - no_ip_found_error_out(network) - - return None - - -def is_ipv6(address): - """Determine whether provided address is IPv6 or not.""" - try: - address = netaddr.IPAddress(address) - except netaddr.AddrFormatError: - # probably a hostname - so not an address at all! - return False - - return address.version == 6 - - -def is_address_in_network(network, address): - """ - Determine whether the provided address is within a network range. - - :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. - :param address: An individual IPv4 or IPv6 address without a net - mask or subnet prefix. For example, '192.168.1.1'. - :returns boolean: Flag indicating whether address is in network. - """ - try: - network = netaddr.IPNetwork(network) - except (netaddr.core.AddrFormatError, ValueError): - raise ValueError("Network (%s) is not in CIDR presentation format" % - network) - - try: - address = netaddr.IPAddress(address) - except (netaddr.core.AddrFormatError, ValueError): - raise ValueError("Address (%s) is not in correct presentation format" % - address) - - if address in network: - return True - else: - return False - - -def _get_for_address(address, key): - """Retrieve an attribute of or the physical interface that - the IP address provided could be bound to. - - :param address (str): An individual IPv4 or IPv6 address without a net - mask or subnet prefix. For example, '192.168.1.1'. - :param key: 'iface' for the physical interface name or an attribute - of the configured interface, for example 'netmask'. - :returns str: Requested attribute or None if address is not bindable. 
- """ - address = netaddr.IPAddress(address) - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - if address.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - else: - return addresses[netifaces.AF_INET][0][key] - - if address.version == 6 and netifaces.AF_INET6 in addresses: - for addr in addresses[netifaces.AF_INET6]: - network = _get_ipv6_network_from_address(addr) - if not network: - continue - - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - elif key == 'netmask' and cidr: - return str(cidr).split('/')[1] - else: - return addr[key] - return None - - -get_iface_for_address = partial(_get_for_address, key='iface') - - -get_netmask_for_address = partial(_get_for_address, key='netmask') - - -def resolve_network_cidr(ip_address): - ''' - Resolves the full address cidr of an ip_address based on - configured network interfaces - ''' - netmask = get_netmask_for_address(ip_address) - return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr) - - -def format_ipv6_addr(address): - """If address is IPv6, wrap it in '[]' otherwise return None. - - This is required by most configuration files when specifying IPv6 - addresses. - """ - if is_ipv6(address): - return "[%s]" % address - - return None - - -def is_ipv6_disabled(): - try: - result = subprocess.check_output( - ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], - stderr=subprocess.STDOUT, - universal_newlines=True) - except subprocess.CalledProcessError: - return True - - return "net.ipv6.conf.all.disable_ipv6 = 1" in result - - -def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, - fatal=True, exc_list=None): - """Return the assigned IP address for a given interface, if any. - - :param iface: network interface on which address(es) are expected to - be found. - :param inet_type: inet address family - :param inc_aliases: include alias interfaces in search - :param fatal: if True, raise exception if address not found - :param exc_list: list of addresses to ignore - :return: list of ip addresses - """ - # Extract nic if passed /dev/ethX - if '/' in iface: - iface = iface.split('/')[-1] - - if not exc_list: - exc_list = [] - - try: - inet_num = getattr(netifaces, inet_type) - except AttributeError: - raise Exception("Unknown inet type '%s'" % str(inet_type)) - - interfaces = netifaces.interfaces() - if inc_aliases: - ifaces = [] - for _iface in interfaces: - if iface == _iface or _iface.split(':')[0] == iface: - ifaces.append(_iface) - - if fatal and not ifaces: - raise Exception("Invalid interface '%s'" % iface) - - ifaces.sort() - else: - if iface not in interfaces: - if fatal: - raise Exception("Interface '%s' not found " % (iface)) - else: - return [] - - else: - ifaces = [iface] - - addresses = [] - for netiface in ifaces: - net_info = netifaces.ifaddresses(netiface) - if inet_num in net_info: - for entry in net_info[inet_num]: - if 'addr' in entry and entry['addr'] not in exc_list: - addresses.append(entry['addr']) - - if fatal and not addresses: - raise Exception("Interface '%s' doesn't have any %s addresses." 
% - (iface, inet_type)) - - return sorted(addresses) - - -get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') - - -def get_iface_from_addr(addr): - """Work out on which interface the provided address is configured.""" - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - for inet_type in addresses: - for _addr in addresses[inet_type]: - _addr = _addr['addr'] - # link local - ll_key = re.compile("(.+)%.*") - raw = re.match(ll_key, _addr) - if raw: - _addr = raw.group(1) - - if _addr == addr: - log("Address '%s' is configured on iface '%s'" % - (addr, iface)) - return iface - - msg = "Unable to infer net iface on which '%s' is configured" % (addr) - raise Exception(msg) - - -def sniff_iface(f): - """Ensure decorated function is called with a value for iface. - - If no iface provided, inject net iface inferred from unit private address. - """ - def iface_sniffer(*args, **kwargs): - if not kwargs.get('iface', None): - kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) - - return f(*args, **kwargs) - - return iface_sniffer - - -@sniff_iface -def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, - dynamic_only=True): - """Get assigned IPv6 address for a given interface. - - Returns list of addresses found. If no address found, returns empty list. - - If iface is None, we infer the current primary interface by doing a reverse - lookup on the unit private-address. - - We currently only support scope global IPv6 addresses i.e. non-temporary - addresses. If no global IPv6 address is found, return the first one found - in the ipv6 address list. - - :param iface: network interface on which ipv6 address(es) are expected to - be found. - :param inc_aliases: include alias interfaces in search - :param fatal: if True, raise exception if address not found - :param exc_list: list of addresses to ignore - :param dynamic_only: only recognise dynamic addresses - :return: list of ipv6 addresses - """ - addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', - inc_aliases=inc_aliases, fatal=fatal, - exc_list=exc_list) - - if addresses: - global_addrs = [] - for addr in addresses: - key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") - m = re.match(key_scope_link_local, addr) - if m: - eui_64_mac = m.group(1) - iface = m.group(2) - else: - global_addrs.append(addr) - - if global_addrs: - # Make sure any found global addresses are not temporary - cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output(cmd).decode('UTF-8') - if dynamic_only: - key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") - else: - key = re.compile("inet6 (.+)/[0-9]+ scope global.*") - - addrs = [] - for line in out.split('\n'): - line = line.strip() - m = re.match(key, line) - if m and 'temporary' not in line: - # Return the first valid address we find - for addr in global_addrs: - if m.group(1) == addr: - if not dynamic_only or \ - m.group(1).endswith(eui_64_mac): - addrs.append(addr) - - if addrs: - return addrs - - if fatal: - raise Exception("Interface '%s' does not have a scope global " - "non-temporary ipv6 address." 
% iface) - - return [] - - -def get_bridges(vnic_dir='/sys/devices/virtual/net'): - """Return a list of bridges on the system.""" - b_regex = "%s/*/bridge" % vnic_dir - return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] - - -def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): - """Return a list of nics comprising a given bridge on the system.""" - brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) - return [x.split('/')[-1] for x in glob.glob(brif_regex)] - - -def is_bridge_member(nic): - """Check if a given nic is a member of a bridge.""" - for bridge in get_bridges(): - if nic in get_bridge_nics(bridge): - return True - - return False - - -def is_ip(address): - """ - Returns True if address is a valid IP address. - """ - try: - # Test to see if already an IPv4/IPv6 address - address = netaddr.IPAddress(address) - return True - except (netaddr.AddrFormatError, ValueError): - return False - - -def ns_query(address): - try: - import dns.resolver - except ImportError: - if six.PY2: - apt_install('python-dnspython', fatal=True) - else: - apt_install('python3-dnspython', fatal=True) - import dns.resolver - - if isinstance(address, dns.name.Name): - rtype = 'PTR' - elif isinstance(address, six.string_types): - rtype = 'A' - else: - return None - - try: - answers = dns.resolver.query(address, rtype) - except dns.resolver.NXDOMAIN: - return None - - if answers: - return str(answers[0]) - return None - - -def get_host_ip(hostname, fallback=None): - """ - Resolves the IP for a given hostname, or returns - the input if it is already an IP. - """ - if is_ip(hostname): - return hostname - - ip_addr = ns_query(hostname) - if not ip_addr: - try: - ip_addr = socket.gethostbyname(hostname) - except Exception: - log("Failed to resolve hostname '%s'" % (hostname), - level=WARNING) - return fallback - return ip_addr - - -def get_hostname(address, fqdn=True): - """ - Resolves hostname for given IP, or returns the input - if it is already a hostname. - """ - if is_ip(address): - try: - import dns.reversename - except ImportError: - if six.PY2: - apt_install("python-dnspython", fatal=True) - else: - apt_install("python3-dnspython", fatal=True) - import dns.reversename - - rev = dns.reversename.from_address(address) - result = ns_query(rev) - - if not result: - try: - result = socket.gethostbyaddr(address)[0] - except Exception: - return None - else: - result = address - - if fqdn: - # strip trailing . - if result.endswith('.'): - return result[:-1] - else: - return result - else: - return result.split('.')[0] - - -def port_has_listener(address, port): - """ - Returns True if the address:port is open and being listened to, - else False. - - @param address: an IP address or hostname - @param port: integer port - - Note calls 'zc' via a subprocess shell - """ - cmd = ['nc', '-z', address, str(port)] - result = subprocess.call(cmd) - return not(bool(result)) - - -def assert_charm_supports_ipv6(): - """Check whether we are able to support charms ipv6.""" - release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(release) < "trusty": - raise Exception("IPv6 is not supported in the charms for Ubuntu " - "versions less than Trusty 14.04") - - -def get_relation_ip(interface, cidr_network=None): - """Return this unit's IP for the given interface. - - Allow for an arbitrary interface to use with network-get to select an IP. - Handle all address selection options including passed cidr network and - IPv6. 
-
-    Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8')
-
-    @param interface: string name of the relation.
-    @param cidr_network: string CIDR network to select an address from.
-    @raises Exception if prefer-ipv6 is configured but IPv6 unsupported.
-    @returns IPv6 or IPv4 address
-    """
-    # Select the interface address first
-    # For possible use as a fallback below with get_address_in_network
-    try:
-        # Get the interface specific IP
-        address = network_get_primary_address(interface)
-    except NotImplementedError:
-        # If network-get is not available
-        address = get_host_ip(unit_get('private-address'))
-    except NoNetworkBinding:
-        log("No network binding for {}".format(interface), WARNING)
-        address = get_host_ip(unit_get('private-address'))
-
-    if config('prefer-ipv6'):
-        # Currently IPv6 has priority, eventually we want IPv6 to just be
-        # another network space.
-        assert_charm_supports_ipv6()
-        return get_ipv6_addr()[0]
-    elif cidr_network:
-        # If a specific CIDR network is passed get the address from that
-        # network.
-        return get_address_in_network(cidr_network, address)
-
-    # Return the interface address
-    return address
diff --git a/hooks/charmhelpers/contrib/openstack/__init__.py b/hooks/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/openstack/alternatives.py b/hooks/charmhelpers/contrib/openstack/alternatives.py
deleted file mode 100644
index 547de09..0000000
--- a/hooks/charmhelpers/contrib/openstack/alternatives.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -''' Helper for managing alternatives for file conflict resolution ''' - -import subprocess -import shutil -import os - - -def install_alternative(name, target, source, priority=50): - ''' Install alternative configuration ''' - if (os.path.exists(target) and not os.path.islink(target)): - # Move existing file/directory away before installing - shutil.move(target, '{}.bak'.format(target)) - cmd = [ - 'update-alternatives', '--force', '--install', - target, name, source, str(priority) - ] - subprocess.check_call(cmd) - - -def remove_alternative(name, source): - """Remove an installed alternative configuration file - - :param name: string name of the alternative to remove - :param source: string full path to alternative to remove - """ - cmd = [ - 'update-alternatives', '--remove', - name, source - ] - subprocess.check_call(cmd) diff --git a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/hooks/charmhelpers/contrib/openstack/amulet/__init__.py deleted file mode 100644 index d7567b8..0000000 --- a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py deleted file mode 100644 index 1c96752..0000000 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,357 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import re -import sys -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) -from charmhelpers.contrib.openstack.amulet.utils import ( - OPENSTACK_RELEASES_PAIRS -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. 
- """ - - def __init__(self, series=None, openstack=None, source=None, - stable=True, log_level=DEBUG): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.log = self.get_logger(level=log_level) - self.log.info('OpenStackAmuletDeployment: init') - self.openstack = openstack - self.source = source - self.stable = stable - - def get_logger(self, name="deployment-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. - - Determine if the local branch being tested is derived from its - stable or next (dev) branch, and based on this, use the corresonding - stable or next branches for the other_services.""" - - self.log.info('OpenStackAmuletDeployment: determine branch locations') - - # Charms outside the ~openstack-charmers - base_charms = { - 'mysql': ['trusty'], - 'mongodb': ['trusty'], - 'nrpe': ['trusty', 'xenial'], - } - - for svc in other_services: - # If a location has been explicitly set, use it - if svc.get('location'): - continue - if svc['name'] in base_charms: - # NOTE: not all charms have support for all series we - # want/need to test against, so fix to most recent - # that each base charm supports - target_series = self.series - if self.series not in base_charms[svc['name']]: - target_series = base_charms[svc['name']][-1] - svc['location'] = 'cs:{}/{}'.format(target_series, - svc['name']) - elif self.stable: - svc['location'] = 'cs:{}/{}'.format(self.series, - svc['name']) - else: - svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( - self.series, - svc['name'] - ) - - return other_services - - def _add_services(self, this_service, other_services, use_source=None, - no_origin=None): - """Add services to the deployment and optionally set - openstack-origin/source. - - :param this_service dict: Service dictionary describing the service - whose amulet tests are being run - :param other_services dict: List of service dictionaries describing - the services needed to support the target - service - :param use_source list: List of services which use the 'source' config - option rather than 'openstack-origin' - :param no_origin list: List of services which do not support setting - the Cloud Archive. 
- Service Dict: - { - 'name': str charm-name, - 'units': int number of units, - 'constraints': dict of juju constraints, - 'location': str location of charm, - } - eg - this_service = { - 'name': 'openvswitch-odl', - 'constraints': {'mem': '8G'}, - } - other_services = [ - { - 'name': 'nova-compute', - 'units': 2, - 'constraints': {'mem': '4G'}, - 'location': cs:~bob/xenial/nova-compute - }, - { - 'name': 'mysql', - 'constraints': {'mem': '2G'}, - }, - {'neutron-api-odl'}] - use_source = ['mysql'] - no_origin = ['neutron-api-odl'] - """ - self.log.info('OpenStackAmuletDeployment: adding services') - - other_services = self._determine_branch_locations(other_services) - - super(OpenStackAmuletDeployment, self)._add_services(this_service, - other_services) - - services = other_services - services.append(this_service) - - use_source = use_source or [] - no_origin = no_origin or [] - - # Charms which should use the source config option - use_source = list(set( - use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy', 'percona-cluster', 'lxd'])) - - # Charms which can not use openstack-origin, ie. many subordinates - no_origin = list(set( - no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', - 'nrpe', 'openvswitch-odl', 'neutron-api-odl', - 'odl-controller', 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'])) - - if self.openstack: - for svc in services: - if svc['name'] not in use_source + no_origin: - config = {'openstack-origin': self.openstack} - self.d.configure(svc['name'], config) - - if self.source: - for svc in services: - if svc['name'] in use_source and svc['name'] not in no_origin: - config = {'source': self.source} - self.d.configure(svc['name'], config) - - def _configure_services(self, configs): - """Configure all of the services.""" - self.log.info('OpenStackAmuletDeployment: configure services') - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=None): - """Wait for all units to have a specific extended status, except - for any defined as excluded. Unless specified via message, any - status containing any case of 'ready' will be considered a match. - - Examples of message usage: - - Wait for all unit status to CONTAIN any case of 'ready' or 'ok': - message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) - - Wait for all units to reach this status (exact match): - message = re.compile('^Unit is ready and clustered$') - - Wait for all units to reach any one of these (exact match): - message = re.compile('Unit is ready|OK|Ready') - - Wait for at least one unit to reach this status (exact match): - message = {'ready'} - - See Amulet's sentry.wait_for_messages() for message usage detail. - https://github.com/juju/amulet/blob/master/amulet/sentry.py - - :param message: Expected status match - :param exclude_services: List of juju service names to ignore, - not to be used in conjuction with include_only. - :param include_only: List of juju service names to exclusively check, - not to be used in conjuction with exclude_services. - :param timeout: Maximum time in seconds to wait for status match - :returns: None. Raises if timeout is hit. - """ - if not timeout: - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) - self.log.info('Waiting for extended status on units for {}s...' 
- ''.format(timeout)) - - all_services = self.d.services.keys() - - if exclude_services and include_only: - raise ValueError('exclude_services can not be used ' - 'with include_only') - - if message: - if isinstance(message, re._pattern_type): - match = message.pattern - else: - match = message - - self.log.debug('Custom extended status wait match: ' - '{}'.format(match)) - else: - self.log.debug('Default extended status wait match: contains ' - 'READY (case-insensitive)') - message = re.compile('.*ready.*', re.IGNORECASE) - - if exclude_services: - self.log.debug('Excluding services from extended status match: ' - '{}'.format(exclude_services)) - else: - exclude_services = [] - - if include_only: - services = include_only - else: - services = list(set(all_services) - set(exclude_services)) - - self.log.debug('Waiting up to {}s for extended status on services: ' - '{}'.format(timeout, services)) - service_messages = {service: message for service in services} - - # Check for idleness - self.d.sentry.wait(timeout=timeout) - # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) - # Check for ready messages - self.d.sentry.wait_for_messages(service_messages, timeout=timeout) - - self.log.info('OK') - - def _get_openstack_release(self): - """Get openstack release. - - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): - setattr(self, os_pair, i) - - releases = { - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, - ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('xenial', None): self.xenial_mitaka, - ('xenial', 'cloud:xenial-newton'): self.xenial_newton, - ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, - ('xenial', 'cloud:xenial-pike'): self.xenial_pike, - ('xenial', 'cloud:xenial-queens'): self.xenial_queens, - ('yakkety', None): self.yakkety_newton, - ('zesty', None): self.zesty_ocata, - ('artful', None): self.artful_pike, - ('bionic', None): self.bionic_queens, - ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, - ('cosmic', None): self.cosmic_rocky, - } - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. - - Return a string representing the openstack release. 
- """ - releases = OrderedDict([ - ('trusty', 'icehouse'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] - - def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools in a ceph + cinder + glance - test scenario, based on OpenStack release and whether ceph radosgw - is flagged as present or not.""" - - if self._get_openstack_release() == self.trusty_icehouse: - # Icehouse - pools = [ - 'data', - 'metadata', - 'rbd', - 'cinder-ceph', - 'glance' - ] - elif (self.trusty_kilo <= self._get_openstack_release() <= - self.zesty_ocata): - # Kilo through Ocata - pools = [ - 'rbd', - 'cinder-ceph', - 'glance' - ] - else: - # Pike and later - pools = [ - 'cinder-ceph', - 'glance' - ] - - if radosgw: - pools.extend([ - '.rgw.root', - '.rgw.control', - '.rgw', - '.rgw.gc', - '.users.uid' - ]) - - return pools diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py deleted file mode 100644 index ef4ab54..0000000 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ /dev/null @@ -1,1515 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import json -import logging -import os -import re -import six -import time -import urllib -import urlparse - -import cinderclient.v1.client as cinder_client -import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1.client as glance_client -import heatclient.v1.client as heat_client -from keystoneclient.v2_0 import client as keystone_client -from keystoneauth1.identity import ( - v3, - v2, -) -from keystoneauth1 import session as keystone_session -from keystoneclient.v3 import client as keystone_client_v3 -from novaclient import exceptions - -import novaclient.client as nova_client -import novaclient -import pika -import swiftclient - -from charmhelpers.core.decorators import retry_on_exception -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) -from charmhelpers.core.host import CompareHostReleases - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - -NOVA_CLIENT_VERSION = "2" - -OPENSTACK_RELEASES_PAIRS = [ - 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', - 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton', - 'yakkety_newton', 'xenial_ocata', 'zesty_ocata', - 'xenial_pike', 'artful_pike', 'xenial_queens', - 'bionic_queens', 'bionic_rocky', 'cosmic_rocky'] - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charm tests. 
- """ - - def __init__(self, log_level=ERROR): - """Initialize the deployment environment.""" - super(OpenStackAmuletUtils, self).__init__(log_level) - - def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, openstack_release=None): - """Validate endpoint data. Pick the correct validator based on - OpenStack release. Expected data should be in the v2 format: - { - 'id': id, - 'region': region, - 'adminurl': adminurl, - 'internalurl': internalurl, - 'publicurl': publicurl, - 'service_id': service_id} - - """ - validation_function = self.validate_v2_endpoint_data - xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') - if openstack_release and openstack_release >= xenial_queens: - validation_function = self.validate_v3_endpoint_data - expected = { - 'id': expected['id'], - 'region': expected['region'], - 'region_id': 'RegionOne', - 'url': self.valid_url, - 'interface': self.not_null, - 'service_id': expected['service_id']} - return validation_function(endpoints, admin_port, internal_port, - public_port, expected) - - def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate endpoint data. - - Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint. - """ - self.log.debug('Validating endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = False - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if (admin_port in ep.adminurl and - internal_port in ep.internalurl and - public_port in ep.publicurl): - found = True - actual = {'id': ep.id, - 'region': ep.region, - 'adminurl': ep.adminurl, - 'internalurl': ep.internalurl, - 'publicurl': ep.publicurl, - 'service_id': ep.service_id} - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if not found: - return 'endpoint not found' - - def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected, expected_num_eps=3): - """Validate keystone v3 endpoint data. - - Validate the v3 endpoint data which has changed from v2. The - ports are used to find the matching endpoint. - - The new v3 endpoint data looks like: - - ['}, - region=RegionOne, - region_id=RegionOne, - service_id=17f842a0dc084b928e476fafe67e4095, - url=http://10.5.6.5:9312>, - '}, - region=RegionOne, - region_id=RegionOne, - service_id=72fc8736fb41435e8b3584205bb2cfa3, - url=http://10.5.6.6:35357/v3>, - ... ] - """ - self.log.debug('Validating v3 endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = [] - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if ((admin_port in ep.url and ep.interface == 'admin') or - (internal_port in ep.url and ep.interface == 'internal') or - (public_port in ep.url and ep.interface == 'public')): - found.append(ep.interface) - # note we ignore the links member. - actual = {'id': ep.id, - 'region': ep.region, - 'region_id': ep.region_id, - 'interface': self.not_null, - 'url': ep.url, - 'service_id': ep.service_id, } - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if len(found) != expected_num_eps: - return 'Unexpected number of endpoints found' - - def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): - """Convert v2 endpoint data into v3. - - { - 'service_name1': [ - { - 'adminURL': adminURL, - 'id': id, - 'region': region. 
- 'publicURL': publicURL,
- 'internalURL': internalURL
- }],
- 'service_name2': [
- {
- 'adminURL': adminURL,
- 'id': id,
- 'region': region,
- 'publicURL': publicURL,
- 'internalURL': internalURL
- }],
- }
- """
- self.log.warn("Endpoint ID and Region ID validation is limited to not "
- "null checks after v2 to v3 conversion")
- for svc in ep_data.keys():
- assert len(ep_data[svc]) == 1, "Unknown data format"
- svc_ep_data = ep_data[svc][0]
- ep_data[svc] = [
- {
- 'url': svc_ep_data['adminURL'],
- 'interface': 'admin',
- 'region': svc_ep_data['region'],
- 'region_id': self.not_null,
- 'id': self.not_null},
- {
- 'url': svc_ep_data['publicURL'],
- 'interface': 'public',
- 'region': svc_ep_data['region'],
- 'region_id': self.not_null,
- 'id': self.not_null},
- {
- 'url': svc_ep_data['internalURL'],
- 'interface': 'internal',
- 'region': svc_ep_data['region'],
- 'region_id': self.not_null,
- 'id': self.not_null}]
- return ep_data
-
- def validate_svc_catalog_endpoint_data(self, expected, actual,
- openstack_release=None):
- """Validate service catalog endpoint data. Pick the correct validator
- for the OpenStack version. Expected data should be in the v2 format:
- {
- 'service_name1': [
- {
- 'adminURL': adminURL,
- 'id': id,
- 'region': region,
- 'publicURL': publicURL,
- 'internalURL': internalURL
- }],
- 'service_name2': [
- {
- 'adminURL': adminURL,
- 'id': id,
- 'region': region,
- 'publicURL': publicURL,
- 'internalURL': internalURL
- }],
- }
-
- """
- validation_function = self.validate_v2_svc_catalog_endpoint_data
- xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
- if openstack_release and openstack_release >= xenial_queens:
- validation_function = self.validate_v3_svc_catalog_endpoint_data
- expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
- return validation_function(expected, actual)
-
- def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
- """Validate the keystone v3 catalog endpoint data.
-
- Validate a list of dictionaries that make up the keystone v3 service
- catalogue.
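The conversion above fans each v2 catalog entry out into three v3-style interface entries; stripped to its core it is a small mapping exercise (sketch only; `to_v3_entries` is not charmhelpers API and the URLs are placeholders):

    def to_v3_entries(svc_ep):
        """Fan one v2 endpoint dict out into three v3 interface dicts."""
        return [{'url': svc_ep[src], 'interface': iface,
                 'region': svc_ep['region']}
                for src, iface in (('adminURL', 'admin'),
                                   ('publicURL', 'public'),
                                   ('internalURL', 'internal'))]

    v2 = {'adminURL': 'http://10.0.0.1:35357/v2.0',
          'publicURL': 'http://10.0.0.1:5000/v2.0',
          'internalURL': 'http://10.0.0.1:5000/v2.0',
          'id': 'abc123', 'region': 'RegionOne'}
    assert [e['interface'] for e in to_v3_entries(v2)] == \
        ['admin', 'public', 'internal']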
-
- It is in the form of:
-
-
- {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
- u'interface': u'admin',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.224:35357/v3'},
- {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
- u'interface': u'public',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.224:5000/v3'},
- {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
- u'interface': u'internal',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.224:5000/v3'}],
- u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
- u'interface': u'public',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.223:9311'},
- {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
- u'interface': u'internal',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.223:9311'},
- {u'id': u'f629388955bc407f8b11d8b7ca168086',
- u'interface': u'admin',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.223:9312'}]}
-
- Note that an added complication is that the ordering of the admin,
- public and internal entries against 'interface' within each region
- is not guaranteed.
-
- Thus, the function sorts the expected and actual lists using the
- interface key as a sort key, prior to the comparison.
- """
- self.log.debug('Validating v3 service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- l_expected = sorted(v, key=lambda x: x['interface'])
- l_actual = sorted(actual[k], key=lambda x: x['interface'])
- if len(l_actual) != len(l_expected):
- return ("endpoint {} has differing number of interfaces "
- " - expected({}), actual({})"
- .format(k, len(l_expected), len(l_actual)))
- for i_expected, i_actual in zip(l_expected, l_actual):
- self.log.debug("checking interface {}"
- .format(i_expected['interface']))
- ret = self._validate_dict_data(i_expected, i_actual)
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual, api_version=None):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
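Since the catalog does not guarantee interface ordering, the sort-then-zip comparison above is the essential trick; in isolation:

    expected = [{'interface': 'public'}, {'interface': 'admin'},
                {'interface': 'internal'}]
    actual = [{'interface': 'internal'}, {'interface': 'public'},
              {'interface': 'admin'}]

    # Sorting both sides on 'interface' lines admin/internal/public up
    # positionally, so a plain zip() compares like with like.
    pairs = zip(sorted(expected, key=lambda x: x['interface']),
                sorted(actual, key=lambda x: x['interface']))
    assert all(e['interface'] == a['interface'] for e, a in pairs)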
- """ - self.log.debug('Validating user data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - if e['name'] == act.name: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'id': act.id} - if api_version == 3: - a['default_project_id'] = getattr(act, - 'default_project_id', - 'none') - else: - a['tenantId'] = act.tenantId - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. - """ - self.log.debug('Validating flavor data...') - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - self.log.debug('Checking if tenant exists ({})...'.format(tenant)) - return tenant in [t.name for t in keystone.tenants.list()] - - @retry_on_exception(num_retries=5, base_delay=1) - def keystone_wait_for_propagation(self, sentry_relation_pairs, - api_version): - """Iterate over list of sentry and relation tuples and verify that - api_version has the expected value. - - :param sentry_relation_pairs: list of sentry, relation name tuples used - for monitoring propagation of relation - data - :param api_version: api_version to expect in relation data - :returns: None if successful. Raise on error. - """ - for (sentry, relation_name) in sentry_relation_pairs: - rel = sentry.relation('identity-service', - relation_name) - self.log.debug('keystone relation data: {}'.format(rel)) - if rel.get('api_version') != str(api_version): - raise Exception("api_version not propagated through relation" - " data yet ('{}' != '{}')." - "".format(rel.get('api_version'), api_version)) - - def keystone_configure_api_version(self, sentry_relation_pairs, deployment, - api_version): - """Configure preferred-api-version of keystone in deployment and - monitor provided list of relation objects for propagation - before returning to caller. - - :param sentry_relation_pairs: list of sentry, relation tuples used for - monitoring propagation of relation data - :param deployment: deployment to configure - :param api_version: value preferred-api-version will be set to - :returns: None if successful. Raise on error. 
- """ - self.log.debug("Setting keystone preferred-api-version: '{}'" - "".format(api_version)) - - config = {'preferred-api-version': api_version} - deployment.d.configure('keystone', config) - deployment._auto_wait_for_status() - self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - - def authenticate_cinder_admin(self, keystone, api_version=2): - """Authenticates admin user with cinder.""" - self.log.debug('Authenticating cinder admin...') - _clients = { - 1: cinder_client.Client, - 2: cinder_clientv2.Client} - return _clients[api_version](session=keystone.session) - - def authenticate_keystone(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Authenticate with Keystone""" - self.log.debug('Authenticating with keystone...') - if not api_version: - api_version = 2 - sess, auth = self.get_keystone_session( - keystone_ip=keystone_ip, - username=username, - password=password, - api_version=api_version, - admin_port=admin_port, - user_domain_name=user_domain_name, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name - ) - if api_version == 2: - client = keystone_client.Client(session=sess) - else: - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client - - def get_keystone_session(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Return a keystone session object""" - ep = self.get_keystone_endpoint(keystone_ip, - api_version=api_version, - admin_port=admin_port) - if api_version == 2: - auth = v2.Password( - username=username, - password=password, - tenant_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - else: - auth = v3.Password( - user_domain_name=user_domain_name, - username=username, - password=password, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - return (sess, auth) - - def get_keystone_endpoint(self, keystone_ip, api_version=None, - admin_port=False): - """Return keystone endpoint""" - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if api_version == 2: - ep = base_ep + "/v2.0" - else: - ep = base_ep + "/v3" - return ep - - def get_default_keystone_session(self, keystone_sentry, - openstack_release=None, api_version=2): - """Return a keystone session object and client object assuming standard - default settings - - Example call in amulet tests: - self.keystone_session, self.keystone = u.get_default_keystone_session( - self.keystone_sentry, - openstack_release=self._get_openstack_release()) - - The session can then be used to auth other clients: - neutronclient.Client(session=session) - aodh_client.Client(session=session) - eyc - """ - self.log.debug('Authenticating keystone admin...') - # 11 => xenial_queens - if api_version == 3 or (openstack_release and openstack_release >= 11): - client_class = keystone_client_v3.Client - api_version = 3 - else: - client_class = keystone_client.Client - keystone_ip = keystone_sentry.info['public-address'] - session, auth = self.get_keystone_session( - keystone_ip, - api_version=api_version, - username='admin', - 
password='openstack', - project_name='admin', - user_domain_name='admin_domain', - project_domain_name='admin_domain') - client = client_class(session=session) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(session) - return session, client - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant=None, api_version=None, - keystone_ip=None, user_domain_name=None, - project_domain_name=None, - project_name=None): - """Authenticates admin user with the keystone admin endpoint.""" - self.log.debug('Authenticating keystone admin...') - if not keystone_ip: - keystone_ip = keystone_sentry.info['public-address'] - - # To support backward compatibility usage of this function - if not project_name: - project_name = tenant - if api_version == 3 and not user_domain_name: - user_domain_name = 'admin_domain' - if api_version == 3 and not project_domain_name: - project_domain_name = 'admin_domain' - if api_version == 3 and not project_name: - project_name = 'admin' - - return self.authenticate_keystone( - keystone_ip, user, password, - api_version=api_version, - user_domain_name=user_domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - admin_port=True) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - self.log.debug('Authenticating keystone user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - keystone_ip = urlparse.urlparse(ep).hostname - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant) - - def authenticate_glance_admin(self, keystone): - """Authenticates admin user with glance.""" - self.log.debug('Authenticating glance admin...') - ep = keystone.service_catalog.url_for(service_type='image', - interface='adminURL') - if keystone.session: - return glance_client.Client(ep, session=keystone.session) - else: - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_heat_admin(self, keystone): - """Authenticates the admin user with heat.""" - self.log.debug('Authenticating heat admin...') - ep = keystone.service_catalog.url_for(service_type='orchestration', - interface='publicURL') - if keystone.session: - return heat_client.Client(endpoint=ep, session=keystone.session) - else: - return heat_client.Client(endpoint=ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - self.log.debug('Authenticating nova user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return nova_client.Client(NOVA_CLIENT_VERSION, - session=keystone.session, - auth_url=ep) - elif novaclient.__version__[0] >= "7": - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, password=password, - project_name=tenant, auth_url=ep) - else: - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def authenticate_swift_user(self, keystone, user, password, tenant): - """Authenticates a regular user with swift api.""" - self.log.debug('Authenticating swift user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return swiftclient.Connection(session=keystone.session) - else: - return 
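The session plumbing used throughout these helpers is standard keystoneauth1 usage; a minimal v3 example with a placeholder address and the default test credentials seen above:

    from keystoneauth1 import session as keystone_session
    from keystoneauth1.identity import v3
    from keystoneclient.v3 import client as keystone_client_v3

    auth = v3.Password(auth_url='http://10.5.0.10:35357/v3',  # placeholder
                       username='admin', password='openstack',
                       project_name='admin',
                       user_domain_name='admin_domain',
                       project_domain_name='admin_domain')
    sess = keystone_session.Session(auth=auth)
    keystone = keystone_client_v3.Client(session=sess)
    # The same session authenticates the other service clients, e.g.
    # cinder_clientv2.Client(session=sess) as in authenticate_cinder_admin.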
swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') - - def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create the specified flavor.""" - try: - nova.flavors.find(name=name) - except (exceptions.NotFound, exceptions.NoUniqueMatch): - self.log.debug('Creating flavor ({})'.format(name)) - nova.flavors.create(name, ram, vcpus, disk, flavorid, - ephemeral, swap, rxtx_factor, is_public) - - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. - - :param glance: pointer to authenticated glance connection - :param image_name: display name for new image - :returns: glance image pointer - """ - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) - - # Download cirros image - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() - - # Create glance image - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) - - # Wait for image to reach active status - img_id = image.id - ret = self.resource_reaches_status(glance.images, img_id, - expected_stat='active', - msg='Image status wait') - if not ret: - msg = 'Glance image failed to reach expected state.' 
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
- self.log.debug(msg_attr)
- else:
- msg = ('Image validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.glance.find_image(image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except Exception:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def _get_cinder_obj_name(self, cinder_object):
- """Retrieve name of cinder object.
-
- :param cinder_object: cinder snapshot or volume object
- :returns: str cinder object name
- """
- # v1 objects store name in 'display_name' attr but v2+ use 'name'
- try:
- return cinder_object.display_name
- except AttributeError:
- return cinder_object.name
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
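The ACTIVE-wait loop in `create_instance` is the same poll-until-status idiom that `resource_reaches_status` generalises further down; distilled (a sketch, `wait_for_status` is a hypothetical name):

    import time

    def wait_for_status(get_status, target='ACTIVE', tries=60, delay=3):
        """Poll get_status() until it returns target or attempts run out."""
        for _ in range(tries):
            if get_status() == target:
                return True
            time.sleep(delay)
        return False

    # e.g. wait_for_status(lambda: nova.servers.get(instance.id).status)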
- - :param vol_name: cinder volume display name - :param vol_size: size in gigabytes - :param img_id: optional glance image id - :param src_vol_id: optional source volume id to clone - :param snap_id: optional snapshot id to use - :returns: cinder volume pointer - """ - # Handle parameter input and avoid impossible combinations - if img_id and not src_vol_id and not snap_id: - # Create volume from image - self.log.debug('Creating cinder volume from glance image...') - bootable = 'true' - elif src_vol_id and not img_id and not snap_id: - # Clone an existing volume - self.log.debug('Cloning cinder volume...') - bootable = cinder.volumes.get(src_vol_id).bootable - elif snap_id and not src_vol_id and not img_id: - # Create volume from snapshot - self.log.debug('Creating cinder volume from snapshot...') - snap = cinder.volume_snapshots.find(id=snap_id) - vol_size = snap.size - snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id - bootable = cinder.volumes.get(snap_vol_id).bootable - elif not img_id and not src_vol_id and not snap_id: - # Create volume - self.log.debug('Creating cinder volume...') - bootable = 'false' - else: - # Impossible combination of parameters - msg = ('Invalid method use - name:{} size:{} img_id:{} ' - 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, - img_id, src_vol_id, - snap_id)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create new volume - try: - vol_new = cinder.volumes.create(display_name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except TypeError: - vol_new = cinder.volumes.create(name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except Exception as e: - msg = 'Failed to create volume: {}'.format(e) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Wait for volume to reach available status - ret = self.resource_reaches_status(cinder.volumes, vol_id, - expected_stat="available", - msg="Volume status wait") - if not ret: - msg = 'Cinder volume failed to reach expected state.' - amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new volume - self.log.debug('Validating volume attributes...') - val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) - val_vol_boot = cinder.volumes.get(vol_id).bootable - val_vol_stat = cinder.volumes.get(vol_id).status - val_vol_size = cinder.volumes.get(vol_id).size - msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' - '{} size:{}'.format(val_vol_name, vol_id, - val_vol_stat, val_vol_boot, - val_vol_size)) - - if val_vol_boot == bootable and val_vol_stat == 'available' \ - and val_vol_name == vol_name and val_vol_size == vol_size: - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return vol_new - - def delete_resource(self, resource, resource_id, - msg="resource", max_wait=120): - """Delete one openstack resource, such as one instance, keypair, - image, volume, stack, etc., and confirm deletion within max wait time. 
- - :param resource: pointer to os resource type, ex:glance_client.images - :param resource_id: unique name or id for the openstack resource - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, otherwise False - """ - self.log.debug('Deleting OpenStack resource ' - '{} ({})'.format(resource_id, msg)) - num_before = len(list(resource.list())) - resource.delete(resource_id) - - tries = 0 - num_after = len(list(resource.list())) - while num_after != (num_before - 1) and tries < (max_wait / 4): - self.log.debug('{} delete check: ' - '{} [{}:{}] {}'.format(msg, tries, - num_before, - num_after, - resource_id)) - time.sleep(4) - num_after = len(list(resource.list())) - tries += 1 - - self.log.debug('{}: expected, actual count = {}, ' - '{}'.format(msg, num_before - 1, num_after)) - - if num_after == (num_before - 1): - return True - else: - self.log.error('{} delete timed out'.format(msg)) - return False - - def resource_reaches_status(self, resource, resource_id, - expected_stat='available', - msg='resource', max_wait=120): - """Wait for an openstack resources status to reach an - expected status within a specified time. Useful to confirm that - nova instances, cinder vols, snapshots, glance images, heat stacks - and other resources eventually reach the expected status. - - :param resource: pointer to os resource type, ex: heat_client.stacks - :param resource_id: unique id for the openstack resource - :param expected_stat: status to expect resource to reach - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, False if status is not reached - """ - - tries = 0 - resource_stat = resource.get(resource_id).status - while resource_stat != expected_stat and tries < (max_wait / 4): - self.log.debug('{} status check: ' - '{} [{}:{}] {}'.format(msg, tries, - resource_stat, - expected_stat, - resource_id)) - time.sleep(4) - resource_stat = resource.get(resource_id).status - tries += 1 - - self.log.debug('{}: expected, actual status = {}, ' - '{}'.format(msg, resource_stat, expected_stat)) - - if resource_stat == expected_stat: - return True - else: - self.log.debug('{} never reached expected status: ' - '{}'.format(resource_id, expected_stat)) - return False - - def get_ceph_osd_id_cmd(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return ("`initctl list | grep 'ceph-osd ' | " - "awk 'NR=={} {{ print $2 }}' | " - "grep -o '[0-9]*'`".format(index + 1)) - - def get_ceph_pools(self, sentry_unit): - """Return a dict of ceph pools from a single ceph unit, with - pool name as keys, pool id as vals.""" - pools = {} - cmd = 'sudo ceph osd lspools' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, - for pool in str(output).split(','): - pool_id_name = pool.split(' ') - if len(pool_id_name) == 2: - pool_id = pool_id_name[0] - pool_name = pool_id_name[1] - pools[pool_name] = int(pool_id) - - self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], - pools)) - return pools - - def get_ceph_df(self, sentry_unit): - """Return dict of ceph df json output, including ceph pool state. 
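`get_ceph_pools` leans on the comma-separated shape of `ceph osd lspools` output; the parsing step in isolation (hypothetical helper name):

    def parse_lspools(output):
        """'0 data,1 metadata,2 rbd,' -> {'data': 0, 'metadata': 1, 'rbd': 2}"""
        pools = {}
        for pool in str(output).split(','):
            pool_id_name = pool.split(' ')
            if len(pool_id_name) == 2:  # skips the trailing empty field
                pools[pool_id_name[1]] = int(pool_id_name[0])
        return pools

    assert parse_lspools('0 data,1 metadata,2 rbd,') == \
        {'data': 0, 'metadata': 1, 'rbd': 2}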
- - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :returns: Dict of ceph df output - """ - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return json.loads(output) - - def get_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Take a sample of attributes of a ceph pool, returning ceph - pool name, object count and disk space used for the specified - pool ID number. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :param pool_id: Ceph pool ID - :returns: List of pool name, object count, kb disk space used - """ - df = self.get_ceph_df(sentry_unit) - for pool in df['pools']: - if pool['id'] == pool_id: - pool_name = pool['name'] - obj_count = pool['stats']['objects'] - kb_used = pool['stats']['kb_used'] - - self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, pool_id, - obj_count, kb_used)) - return pool_name, obj_count, kb_used - - def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes. The - 2nd element is expected to be greater than the 1st; 3rd is expected - to be less than the 2nd. - - :param samples: List containing 3 data samples - :param sample_type: String for logging and usage context - :returns: None if successful, Failure message otherwise - """ - original, created, deleted = range(3) - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - return ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - else: - self.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - - # rabbitmq/amqp specific helpers: - - def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): - """Wait for rmq units extended status to show cluster readiness, - after an optional initial sleep period. Initial sleep is likely - necessary to be effective following a config change, as status - message may not instantly update to non-ready.""" - - if init_sleep: - time.sleep(init_sleep) - - message = re.compile('^Unit is ready and clustered$') - deployment._auto_wait_for_status(message=message, - timeout=timeout, - include_only=['rabbitmq-server']) - - def add_rmq_test_user(self, sentry_units, - username="testuser1", password="changeme"): - """Add a test user via the first rmq juju unit, check connection as - the new user against all sentry units. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful. Raise on error. 
- """ - self.log.debug('Adding rmq user ({})...'.format(username)) - - # Check that user does not already exist - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - if username in output: - self.log.warning('User ({}) already exists, returning ' - 'gracefully.'.format(username)) - return - - perms = '".*" ".*" ".*"' - cmds = ['rabbitmqctl add_user {} {}'.format(username, password), - 'rabbitmqctl set_permissions {} {}'.format(username, perms)] - - # Add user via first unit - for cmd in cmds: - output, _ = self.run_cmd_unit(sentry_units[0], cmd) - - # Check connection against the other sentry_units - self.log.debug('Checking user connect against units...') - for sentry_unit in sentry_units: - connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, - username=username, - password=password) - connection.close() - - def delete_rmq_test_user(self, sentry_units, username="testuser1"): - """Delete a rabbitmq user via the first rmq juju unit. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful or no such user. - """ - self.log.debug('Deleting rmq user ({})...'.format(username)) - - # Check that the user exists - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - - if username not in output: - self.log.warning('User ({}) does not exist, returning ' - 'gracefully.'.format(username)) - return - - # Delete the user - cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) - - def get_rmq_cluster_status(self, sentry_unit): - """Execute rabbitmq cluster status command on a unit and return - the full output. - - :param unit: sentry unit - :returns: String containing console output of cluster status command - """ - cmd = 'rabbitmqctl cluster_status' - output, _ = self.run_cmd_unit(sentry_unit, cmd) - self.log.debug('{} cluster_status:\n{}'.format( - sentry_unit.info['unit_name'], output)) - return str(output) - - def get_rmq_cluster_running_nodes(self, sentry_unit): - """Parse rabbitmqctl cluster_status output string, return list of - running rabbitmq cluster nodes. - - :param unit: sentry unit - :returns: List containing node names of running nodes - """ - # NOTE(beisner): rabbitmqctl cluster_status output is not - # json-parsable, do string chop foo, then json.loads that. - str_stat = self.get_rmq_cluster_status(sentry_unit) - if 'running_nodes' in str_stat: - pos_start = str_stat.find("{running_nodes,") + 15 - pos_end = str_stat.find("]},", pos_start) + 1 - str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') - run_nodes = json.loads(str_run_nodes) - return run_nodes - else: - return [] - - def validate_rmq_cluster_running_nodes(self, sentry_units): - """Check that all rmq unit hostnames are represented in the - cluster_status output of all units. 
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is disabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
- - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param port: amqp port, use defaults if None - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: on') - - # Enable RMQ SSL - config = {'ssl': 'on'} - if port: - config['ssl_port'] = port - - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): - """Turn ssl charm config option off, confirm that it is disabled - on every unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: off') - - # Disable RMQ SSL - config = {'ssl': 'off'} - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def connect_amqp_by_unit(self, sentry_unit, ssl=False, - port=None, fatal=True, - username="testuser1", password="changeme"): - """Establish and return a pika amqp connection to the rabbitmq service - running on a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :param fatal: boolean, default to True (raises on connect error) - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: pika amqp connection pointer or None if failed and non-fatal - """ - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - # Default port logic if port is not specified - if ssl and not port: - port = 5671 - elif not ssl and not port: - port = 5672 - - self.log.debug('Connecting to amqp on {}:{} ({}) as ' - '{}...'.format(host, port, unit_name, username)) - - try: - credentials = pika.PlainCredentials(username, password) - parameters = pika.ConnectionParameters(host=host, port=port, - credentials=credentials, - ssl=ssl, - connection_attempts=3, - retry_delay=5, - socket_timeout=1) - connection = pika.BlockingConnection(parameters) - assert connection.is_open is True - assert connection.is_closing is False - self.log.debug('Connect OK') - return connection - except Exception as e: - msg = ('amqp connection failed to {}:{} as ' - '{} ({})'.format(host, port, username, str(e))) - if fatal: - amulet.raise_status(amulet.FAIL, msg) - else: - self.log.warn(msg) - return None - - def publish_amqp_message_by_unit(self, sentry_unit, message, - queue="test", ssl=False, - username="testuser1", - password="changeme", - port=None): - """Publish an amqp message to a rmq juju unit. 
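`connect_amqp_by_unit` wraps a plain pika connection; its core, with a placeholder host and the test credentials used in these helpers (SSL wiring is omitted here since it differs across pika versions):

    import pika

    credentials = pika.PlainCredentials('testuser1', 'changeme')
    parameters = pika.ConnectionParameters(host='10.5.0.20',  # placeholder
                                           port=5672,
                                           credentials=credentials,
                                           connection_attempts=3,
                                           retry_delay=5)
    connection = pika.BlockingConnection(parameters)
    assert connection.is_open
    connection.close()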
- - :param sentry_unit: sentry unit pointer - :param message: amqp message string - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: None. Raises exception if publish failed. - """ - self.log.debug('Publishing message to {} queue:\n{}'.format(queue, - message)) - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - - # NOTE(beisner): extra debug here re: pika hang potential: - # https://github.com/pika/pika/issues/297 - # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw - self.log.debug('Defining channel...') - channel = connection.channel() - self.log.debug('Declaring queue...') - channel.queue_declare(queue=queue, auto_delete=False, durable=True) - self.log.debug('Publishing message...') - channel.basic_publish(exchange='', routing_key=queue, body=message) - self.log.debug('Closing channel...') - channel.close() - self.log.debug('Closing connection...') - connection.close() - - def get_amqp_message_by_unit(self, sentry_unit, queue="test", - username="testuser1", - password="changeme", - ssl=False, port=None): - """Get an amqp message from a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: amqp message body as string. Raise if get fails. - """ - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - channel = connection.channel() - method_frame, _, body = channel.basic_get(queue) - - if method_frame: - self.log.debug('Retreived message from {} queue:\n{}'.format(queue, - body)) - channel.basic_ack(method_frame.delivery_tag) - channel.close() - connection.close() - return body - else: - msg = 'No message retrieved.' - amulet.raise_status(amulet.FAIL, msg) - - def validate_memcache(self, sentry_unit, conf, os_release, - earliest_release=5, section='keystone_authtoken', - check_kvs=None): - """Check Memcache is running and is configured to be used - - Example call from Amulet test: - - def test_110_memcache(self): - u.validate_memcache(self.neutron_api_sentry, - '/etc/neutron/neutron.conf', - self._get_openstack_release()) - - :param sentry_unit: sentry unit - :param conf: OpenStack config file to check memcache settings - :param os_release: Current OpenStack release int code - :param earliest_release: Earliest Openstack release to check int code - :param section: OpenStack config file section to check - :param check_kvs: Dict of settings to check in config file - :returns: None - """ - if os_release < earliest_release: - self.log.debug('Skipping memcache checks for deployment. 
{} < '
- 'mitaka'.format(os_release))
- return
- _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
- self.log.debug('Checking memcached is running')
- ret = self.validate_services_by_name({sentry_unit: ['memcached']})
- if ret:
- amulet.raise_status(amulet.FAIL, msg='Memcache running check '
- 'failed {}'.format(ret))
- else:
- self.log.debug('OK')
- self.log.debug('Checking memcache url is configured in {}'.format(
- conf))
- if self.validate_config_data(sentry_unit, conf, section, _kvs):
- message = "Memcache config error in: {}".format(conf)
- amulet.raise_status(amulet.FAIL, msg=message)
- else:
- self.log.debug('OK')
- self.log.debug('Checking memcache configuration in '
- '/etc/memcached.conf')
- contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
- fatal=True)
- ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
- if CompareHostReleases(ubuntu_release) <= 'trusty':
- memcache_listen_addr = 'ip6-localhost'
- else:
- memcache_listen_addr = '::1'
- expected = {
- '-p': '11211',
- '-l': memcache_listen_addr}
- found = []
- for key, value in expected.items():
- for line in contents.split('\n'):
- if line.startswith(key):
- self.log.debug('Checking {} is set to {}'.format(
- key,
- value))
- assert value == line.split()[-1]
- self.log.debug(line.split()[-1])
- found.append(key)
- if sorted(found) == sorted(expected.keys()):
- self.log.debug('OK')
- else:
- message = "Memcache config error in: /etc/memcached.conf"
- amulet.raise_status(amulet.FAIL, msg=message)
diff --git a/hooks/charmhelpers/contrib/openstack/cert_utils.py b/hooks/charmhelpers/contrib/openstack/cert_utils.py
deleted file mode 100644
index de853b5..0000000
--- a/hooks/charmhelpers/contrib/openstack/cert_utils.py
+++ /dev/null
@@ -1,227 +0,0 @@
-# Copyright 2014-2018 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Common python helper functions used for OpenStack charm certificates.
-
-import os
-import json
-
-from charmhelpers.contrib.network.ip import (
- get_hostname,
- resolve_network_cidr,
-)
-from charmhelpers.core.hookenv import (
- local_unit,
- network_get_primary_address,
- config,
- relation_get,
- unit_get,
- NoNetworkBinding,
- log,
- WARNING,
-)
-from charmhelpers.contrib.openstack.ip import (
- ADMIN,
- resolve_address,
- get_vip_in_network,
- INTERNAL,
- PUBLIC,
- ADDRESS_MAP)
-
-from charmhelpers.core.host import (
- mkdir,
- write_file,
-)
-
-from charmhelpers.contrib.hahelpers.apache import (
- install_ca_cert
-)
-
-
-class CertRequest(object):
-
- """Create a request for certificates to be generated
- """
-
- def __init__(self, json_encode=True):
- self.entries = []
- self.hostname_entry = None
- self.json_encode = json_encode
-
- def add_entry(self, net_type, cn, addresses):
- """Add a request to the batch
-
- :param net_type: str network space name the request is for
- :param cn: str Canonical Name for certificate
- :param addresses: [] List of addresses to be used as SANs
- """
- self.entries.append({
- 'cn': cn,
- 'addresses': addresses})
-
- def add_hostname_cn(self):
- """Add a request for the hostname of the machine"""
- ip = unit_get('private-address')
- addresses = [ip]
- # If a vip is being used without os-hostname config or
- # network spaces then we need to ensure the local units
- # cert has the appropriate vip in the SAN list
- vip = get_vip_in_network(resolve_network_cidr(ip))
- if vip:
- addresses.append(vip)
- self.hostname_entry = {
- 'cn': get_hostname(ip),
- 'addresses': addresses}
-
- def add_hostname_cn_ip(self, addresses):
- """Add an address to the SAN list for the hostname request
-
- :param addresses: [] List of addresses to be added
- """
- for addr in addresses:
- if addr not in self.hostname_entry['addresses']:
- self.hostname_entry['addresses'].append(addr)
-
- def get_request(self):
- """Generate request from the batched up entries
-
- """
- if self.hostname_entry:
- self.entries.append(self.hostname_entry)
- request = {}
- for entry in self.entries:
- sans = sorted(list(set(entry['addresses'])))
- request[entry['cn']] = {'sans': sans}
- if self.json_encode:
- return {'cert_requests': json.dumps(request, sort_keys=True)}
- else:
- return {'cert_requests': request}
-
-
-def get_certificate_request(json_encode=True):
- """Generate a certificate request based on the network configuration
-
- """
- req = CertRequest(json_encode=json_encode)
- req.add_hostname_cn()
- # Add os-hostname entries
- for net_type in [INTERNAL, ADMIN, PUBLIC]:
- net_config = config(ADDRESS_MAP[net_type]['override'])
- try:
- net_addr = resolve_address(endpoint_type=net_type)
- ip = network_get_primary_address(
- ADDRESS_MAP[net_type]['binding'])
- addresses = [net_addr, ip]
- vip = get_vip_in_network(resolve_network_cidr(ip))
- if vip:
- addresses.append(vip)
- if net_config:
- req.add_entry(
- net_type,
- net_config,
- addresses)
- else:
- # There is a network address with no corresponding hostname.
- # Add the ip to the hostname cert to allow for this.
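Outside a live Juju hook environment the batching done by CertRequest reduces to building a per-CN SAN map; a hypothetical usage sketch (hostnames and addresses are placeholders, and `add_hostname_cn` is skipped because it needs hook tools):

    req = CertRequest(json_encode=False)
    req.add_entry('internal', 'ks.internal.example.com',
                  ['10.10.0.5', '10.10.0.50'])
    req.add_entry('public', 'ks.example.com', ['10.20.0.5'])
    print(req.get_request())
    # {'cert_requests': {'ks.internal.example.com':
    #                        {'sans': ['10.10.0.5', '10.10.0.50']},
    #                    'ks.example.com': {'sans': ['10.20.0.5']}}}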
- req.add_hostname_cn_ip(addresses)
- except NoNetworkBinding:
- log("Skipping request for certificate for ip in {} space, no "
- "local address found".format(net_type), WARNING)
- return req.get_request()
-
-
-def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
- """Create symlinks for SAN records
-
- :param ssl_dir: str Directory to create symlinks in
- :param custom_hostname_link: str Additional link to be created
- """
- hostname = get_hostname(unit_get('private-address'))
- hostname_cert = os.path.join(
- ssl_dir,
- 'cert_{}'.format(hostname))
- hostname_key = os.path.join(
- ssl_dir,
- 'key_{}'.format(hostname))
- # Add links to hostname cert, used if os-hostname vars not set
- for net_type in [INTERNAL, ADMIN, PUBLIC]:
- try:
- addr = resolve_address(endpoint_type=net_type)
- cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
- key = os.path.join(ssl_dir, 'key_{}'.format(addr))
- if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
- os.symlink(hostname_cert, cert)
- os.symlink(hostname_key, key)
- except NoNetworkBinding:
- log("Skipping creating cert symlink for ip in {} space, no "
- "local address found".format(net_type), WARNING)
- if custom_hostname_link:
- custom_cert = os.path.join(
- ssl_dir,
- 'cert_{}'.format(custom_hostname_link))
- custom_key = os.path.join(
- ssl_dir,
- 'key_{}'.format(custom_hostname_link))
- if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert):
- os.symlink(hostname_cert, custom_cert)
- os.symlink(hostname_key, custom_key)
-
-
-def install_certs(ssl_dir, certs, chain=None):
- """Install the certs passed into the ssl dir and append the chain if
- provided.
-
- :param ssl_dir: str Directory to install the certs into
- :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}}
- :param chain: str Chain to be appended to certs
- """
- for cn, bundle in certs.items():
- cert_filename = 'cert_{}'.format(cn)
- key_filename = 'key_{}'.format(cn)
- cert_data = bundle['cert']
- if chain:
- # Append chain file so that clients that trust the root CA will
- # trust certs signed by an intermediate in the chain
- cert_data = cert_data + chain
- write_file(
- path=os.path.join(ssl_dir, cert_filename),
- content=cert_data, perms=0o640)
- write_file(
- path=os.path.join(ssl_dir, key_filename),
- content=bundle['key'], perms=0o640)
-
-
-def process_certificates(service_name, relation_id, unit,
- custom_hostname_link=None):
- """Process the certificates supplied down the relation
-
- :param service_name: str Name of service the certificates are for.
- :param relation_id: str Relation id providing the certs
- :param unit: str Unit providing the certs
- :param custom_hostname_link: str Name of custom link to create
- """
- data = relation_get(rid=relation_id, unit=unit)
- ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
- mkdir(path=ssl_dir)
- name = local_unit().replace('/', '_')
- certs = data.get('{}.processed_requests'.format(name))
- chain = data.get('chain')
- ca = data.get('ca')
- if certs:
- certs = json.loads(certs)
- install_ca_cert(ca.encode())
- install_certs(ssl_dir, certs, chain)
- create_ip_cert_links(
- ssl_dir,
- custom_hostname_link=custom_hostname_link)
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
deleted file mode 100644
index 4e38467..0000000
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ /dev/null
@@ -1,1904 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
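One detail of `install_certs` above deserves emphasis: the chain is appended after the leaf certificate, so clients that only trust the root CA can still build a path through the intermediate. Sketched with placeholder PEM content:

    bundle = {'cert': '-----BEGIN CERTIFICATE-----\n<leaf>\n'
                      '-----END CERTIFICATE-----\n',
              'key': '-----BEGIN PRIVATE KEY-----\n<key>\n'
                     '-----END PRIVATE KEY-----\n'}
    chain = ('-----BEGIN CERTIFICATE-----\n<intermediate>\n'
             '-----END CERTIFICATE-----\n')
    # Leaf first, then intermediate(s): the order matters to TLS clients.
    cert_data = bundle['cert'] + chain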
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import glob -import json -import math -import os -import re -import time -from base64 import b64decode -from subprocess import check_call, CalledProcessError - -import six - -from charmhelpers.fetch import ( - apt_install, - filter_installed_packages, -) -from charmhelpers.core.hookenv import ( - config, - is_relation_made, - local_unit, - log, - relation_get, - relation_ids, - related_units, - relation_set, - unit_get, - unit_private_ip, - charm_name, - DEBUG, - INFO, - ERROR, - status_set, - network_get_primary_address -) - -from charmhelpers.core.sysctl import create as sysctl_create -from charmhelpers.core.strutils import bool_from_string -from charmhelpers.contrib.openstack.exceptions import OSContextError - -from charmhelpers.core.host import ( - get_bond_master, - is_phy_iface, - list_nics, - get_nic_hwaddr, - mkdir, - write_file, - pwgen, - lsb_release, - CompareHostReleases, - is_container, -) -from charmhelpers.contrib.hahelpers.cluster import ( - determine_apache_port, - determine_api_port, - https, - is_clustered, -) -from charmhelpers.contrib.hahelpers.apache import ( - get_cert, - get_ca_cert, - install_ca_cert, -) -from charmhelpers.contrib.openstack.neutron import ( - neutron_plugin_attribute, - parse_data_port_mappings, -) -from charmhelpers.contrib.openstack.ip import ( - resolve_address, - INTERNAL, - ADMIN, - PUBLIC, - ADDRESS_MAP, -) -from charmhelpers.contrib.network.ip import ( - get_address_in_network, - get_ipv4_addr, - get_ipv6_addr, - get_netmask_for_address, - format_ipv6_addr, - is_bridge_member, - is_ipv6_disabled, - get_relation_ip, -) -from charmhelpers.contrib.openstack.utils import ( - config_flags_parser, - enable_memcache, - snap_install_requested, - CompareOpenStackReleases, - os_release, -) -from charmhelpers.core.unitdata import kv - -try: - import psutil -except ImportError: - if six.PY2: - apt_install('python-psutil', fatal=True) - else: - apt_install('python3-psutil', fatal=True) - import psutil - -CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' -ADDRESS_TYPES = ['admin', 'internal', 'public'] -HAPROXY_RUN_DIR = '/var/run/haproxy/' - - -def ensure_packages(packages): - """Install but do not upgrade required plugin packages.""" - required = filter_installed_packages(packages) - if required: - apt_install(required, fatal=True) - - -def context_complete(ctxt): - _missing = [] - for k, v in six.iteritems(ctxt): - if v is None or v == '': - _missing.append(k) - - if _missing: - log('Missing required data: %s' % ' '.join(_missing), level=INFO) - return False - - return True - - -class OSContextGenerator(object): - """Base class for all context generators.""" - interfaces = [] - related = False - complete = False - missing_data = [] - - def __call__(self): - raise NotImplementedError - - def context_complete(self, ctxt): - """Check for missing data for the required context data. - Set self.missing_data if it exists and return False. 
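The completeness contract used by `context_complete` is simple: a context renders only when every key has a non-empty value. In miniature:

    ctxt = {'database_host': '10.0.0.5', 'database_user': 'nova',
            'database_password': None}
    missing = [k for k, v in ctxt.items() if v is None or v == '']
    assert missing == ['database_password']  # incomplete: do not render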
- Set self.complete if no missing data and return True. - """ - # Fresh start - self.complete = False - self.missing_data = [] - for k, v in six.iteritems(ctxt): - if v is None or v == '': - if k not in self.missing_data: - self.missing_data.append(k) - - if self.missing_data: - self.complete = False - log('Missing required data: %s' % ' '.join(self.missing_data), - level=INFO) - else: - self.complete = True - return self.complete - - def get_related(self): - """Check if any of the context interfaces have relation ids. - Set self.related and return True if one of the interfaces - has relation ids. - """ - # Fresh start - self.related = False - try: - for interface in self.interfaces: - if relation_ids(interface): - self.related = True - return self.related - except AttributeError as e: - log("{} {}" - "".format(self, e), 'INFO') - return self.related - - -class SharedDBContext(OSContextGenerator): - interfaces = ['shared-db'] - - def __init__(self, database=None, user=None, relation_prefix=None, - ssl_dir=None, relation_id=None): - """Allows inspecting relation for settings prefixed with - relation_prefix. This is useful for parsing access for multiple - databases returned via the shared-db interface (eg, nova_password, - quantum_password) - """ - self.relation_prefix = relation_prefix - self.database = database - self.user = user - self.ssl_dir = ssl_dir - self.rel_name = self.interfaces[0] - self.relation_id = relation_id - - def __call__(self): - self.database = self.database or config('database') - self.user = self.user or config('database-user') - if None in [self.database, self.user]: - log("Could not generate shared_db context. Missing required charm " - "config options. (database name and user)", level=ERROR) - raise OSContextError - - ctxt = {} - - # NOTE(jamespage) if mysql charm provides a network upon which - # access to the database should be made, reconfigure relation - # with the service units local address and defer execution - access_network = relation_get('access-network') - if access_network is not None: - if self.relation_prefix is not None: - hostname_key = "{}_hostname".format(self.relation_prefix) - else: - hostname_key = "hostname" - access_hostname = get_address_in_network( - access_network, - unit_get('private-address')) - set_hostname = relation_get(attribute=hostname_key, - unit=local_unit()) - if set_hostname != access_hostname: - relation_set(relation_settings={hostname_key: access_hostname}) - return None # Defer any further hook execution for now.... - - password_setting = 'password' - if self.relation_prefix: - password_setting = self.relation_prefix + '_password' - - if self.relation_id: - rids = [self.relation_id] - else: - rids = relation_ids(self.interfaces[0]) - - for rid in rids: - self.related = True - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - host = rdata.get('db_host') - host = format_ipv6_addr(host) or host - ctxt = { - 'database_host': host, - 'database': self.database, - 'database_user': self.user, - 'database_password': rdata.get(password_setting), - 'database_type': 'mysql' - } - # Note(coreycb): We can drop mysql+pymysql if we want when the - # following review lands, though it seems mysql+pymysql would - # be preferred. 
https://review.openstack.org/#/c/462190/ - if snap_install_requested(): - ctxt['database_type'] = 'mysql+pymysql' - if self.context_complete(ctxt): - db_ssl(rdata, ctxt, self.ssl_dir) - return ctxt - return {} - - -class PostgresqlDBContext(OSContextGenerator): - interfaces = ['pgsql-db'] - - def __init__(self, database=None): - self.database = database - - def __call__(self): - self.database = self.database or config('database') - if self.database is None: - log('Could not generate postgresql_db context. Missing required ' - 'charm config options. (database name)', level=ERROR) - raise OSContextError - - ctxt = {} - for rid in relation_ids(self.interfaces[0]): - self.related = True - for unit in related_units(rid): - rel_host = relation_get('host', rid=rid, unit=unit) - rel_user = relation_get('user', rid=rid, unit=unit) - rel_passwd = relation_get('password', rid=rid, unit=unit) - ctxt = {'database_host': rel_host, - 'database': self.database, - 'database_user': rel_user, - 'database_password': rel_passwd, - 'database_type': 'postgresql'} - if self.context_complete(ctxt): - return ctxt - - return {} - - -def db_ssl(rdata, ctxt, ssl_dir): - if 'ssl_ca' in rdata and ssl_dir: - ca_path = os.path.join(ssl_dir, 'db-client.ca') - with open(ca_path, 'wb') as fh: - fh.write(b64decode(rdata['ssl_ca'])) - - ctxt['database_ssl_ca'] = ca_path - elif 'ssl_ca' in rdata: - log("Charm not setup for ssl support but ssl ca found", level=INFO) - return ctxt - - if 'ssl_cert' in rdata: - cert_path = os.path.join( - ssl_dir, 'db-client.cert') - if not os.path.exists(cert_path): - log("Waiting 1m for ssl client cert validity", level=INFO) - time.sleep(60) - - with open(cert_path, 'wb') as fh: - fh.write(b64decode(rdata['ssl_cert'])) - - ctxt['database_ssl_cert'] = cert_path - key_path = os.path.join(ssl_dir, 'db-client.key') - with open(key_path, 'wb') as fh: - fh.write(b64decode(rdata['ssl_key'])) - - ctxt['database_ssl_key'] = key_path - - return ctxt - - -class IdentityServiceContext(OSContextGenerator): - - def __init__(self, - service=None, - service_user=None, - rel_name='identity-service'): - self.service = service - self.service_user = service_user - self.rel_name = rel_name - self.interfaces = [self.rel_name] - - def _setup_pki_cache(self): - if self.service and self.service_user: - # This is required for pki token signing if we don't want /tmp to - # be used. 
- cachedir = '/var/cache/%s' % (self.service) - if not os.path.isdir(cachedir): - log("Creating service cache dir %s" % (cachedir), level=DEBUG) - mkdir(path=cachedir, owner=self.service_user, - group=self.service_user, perms=0o700) - - return cachedir - return None - - def __call__(self): - log('Generating template context for ' + self.rel_name, level=DEBUG) - ctxt = {} - - cachedir = self._setup_pki_cache() - if cachedir: - ctxt['signing_dir'] = cachedir - - for rid in relation_ids(self.rel_name): - self.related = True - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - serv_host = rdata.get('service_host') - serv_host = format_ipv6_addr(serv_host) or serv_host - auth_host = rdata.get('auth_host') - auth_host = format_ipv6_addr(auth_host) or auth_host - svc_protocol = rdata.get('service_protocol') or 'http' - auth_protocol = rdata.get('auth_protocol') or 'http' - api_version = rdata.get('api_version') or '2.0' - ctxt.update({'service_port': rdata.get('service_port'), - 'service_host': serv_host, - 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), - 'admin_tenant_name': rdata.get('service_tenant'), - 'admin_user': rdata.get('service_username'), - 'admin_password': rdata.get('service_password'), - 'service_protocol': svc_protocol, - 'auth_protocol': auth_protocol, - 'api_version': api_version}) - - if float(api_version) > 2: - ctxt.update({'admin_domain_name': - rdata.get('service_domain')}) - - if self.context_complete(ctxt): - # NOTE(jamespage) this is required for >= icehouse - # so a missing value just indicates keystone needs - # upgrading - ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') - ctxt['admin_domain_id'] = rdata.get('service_domain_id') - return ctxt - - return {} - - -class IdentityCredentialsContext(IdentityServiceContext): - '''Context for identity-credentials interface type''' - - def __init__(self, - service=None, - service_user=None, - rel_name='identity-credentials'): - super(IdentityCredentialsContext, self).__init__(service, - service_user, - rel_name) - - def __call__(self): - log('Generating template context for ' + self.rel_name, level=DEBUG) - ctxt = {} - - cachedir = self._setup_pki_cache() - if cachedir: - ctxt['signing_dir'] = cachedir - - for rid in relation_ids(self.rel_name): - self.related = True - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - credentials_host = rdata.get('credentials_host') - credentials_host = ( - format_ipv6_addr(credentials_host) or credentials_host - ) - auth_host = rdata.get('auth_host') - auth_host = format_ipv6_addr(auth_host) or auth_host - svc_protocol = rdata.get('credentials_protocol') or 'http' - auth_protocol = rdata.get('auth_protocol') or 'http' - api_version = rdata.get('api_version') or '2.0' - ctxt.update({ - 'service_port': rdata.get('credentials_port'), - 'service_host': credentials_host, - 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), - 'admin_tenant_name': rdata.get('credentials_project'), - 'admin_tenant_id': rdata.get('credentials_project_id'), - 'admin_user': rdata.get('credentials_username'), - 'admin_password': rdata.get('credentials_password'), - 'service_protocol': svc_protocol, - 'auth_protocol': auth_protocol, - 'api_version': api_version - }) - - if float(api_version) > 2: - ctxt.update({'admin_domain_name': - rdata.get('domain')}) - - if self.context_complete(ctxt): - return ctxt - - return {} - - -class AMQPContext(OSContextGenerator): - - def __init__(self, ssl_dir=None, rel_name='amqp', 
relation_prefix=None, - relation_id=None): - self.ssl_dir = ssl_dir - self.rel_name = rel_name - self.relation_prefix = relation_prefix - self.interfaces = [rel_name] - self.relation_id = relation_id - - def __call__(self): - log('Generating template context for amqp', level=DEBUG) - conf = config() - if self.relation_prefix: - user_setting = '%s-rabbit-user' % (self.relation_prefix) - vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) - else: - user_setting = 'rabbit-user' - vhost_setting = 'rabbit-vhost' - - try: - username = conf[user_setting] - vhost = conf[vhost_setting] - except KeyError as e: - log('Could not generate shared_db context. Missing required charm ' - 'config options: %s.' % e, level=ERROR) - raise OSContextError - - ctxt = {} - if self.relation_id: - rids = [self.relation_id] - else: - rids = relation_ids(self.rel_name) - for rid in rids: - ha_vip_only = False - self.related = True - transport_hosts = None - rabbitmq_port = '5672' - for unit in related_units(rid): - if relation_get('clustered', rid=rid, unit=unit): - ctxt['clustered'] = True - vip = relation_get('vip', rid=rid, unit=unit) - vip = format_ipv6_addr(vip) or vip - ctxt['rabbitmq_host'] = vip - transport_hosts = [vip] - else: - host = relation_get('private-address', rid=rid, unit=unit) - host = format_ipv6_addr(host) or host - ctxt['rabbitmq_host'] = host - transport_hosts = [host] - - ctxt.update({ - 'rabbitmq_user': username, - 'rabbitmq_password': relation_get('password', rid=rid, - unit=unit), - 'rabbitmq_virtual_host': vhost, - }) - - ssl_port = relation_get('ssl_port', rid=rid, unit=unit) - if ssl_port: - ctxt['rabbit_ssl_port'] = ssl_port - rabbitmq_port = ssl_port - - ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) - if ssl_ca: - ctxt['rabbit_ssl_ca'] = ssl_ca - - if relation_get('ha_queues', rid=rid, unit=unit) is not None: - ctxt['rabbitmq_ha_queues'] = True - - ha_vip_only = relation_get('ha-vip-only', - rid=rid, unit=unit) is not None - - if self.context_complete(ctxt): - if 'rabbit_ssl_ca' in ctxt: - if not self.ssl_dir: - log("Charm not setup for ssl support but ssl ca " - "found", level=INFO) - break - - ca_path = os.path.join( - self.ssl_dir, 'rabbit-client-ca.pem') - with open(ca_path, 'wb') as fh: - fh.write(b64decode(ctxt['rabbit_ssl_ca'])) - ctxt['rabbit_ssl_ca'] = ca_path - - # Sufficient information found = break out! 
- break - - # Used for active/active rabbitmq >= grizzly - if (('clustered' not in ctxt or ha_vip_only) and - len(related_units(rid)) > 1): - rabbitmq_hosts = [] - for unit in related_units(rid): - host = relation_get('private-address', rid=rid, unit=unit) - host = format_ipv6_addr(host) or host - rabbitmq_hosts.append(host) - - rabbitmq_hosts = sorted(rabbitmq_hosts) - ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) - transport_hosts = rabbitmq_hosts - - if transport_hosts: - transport_url_hosts = ','.join([ - "{}:{}@{}:{}".format(ctxt['rabbitmq_user'], - ctxt['rabbitmq_password'], - host_, - rabbitmq_port) - for host_ in transport_hosts]) - ctxt['transport_url'] = "rabbit://{}/{}".format( - transport_url_hosts, vhost) - - oslo_messaging_flags = conf.get('oslo-messaging-flags', None) - if oslo_messaging_flags: - ctxt['oslo_messaging_flags'] = config_flags_parser( - oslo_messaging_flags) - - if not self.complete: - return {} - - return ctxt - - -class CephContext(OSContextGenerator): - """Generates context for /etc/ceph/ceph.conf templates.""" - interfaces = ['ceph'] - - def __call__(self): - if not relation_ids('ceph'): - return {} - - log('Generating template context for ceph', level=DEBUG) - mon_hosts = [] - ctxt = { - 'use_syslog': str(config('use-syslog')).lower() - } - for rid in relation_ids('ceph'): - for unit in related_units(rid): - if not ctxt.get('auth'): - ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) - if not ctxt.get('key'): - ctxt['key'] = relation_get('key', rid=rid, unit=unit) - if not ctxt.get('rbd_features'): - default_features = relation_get('rbd-features', rid=rid, unit=unit) - if default_features is not None: - ctxt['rbd_features'] = default_features - - ceph_addrs = relation_get('ceph-public-address', rid=rid, - unit=unit) - if ceph_addrs: - for addr in ceph_addrs.split(' '): - mon_hosts.append(format_ipv6_addr(addr) or addr) - else: - priv_addr = relation_get('private-address', rid=rid, - unit=unit) - mon_hosts.append(format_ipv6_addr(priv_addr) or priv_addr) - - ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) - - if not os.path.isdir('/etc/ceph'): - os.mkdir('/etc/ceph') - - if not self.context_complete(ctxt): - return {} - - ensure_packages(['ceph-common']) - return ctxt - - -class HAProxyContext(OSContextGenerator): - """Provides half a context for the haproxy template, which describes - all peers to be included in the cluster. Each charm needs to include - its own context generator that describes the port mapping. 
- - :side effect: mkdir is called on HAPROXY_RUN_DIR - """ - interfaces = ['cluster'] - - def __init__(self, singlenode_mode=False, - address_types=ADDRESS_TYPES): - self.address_types = address_types - self.singlenode_mode = singlenode_mode - - def __call__(self): - if not os.path.isdir(HAPROXY_RUN_DIR): - mkdir(path=HAPROXY_RUN_DIR) - if not relation_ids('cluster') and not self.singlenode_mode: - return {} - - l_unit = local_unit().replace('/', '-') - cluster_hosts = {} - - # NOTE(jamespage): build out map of configured network endpoints - # and associated backends - for addr_type in self.address_types: - cfg_opt = 'os-{}-network'.format(addr_type) - # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather - # than 'internal' - if addr_type == 'internal': - _addr_map_type = INTERNAL - else: - _addr_map_type = addr_type - # Network spaces aware - laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'], - config(cfg_opt)) - if laddr: - netmask = get_netmask_for_address(laddr) - cluster_hosts[laddr] = { - 'network': "{}/{}".format(laddr, - netmask), - 'backends': collections.OrderedDict([(l_unit, - laddr)]) - } - for rid in relation_ids('cluster'): - for unit in sorted(related_units(rid)): - # API Charms will need to set {addr_type}-address with - # get_relation_ip(addr_type) - _laddr = relation_get('{}-address'.format(addr_type), - rid=rid, unit=unit) - if _laddr: - _unit = unit.replace('/', '-') - cluster_hosts[laddr]['backends'][_unit] = _laddr - - # NOTE(jamespage) add backend based on get_relation_ip - this - # will either be the only backend or the fallback if no acls - # match in the frontend - # Network spaces aware - addr = get_relation_ip('cluster') - cluster_hosts[addr] = {} - netmask = get_netmask_for_address(addr) - cluster_hosts[addr] = { - 'network': "{}/{}".format(addr, netmask), - 'backends': collections.OrderedDict([(l_unit, - addr)]) - } - for rid in relation_ids('cluster'): - for unit in sorted(related_units(rid)): - # API Charms will need to set their private-address with - # get_relation_ip('cluster') - _laddr = relation_get('private-address', - rid=rid, unit=unit) - if _laddr: - _unit = unit.replace('/', '-') - cluster_hosts[addr]['backends'][_unit] = _laddr - - ctxt = { - 'frontends': cluster_hosts, - 'default_backend': addr - } - - if config('haproxy-server-timeout'): - ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') - - if config('haproxy-client-timeout'): - ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') - - if config('haproxy-queue-timeout'): - ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') - - if config('haproxy-connect-timeout'): - ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') - - if config('prefer-ipv6'): - ctxt['local_host'] = 'ip6-localhost' - ctxt['haproxy_host'] = '::' - else: - ctxt['local_host'] = '127.0.0.1' - ctxt['haproxy_host'] = '0.0.0.0' - - ctxt['ipv6_enabled'] = not is_ipv6_disabled() - - ctxt['stat_port'] = '8888' - - db = kv() - ctxt['stat_password'] = db.get('stat-password') - if not ctxt['stat_password']: - ctxt['stat_password'] = db.set('stat-password', - pwgen(32)) - db.flush() - - for frontend in cluster_hosts: - if (len(cluster_hosts[frontend]['backends']) > 1 or - self.singlenode_mode): - # Enable haproxy when we have enough peers. 
-                log('Ensuring haproxy enabled in /etc/default/haproxy.',
-                    level=DEBUG)
-                with open('/etc/default/haproxy', 'w') as out:
-                    out.write('ENABLED=1\n')
-
-                return ctxt
-
-        log('HAProxy context is incomplete, this unit has no peers.',
-            level=INFO)
-        return {}
-
-
-class ImageServiceContext(OSContextGenerator):
-    interfaces = ['image-service']
-
-    def __call__(self):
-        """Obtains the glance API server from the image-service relation.
-        Useful in nova and cinder (currently).
-        """
-        log('Generating template context for image-service.', level=DEBUG)
-        rids = relation_ids('image-service')
-        if not rids:
-            return {}
-
-        for rid in rids:
-            for unit in related_units(rid):
-                api_server = relation_get('glance-api-server',
-                                          rid=rid, unit=unit)
-                if api_server:
-                    return {'glance_api_servers': api_server}
-
-        log("ImageService context is incomplete. Missing required relation "
-            "data.", level=INFO)
-        return {}
-
-
-class ApacheSSLContext(OSContextGenerator):
-    """Generates a context for an apache vhost configuration that configures
-    HTTPS reverse proxying for one or many endpoints. Generated context
-    looks something like::
-
-        {
-            'namespace': 'cinder',
-            'private_address': 'iscsi.mycinderhost.com',
-            'endpoints': [(8776, 8766), (8777, 8767)]
-        }
-
-    The endpoints list consists of tuples mapping external ports
-    to internal ports.
-    """
-    interfaces = ['https']
-
-    # charms should inherit this context and set external ports
-    # and service namespace accordingly.
-    external_ports = []
-    service_namespace = None
-
-    def enable_modules(self):
-        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers']
-        check_call(cmd)
-
-    def configure_cert(self, cn=None):
-        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
-        mkdir(path=ssl_dir)
-        cert, key = get_cert(cn)
-        if cert and key:
-            if cn:
-                cert_filename = 'cert_{}'.format(cn)
-                key_filename = 'key_{}'.format(cn)
-            else:
-                cert_filename = 'cert'
-                key_filename = 'key'
-
-            write_file(path=os.path.join(ssl_dir, cert_filename),
-                       content=b64decode(cert), perms=0o640)
-            write_file(path=os.path.join(ssl_dir, key_filename),
-                       content=b64decode(key), perms=0o640)
-
-    def configure_ca(self):
-        ca_cert = get_ca_cert()
-        if ca_cert:
-            install_ca_cert(b64decode(ca_cert))
-
-    def canonical_names(self):
-        """Figure out which canonical names clients will access this service.
-        """
-        cns = []
-        for r_id in relation_ids('identity-service'):
-            for unit in related_units(r_id):
-                rdata = relation_get(rid=r_id, unit=unit)
-                for k in rdata:
-                    if k.startswith('ssl_key_'):
-                        # NOTE: slice off the prefix rather than lstrip() it;
-                        # lstrip() strips *characters*, not a prefix, and
-                        # would mangle names such as 'keystone'.
-                        cns.append(k[len('ssl_key_'):])
-
-        return sorted(list(set(cns)))
-
-    def get_network_addresses(self):
-        """For each network configured, return corresponding address and
-        hostname or vip (if available).
-
-        Returns a list of tuples of the form:
-
-            [(address_in_net_a, hostname_in_net_a),
-             (address_in_net_b, hostname_in_net_b),
-             ...]
-
-        or, if no hostname(s) available:
-
-            [(address_in_net_a, vip_in_net_a),
-             (address_in_net_b, vip_in_net_b),
-             ...]
-
-        or, if no vip(s) available:
-
-            [(address_in_net_a, address_in_net_a),
-             (address_in_net_b, address_in_net_b),
-             ...]
-        """
-        addresses = []
-        for net_type in [INTERNAL, ADMIN, PUBLIC]:
-            net_config = config(ADDRESS_MAP[net_type]['config'])
-            # NOTE(jamespage): Fallback must always be private address
-            #                  as this is used to bind services on the
-            #                  local unit.
- fallback = unit_get("private-address") - if net_config: - addr = get_address_in_network(net_config, - fallback) - else: - try: - addr = network_get_primary_address( - ADDRESS_MAP[net_type]['binding'] - ) - except NotImplementedError: - addr = fallback - - endpoint = resolve_address(net_type) - addresses.append((addr, endpoint)) - - return sorted(set(addresses)) - - def __call__(self): - if isinstance(self.external_ports, six.string_types): - self.external_ports = [self.external_ports] - - if not self.external_ports or not https(): - return {} - - use_keystone_ca = True - for rid in relation_ids('certificates'): - if related_units(rid): - use_keystone_ca = False - - if use_keystone_ca: - self.configure_ca() - - self.enable_modules() - - ctxt = {'namespace': self.service_namespace, - 'endpoints': [], - 'ext_ports': []} - - if use_keystone_ca: - cns = self.canonical_names() - if cns: - for cn in cns: - self.configure_cert(cn) - else: - # Expect cert/key provided in config (currently assumed that ca - # uses ip for cn) - for net_type in (INTERNAL, ADMIN, PUBLIC): - cn = resolve_address(endpoint_type=net_type) - self.configure_cert(cn) - - addresses = self.get_network_addresses() - for address, endpoint in addresses: - for api_port in self.external_ports: - ext_port = determine_apache_port(api_port, - singlenode_mode=True) - int_port = determine_api_port(api_port, singlenode_mode=True) - portmap = (address, endpoint, int(ext_port), int(int_port)) - ctxt['endpoints'].append(portmap) - ctxt['ext_ports'].append(int(ext_port)) - - ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) - return ctxt - - -class NeutronContext(OSContextGenerator): - interfaces = [] - - @property - def plugin(self): - return None - - @property - def network_manager(self): - return None - - @property - def packages(self): - return neutron_plugin_attribute(self.plugin, 'packages', - self.network_manager) - - @property - def neutron_security_groups(self): - return None - - def _ensure_packages(self): - for pkgs in self.packages: - ensure_packages(pkgs) - - def ovs_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - ovs_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'ovs', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return ovs_ctxt - - def nuage_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - nuage_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'vsp', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return nuage_ctxt - - def nvp_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - nvp_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'nvp', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return nvp_ctxt - - def n1kv_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - n1kv_config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - n1kv_user_config_flags = config('n1kv-config-flags') - restrict_policy_profiles = config('n1kv-restrict-policy-profiles') - 
n1kv_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'n1kv', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': n1kv_config, - 'vsm_ip': config('n1kv-vsm-ip'), - 'vsm_username': config('n1kv-vsm-username'), - 'vsm_password': config('n1kv-vsm-password'), - 'restrict_policy_profiles': restrict_policy_profiles} - - if n1kv_user_config_flags: - flags = config_flags_parser(n1kv_user_config_flags) - n1kv_ctxt['user_config_flags'] = flags - - return n1kv_ctxt - - def calico_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - calico_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'Calico', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return calico_ctxt - - def neutron_ctxt(self): - if https(): - proto = 'https' - else: - proto = 'http' - - if is_clustered(): - host = config('vip') - else: - host = unit_get('private-address') - - ctxt = {'network_manager': self.network_manager, - 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} - return ctxt - - def pg_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - ovs_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'plumgrid', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - return ovs_ctxt - - def midonet_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - midonet_config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - mido_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'midonet', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': midonet_config} - - return mido_ctxt - - def __call__(self): - if self.network_manager not in ['quantum', 'neutron']: - return {} - - if not self.plugin: - return {} - - ctxt = self.neutron_ctxt() - - if self.plugin == 'ovs': - ctxt.update(self.ovs_ctxt()) - elif self.plugin in ['nvp', 'nsx']: - ctxt.update(self.nvp_ctxt()) - elif self.plugin == 'n1kv': - ctxt.update(self.n1kv_ctxt()) - elif self.plugin == 'Calico': - ctxt.update(self.calico_ctxt()) - elif self.plugin == 'vsp': - ctxt.update(self.nuage_ctxt()) - elif self.plugin == 'plumgrid': - ctxt.update(self.pg_ctxt()) - elif self.plugin == 'midonet': - ctxt.update(self.midonet_ctxt()) - - alchemy_flags = config('neutron-alchemy-flags') - if alchemy_flags: - flags = config_flags_parser(alchemy_flags) - ctxt['neutron_alchemy_flags'] = flags - - return ctxt - - -class NeutronPortContext(OSContextGenerator): - - def resolve_ports(self, ports): - """Resolve NICs not yet bound to bridge(s) - - If hwaddress provided then returns resolved hwaddress otherwise NIC. 
- """ - if not ports: - return None - - hwaddr_to_nic = {} - hwaddr_to_ip = {} - for nic in list_nics(): - # Ignore virtual interfaces (bond masters will be identified from - # their slaves) - if not is_phy_iface(nic): - continue - - _nic = get_bond_master(nic) - if _nic: - log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), - level=DEBUG) - nic = _nic - - hwaddr = get_nic_hwaddr(nic) - hwaddr_to_nic[hwaddr] = nic - addresses = get_ipv4_addr(nic, fatal=False) - addresses += get_ipv6_addr(iface=nic, fatal=False) - hwaddr_to_ip[hwaddr] = addresses - - resolved = [] - mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) - for entry in ports: - if re.match(mac_regex, entry): - # NIC is in known NICs and does NOT hace an IP address - if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]: - # If the nic is part of a bridge then don't use it - if is_bridge_member(hwaddr_to_nic[entry]): - continue - - # Entry is a MAC address for a valid interface that doesn't - # have an IP address assigned yet. - resolved.append(hwaddr_to_nic[entry]) - else: - # If the passed entry is not a MAC address, assume it's a valid - # interface, and that the user put it there on purpose (we can - # trust it to be the real external network). - resolved.append(entry) - - # Ensure no duplicates - return list(set(resolved)) - - -class OSConfigFlagContext(OSContextGenerator): - """Provides support for user-defined config flags. - - Users can define a comma-seperated list of key=value pairs - in the charm configuration and apply them at any point in - any file by using a template flag. - - Sometimes users might want config flags inserted within a - specific section so this class allows users to specify the - template flag name, allowing for multiple template flags - (sections) within the same context. - - NOTE: the value of config-flags may be a comma-separated list of - key=value pairs and some Openstack config files support - comma-separated lists as values. - """ - - def __init__(self, charm_flag='config-flags', - template_flag='user_config_flags'): - """ - :param charm_flag: config flags in charm configuration. - :param template_flag: insert point for user-defined flags in template - file. - """ - super(OSConfigFlagContext, self).__init__() - self._charm_flag = charm_flag - self._template_flag = template_flag - - def __call__(self): - config_flags = config(self._charm_flag) - if not config_flags: - return {} - - return {self._template_flag: - config_flags_parser(config_flags)} - - -class LibvirtConfigFlagsContext(OSContextGenerator): - """ - This context provides support for extending - the libvirt section through user-defined flags. - """ - def __call__(self): - ctxt = {} - libvirt_flags = config('libvirt-flags') - if libvirt_flags: - ctxt['libvirt_flags'] = config_flags_parser( - libvirt_flags) - return ctxt - - -class SubordinateConfigContext(OSContextGenerator): - - """ - Responsible for inspecting relations to subordinates that - may be exporting required config via a json blob. - - The subordinate interface allows subordinates to export their - configuration requirements to the principle for multiple config - files and multiple services. 
-class SubordinateConfigContext(OSContextGenerator):
-
-    """
-    Responsible for inspecting relations to subordinates that
-    may be exporting required config via a json blob.
-
-    The subordinate interface allows subordinates to export their
-    configuration requirements to the principal for multiple config
-    files and multiple services.  I.e., a subordinate that has interfaces
-    to both glance and nova may export the following yaml blob as json::
-
-        glance:
-            /etc/glance/glance-api.conf:
-                sections:
-                    DEFAULT:
-                        - [key1, value1]
-            /etc/glance/glance-registry.conf:
-                MYSECTION:
-                    - [key2, value2]
-        nova:
-            /etc/nova/nova.conf:
-                sections:
-                    DEFAULT:
-                        - [key3, value3]
-
-
-    It is then up to the principal charms to subscribe this context to
-    the service+config file it is interested in.  Configuration data will
-    be available in the template context, in glance's case, as::
-
-        ctxt = {
-            ... other context ...
-            'subordinate_configuration': {
-                'DEFAULT': {
-                    'key1': 'value1',
-                },
-                'MYSECTION': {
-                    'key2': 'value2',
-                },
-            }
-        }
-    """
-
-    def __init__(self, service, config_file, interface):
-        """
-        :param service     : Service name key to query in any subordinate
-                             data found
-        :param config_file : Service's config file to query sections
-        :param interface   : Subordinate interface to inspect
-        """
-        self.config_file = config_file
-        if isinstance(service, list):
-            self.services = service
-        else:
-            self.services = [service]
-        if isinstance(interface, list):
-            self.interfaces = interface
-        else:
-            self.interfaces = [interface]
-
-    def __call__(self):
-        ctxt = {'sections': {}}
-        rids = []
-        for interface in self.interfaces:
-            rids.extend(relation_ids(interface))
-        for rid in rids:
-            for unit in related_units(rid):
-                sub_config = relation_get('subordinate_configuration',
-                                          rid=rid, unit=unit)
-                if sub_config and sub_config != '':
-                    try:
-                        sub_config = json.loads(sub_config)
-                    except Exception:
-                        log('Could not parse JSON from '
-                            'subordinate_configuration setting from %s'
-                            % rid, level=ERROR)
-                        continue
-
-                    for service in self.services:
-                        if service not in sub_config:
-                            log('Found subordinate_configuration on %s but it '
-                                'contained nothing for %s service'
-                                % (rid, service), level=INFO)
-                            continue
-
-                        sub_config = sub_config[service]
-                        if self.config_file not in sub_config:
-                            log('Found subordinate_configuration on %s but it '
-                                'contained nothing for %s'
-                                % (rid, self.config_file), level=INFO)
-                            continue
-
-                        sub_config = sub_config[self.config_file]
-                        for k, v in six.iteritems(sub_config):
-                            if k == 'sections':
-                                for section, config_list in six.iteritems(v):
-                                    log("adding section '%s'" % (section),
-                                        level=DEBUG)
-                                    if ctxt[k].get(section):
-                                        ctxt[k][section].extend(config_list)
-                                    else:
-                                        ctxt[k][section] = config_list
-                            else:
-                                ctxt[k] = v
-        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
-        return ctxt
-
-
-class LogLevelContext(OSContextGenerator):
-
-    def __call__(self):
-        ctxt = {}
-        ctxt['debug'] = \
-            False if config('debug') is None else config('debug')
-        ctxt['verbose'] = \
-            False if config('verbose') is None else config('verbose')
-
-        return ctxt
-
-
-class SyslogContext(OSContextGenerator):
-
-    def __call__(self):
-        ctxt = {'use_syslog': config('use-syslog')}
-        return ctxt
-
-
-class BindHostContext(OSContextGenerator):
-
-    def __call__(self):
-        if config('prefer-ipv6'):
-            return {'bind_host': '::'}
-        else:
-            return {'bind_host': '0.0.0.0'}
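
To make the worker sizing implemented below concrete, a worked example
(hypothetical 2-core unit with the default multiplier of 2, so
_calculate_workers() returns 4):

    import math

    total_processes = 4  # 2 CPUs * DEFAULT_MULTIPLIER
    processes = int(math.ceil(1.00 * total_processes))         # -> 4
    admin_processes = int(math.ceil(0.25 * total_processes))   # -> 1
    public_processes = int(math.ceil(0.75 * total_processes))  # -> 3

These are the per-vhost process counts that WSGIWorkerConfigContext (defined
below) derives from its default weights.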
-MAX_DEFAULT_WORKERS = 4
-DEFAULT_MULTIPLIER = 2
-
-
-def _calculate_workers():
-    '''
-    Determine the number of worker processes based on the CPU
-    count of the unit containing the application.
-
-    Workers will be limited to MAX_DEFAULT_WORKERS in
-    container environments where no worker-multiplier configuration
-    option has been set.
-
-    @returns int: number of worker processes to use
-    '''
-    multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER
-    count = int(_num_cpus() * multiplier)
-    if multiplier > 0 and count == 0:
-        count = 1
-
-    if config('worker-multiplier') is None and is_container():
-        # NOTE(jamespage): Limit unconfigured worker-multiplier
-        #                  to MAX_DEFAULT_WORKERS to avoid insane
-        #                  worker configuration in LXD containers
-        #                  on large servers
-        # Reference: https://pad.lv/1665270
-        count = min(count, MAX_DEFAULT_WORKERS)
-
-    return count
-
-
-def _num_cpus():
-    '''
-    Compatibility wrapper for calculating the number of CPUs
-    a unit has.
-
-    @returns: int: number of CPU cores detected
-    '''
-    try:
-        return psutil.cpu_count()
-    except AttributeError:
-        return psutil.NUM_CPUS
-
-
-class WorkerConfigContext(OSContextGenerator):
-
-    def __call__(self):
-        ctxt = {"workers": _calculate_workers()}
-        return ctxt
-
-
-class WSGIWorkerConfigContext(WorkerConfigContext):
-
-    def __init__(self, name=None, script=None, admin_script=None,
-                 public_script=None, process_weight=1.00,
-                 admin_process_weight=0.25, public_process_weight=0.75):
-        self.service_name = name
-        self.user = name
-        self.group = name
-        self.script = script
-        self.admin_script = admin_script
-        self.public_script = public_script
-        self.process_weight = process_weight
-        self.admin_process_weight = admin_process_weight
-        self.public_process_weight = public_process_weight
-
-    def __call__(self):
-        total_processes = _calculate_workers()
-        ctxt = {
-            "service_name": self.service_name,
-            "user": self.user,
-            "group": self.group,
-            "script": self.script,
-            "admin_script": self.admin_script,
-            "public_script": self.public_script,
-            "processes": int(math.ceil(self.process_weight * total_processes)),
-            "admin_processes": int(math.ceil(self.admin_process_weight *
-                                             total_processes)),
-            "public_processes": int(math.ceil(self.public_process_weight *
-                                              total_processes)),
-            "threads": 1,
-        }
-        return ctxt
-
-
-class ZeroMQContext(OSContextGenerator):
-    interfaces = ['zeromq-configuration']
-
-    def __call__(self):
-        ctxt = {}
-        if is_relation_made('zeromq-configuration', 'host'):
-            for rid in relation_ids('zeromq-configuration'):
-                for unit in related_units(rid):
-                    ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
-                    ctxt['zmq_host'] = relation_get('host', unit, rid)
-                    ctxt['zmq_redis_address'] = relation_get(
-                        'zmq_redis_address', unit, rid)
-
-        return ctxt
-
-
-class NotificationDriverContext(OSContextGenerator):
-
-    def __init__(self, zmq_relation='zeromq-configuration',
-                 amqp_relation='amqp'):
-        """
-        :param zmq_relation: Name of Zeromq relation to check
-        """
-        self.zmq_relation = zmq_relation
-        self.amqp_relation = amqp_relation
-
-    def __call__(self):
-        ctxt = {'notifications': 'False'}
-        if is_relation_made(self.amqp_relation):
-            ctxt['notifications'] = "True"
-
-        return ctxt
-
-
-class SysctlContext(OSContextGenerator):
-    """This context checks if the 'sysctl' option exists on configuration
-    then creates a file with the loaded contents"""
-    def __call__(self):
-        sysctl_dict = config('sysctl')
-        if sysctl_dict:
-            sysctl_create(sysctl_dict,
-                          '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
-        return {'sysctl': sysctl_dict}
-
-
-class NeutronAPIContext(OSContextGenerator):
-    '''
-    Inspects current neutron-plugin-api relation for neutron settings. Return
-    defaults if it is not present.
- ''' - interfaces = ['neutron-plugin-api'] - - def __call__(self): - self.neutron_defaults = { - 'l2_population': { - 'rel_key': 'l2-population', - 'default': False, - }, - 'overlay_network_type': { - 'rel_key': 'overlay-network-type', - 'default': 'gre', - }, - 'neutron_security_groups': { - 'rel_key': 'neutron-security-groups', - 'default': False, - }, - 'network_device_mtu': { - 'rel_key': 'network-device-mtu', - 'default': None, - }, - 'enable_dvr': { - 'rel_key': 'enable-dvr', - 'default': False, - }, - 'enable_l3ha': { - 'rel_key': 'enable-l3ha', - 'default': False, - }, - 'dns_domain': { - 'rel_key': 'dns-domain', - 'default': None, - }, - 'polling_interval': { - 'rel_key': 'polling-interval', - 'default': 2, - }, - 'rpc_response_timeout': { - 'rel_key': 'rpc-response-timeout', - 'default': 60, - }, - 'report_interval': { - 'rel_key': 'report-interval', - 'default': 30, - }, - 'enable_qos': { - 'rel_key': 'enable-qos', - 'default': False, - }, - } - ctxt = self.get_neutron_options({}) - for rid in relation_ids('neutron-plugin-api'): - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - # The l2-population key is used by the context as a way of - # checking if the api service on the other end is sending data - # in a recent format. - if 'l2-population' in rdata: - ctxt.update(self.get_neutron_options(rdata)) - - if ctxt['enable_qos']: - ctxt['extension_drivers'] = 'qos' - else: - ctxt['extension_drivers'] = '' - - return ctxt - - def get_neutron_options(self, rdata): - settings = {} - for nkey in self.neutron_defaults.keys(): - defv = self.neutron_defaults[nkey]['default'] - rkey = self.neutron_defaults[nkey]['rel_key'] - if rkey in rdata.keys(): - if type(defv) is bool: - settings[nkey] = bool_from_string(rdata[rkey]) - else: - settings[nkey] = rdata[rkey] - else: - settings[nkey] = defv - return settings - - -class ExternalPortContext(NeutronPortContext): - - def __call__(self): - ctxt = {} - ports = config('ext-port') - if ports: - ports = [p.strip() for p in ports.split()] - ports = self.resolve_ports(ports) - if ports: - ctxt = {"ext_port": ports[0]} - napi_settings = NeutronAPIContext()() - mtu = napi_settings.get('network_device_mtu') - if mtu: - ctxt['ext_port_mtu'] = mtu - - return ctxt - - -class DataPortContext(NeutronPortContext): - - def __call__(self): - ports = config('data-port') - if ports: - # Map of {port/mac:bridge} - portmap = parse_data_port_mappings(ports) - ports = portmap.keys() - # Resolve provided ports or mac addresses and filter out those - # already attached to a bridge. - resolved = self.resolve_ports(ports) - # FIXME: is this necessary? - normalized = {get_nic_hwaddr(port): port for port in resolved - if port not in ports} - normalized.update({port: port for port in resolved - if port in ports}) - if resolved: - return {normalized[port]: bridge for port, bridge in - six.iteritems(portmap) if port in normalized.keys()} - - return None - - -class PhyNICMTUContext(DataPortContext): - - def __call__(self): - ctxt = {} - mappings = super(PhyNICMTUContext, self).__call__() - if mappings and mappings.keys(): - ports = sorted(mappings.keys()) - napi_settings = NeutronAPIContext()() - mtu = napi_settings.get('network_device_mtu') - all_ports = set() - # If any of ports is a vlan device, its underlying device must have - # mtu applied first. 
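
For example, for a VLAN device named bond0.100 (an illustrative name), the
kernel exposes /sys/class/net/bond0.100/lower_bond0, so the loop below also
adds bond0 to the set of devices that will receive the MTU.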
- for port in ports: - for lport in glob.glob("/sys/class/net/%s/lower_*" % port): - lport = os.path.basename(lport) - all_ports.add(lport.split('_')[1]) - - all_ports = list(all_ports) - all_ports.extend(ports) - if mtu: - ctxt["devs"] = '\\n'.join(all_ports) - ctxt['mtu'] = mtu - - return ctxt - - -class NetworkServiceContext(OSContextGenerator): - - def __init__(self, rel_name='quantum-network-service'): - self.rel_name = rel_name - self.interfaces = [rel_name] - - def __call__(self): - for rid in relation_ids(self.rel_name): - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - ctxt = { - 'keystone_host': rdata.get('keystone_host'), - 'service_port': rdata.get('service_port'), - 'auth_port': rdata.get('auth_port'), - 'service_tenant': rdata.get('service_tenant'), - 'service_username': rdata.get('service_username'), - 'service_password': rdata.get('service_password'), - 'quantum_host': rdata.get('quantum_host'), - 'quantum_port': rdata.get('quantum_port'), - 'quantum_url': rdata.get('quantum_url'), - 'region': rdata.get('region'), - 'service_protocol': - rdata.get('service_protocol') or 'http', - 'auth_protocol': - rdata.get('auth_protocol') or 'http', - 'api_version': - rdata.get('api_version') or '2.0', - } - if self.context_complete(ctxt): - return ctxt - return {} - - -class InternalEndpointContext(OSContextGenerator): - """Internal endpoint context. - - This context provides the endpoint type used for communication between - services e.g. between Nova and Cinder internally. Openstack uses Public - endpoints by default so this allows admins to optionally use internal - endpoints. - """ - def __call__(self): - return {'use_internal_endpoints': config('use-internal-endpoints')} - - -class VolumeAPIContext(InternalEndpointContext): - """Volume API context. - - This context provides information regarding the volume endpoint to use - when communicating between services. It determines which version of the - API is appropriate for use. - - This value will be determined in the resulting context dictionary - returned from calling the VolumeAPIContext object. Information provided - by this context is as follows: - - volume_api_version: the volume api version to use, currently - 'v2' or 'v3' - volume_catalog_info: the information to use for a cinder client - configuration that consumes API endpoints from the keystone - catalog. This is defined as the type:name:endpoint_type string. - """ - # FIXME(wolsen) This implementation is based on the provider being able - # to specify the package version to check but does not guarantee that the - # volume service api version selected is available. In practice, it is - # quite likely the volume service *is* providing the v3 volume service. - # This should be resolved when the service-discovery spec is implemented. - def __init__(self, pkg): - """ - Creates a new VolumeAPIContext for use in determining which version - of the Volume API should be used for communication. A package codename - should be supplied for determining the currently installed OpenStack - version. - - :param pkg: the package codename to use in order to determine the - component version (e.g. nova-common). See - charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more. 
- """ - super(VolumeAPIContext, self).__init__() - self._ctxt = None - if not pkg: - raise ValueError('package name must be provided in order to ' - 'determine current OpenStack version.') - self.pkg = pkg - - @property - def ctxt(self): - if self._ctxt is not None: - return self._ctxt - self._ctxt = self._determine_ctxt() - return self._ctxt - - def _determine_ctxt(self): - """Determines the Volume API endpoint information. - - Determines the appropriate version of the API that should be used - as well as the catalog_info string that would be supplied. Returns - a dict containing the volume_api_version and the volume_catalog_info. - """ - rel = os_release(self.pkg, base='icehouse') - version = '2' - if CompareOpenStackReleases(rel) >= 'pike': - version = '3' - - service_type = 'volumev{version}'.format(version=version) - service_name = 'cinderv{version}'.format(version=version) - endpoint_type = 'publicURL' - if config('use-internal-endpoints'): - endpoint_type = 'internalURL' - catalog_info = '{type}:{name}:{endpoint}'.format( - type=service_type, name=service_name, endpoint=endpoint_type) - - return { - 'volume_api_version': version, - 'volume_catalog_info': catalog_info, - } - - def __call__(self): - return self.ctxt - - -class AppArmorContext(OSContextGenerator): - """Base class for apparmor contexts.""" - - def __init__(self, profile_name=None): - self._ctxt = None - self.aa_profile = profile_name - self.aa_utils_packages = ['apparmor-utils'] - - @property - def ctxt(self): - if self._ctxt is not None: - return self._ctxt - self._ctxt = self._determine_ctxt() - return self._ctxt - - def _determine_ctxt(self): - """ - Validate aa-profile-mode settings is disable, enforce, or complain. - - :return ctxt: Dictionary of the apparmor profile or None - """ - if config('aa-profile-mode') in ['disable', 'enforce', 'complain']: - ctxt = {'aa_profile_mode': config('aa-profile-mode'), - 'ubuntu_release': lsb_release()['DISTRIB_RELEASE']} - if self.aa_profile: - ctxt['aa_profile'] = self.aa_profile - else: - ctxt = None - return ctxt - - def __call__(self): - return self.ctxt - - def install_aa_utils(self): - """ - Install packages required for apparmor configuration. - """ - log("Installing apparmor utils.") - ensure_packages(self.aa_utils_packages) - - def manually_disable_aa_profile(self): - """ - Manually disable an apparmor profile. - - If aa-profile-mode is set to disabled (default) this is required as the - template has been written but apparmor is yet unaware of the profile - and aa-disable aa-profile fails. Without this the profile would kick - into enforce mode on the next service restart. - - """ - profile_path = '/etc/apparmor.d' - disable_path = '/etc/apparmor.d/disable' - if not os.path.lexists(os.path.join(disable_path, self.aa_profile)): - os.symlink(os.path.join(profile_path, self.aa_profile), - os.path.join(disable_path, self.aa_profile)) - - def setup_aa_profile(self): - """ - Setup an apparmor profile. - The ctxt dictionary will contain the apparmor profile mode and - the apparmor profile name. - Makes calls out to aa-disable, aa-complain, or aa-enforce to setup - the apparmor profile. - """ - self() - if not self.ctxt: - log("Not enabling apparmor Profile") - return - self.install_aa_utils() - cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])] - cmd.append(self.ctxt['aa_profile']) - log("Setting up the apparmor profile for {} in {} mode." 
- "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode'])) - try: - check_call(cmd) - except CalledProcessError as e: - # If aa-profile-mode is set to disabled (default) manual - # disabling is required as the template has been written but - # apparmor is yet unaware of the profile and aa-disable aa-profile - # fails. If aa-disable learns to read profile files first this can - # be removed. - if self.ctxt['aa_profile_mode'] == 'disable': - log("Manually disabling the apparmor profile for {}." - "".format(self.ctxt['aa_profile'])) - self.manually_disable_aa_profile() - return - status_set('blocked', "Apparmor profile {} failed to be set to {}." - "".format(self.ctxt['aa_profile'], - self.ctxt['aa_profile_mode'])) - raise e - - -class MemcacheContext(OSContextGenerator): - """Memcache context - - This context provides options for configuring a local memcache client and - server for both IPv4 and IPv6 - """ - - def __init__(self, package=None): - """ - @param package: Package to examine to extrapolate OpenStack release. - Used when charms have no openstack-origin config - option (ie subordinates) - """ - self.package = package - - def __call__(self): - ctxt = {} - ctxt['use_memcache'] = enable_memcache(package=self.package) - if ctxt['use_memcache']: - # Trusty version of memcached does not support ::1 as a listen - # address so use host file entry instead - release = lsb_release()['DISTRIB_CODENAME'].lower() - if is_ipv6_disabled(): - if CompareHostReleases(release) > 'trusty': - ctxt['memcache_server'] = '127.0.0.1' - else: - ctxt['memcache_server'] = 'localhost' - ctxt['memcache_server_formatted'] = '127.0.0.1' - ctxt['memcache_port'] = '11211' - ctxt['memcache_url'] = '{}:{}'.format( - ctxt['memcache_server_formatted'], - ctxt['memcache_port']) - else: - if CompareHostReleases(release) > 'trusty': - ctxt['memcache_server'] = '::1' - else: - ctxt['memcache_server'] = 'ip6-localhost' - ctxt['memcache_server_formatted'] = '[::1]' - ctxt['memcache_port'] = '11211' - ctxt['memcache_url'] = 'inet6:{}:{}'.format( - ctxt['memcache_server_formatted'], - ctxt['memcache_port']) - return ctxt - - -class EnsureDirContext(OSContextGenerator): - ''' - Serves as a generic context to create a directory as a side-effect. - - Useful for software that supports drop-in files (.d) in conjunction - with config option-based templates. Examples include: - * OpenStack oslo.policy drop-in files; - * systemd drop-in config files; - * other software that supports overriding defaults with .d files - - Another use-case is when a subordinate generates a configuration for - primary to render in a separate directory. - - Some software requires a user to create a target directory to be - scanned for drop-in files with a specific format. This is why this - context is needed to do that before rendering a template. - ''' - - def __init__(self, dirname, **kwargs): - '''Used merely to ensure that a given directory exists.''' - self.dirname = dirname - self.kwargs = kwargs - - def __call__(self): - mkdir(self.dirname, **self.kwargs) - return {} diff --git a/hooks/charmhelpers/contrib/openstack/exceptions.py b/hooks/charmhelpers/contrib/openstack/exceptions.py deleted file mode 100644 index f85ae4f..0000000 --- a/hooks/charmhelpers/contrib/openstack/exceptions.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class OSContextError(Exception): - """Raised when an error occurs during context generation. - - This exception is principally used in contrib.openstack.context - """ - pass diff --git a/hooks/charmhelpers/contrib/openstack/files/__init__.py b/hooks/charmhelpers/contrib/openstack/files/__init__.py deleted file mode 100644 index 9df5f74..0000000 --- a/hooks/charmhelpers/contrib/openstack/files/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# dummy __init__.py to fool syncer into thinking this is a syncable python -# module diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh deleted file mode 100755 index 1df55db..0000000 --- a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -#-------------------------------------------- -# This file is managed by Juju -#-------------------------------------------- -# -# Copyright 2009,2012 Canonical Ltd. -# Author: Tom Haddon - -CRITICAL=0 -NOTACTIVE='' -LOGFILE=/var/log/nagios/check_haproxy.log -AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}') - -typeset -i N_INSTANCES=0 -for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg) -do - N_INSTANCES=N_INSTANCES+1 - output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK') - if [ $? != 0 ]; then - date >> $LOGFILE - echo $output >> $LOGFILE - /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1 - CRITICAL=1 - NOTACTIVE="${NOTACTIVE} $appserver" - fi -done - -if [ $CRITICAL = 1 ]; then - echo "CRITICAL:${NOTACTIVE}" - exit 2 -fi - -echo "OK: All haproxy instances ($N_INSTANCES) looking good" -exit 0 diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh deleted file mode 100755 index 91ce024..0000000 --- a/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -#-------------------------------------------- -# This file is managed by Juju -#-------------------------------------------- -# -# Copyright 2009,2012 Canonical Ltd. 
-# Author: Tom Haddon - -# These should be config options at some stage -CURRQthrsh=0 -MAXQthrsh=100 - -AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}') - -HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v) - -for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}') -do - CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3) - MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4) - - if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then - echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ" - exit 2 - fi -done - -echo "OK: All haproxy queue depths looking good" -exit 0 - diff --git a/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/hooks/charmhelpers/contrib/openstack/ha/__init__.py deleted file mode 100644 index 9b088de..0000000 --- a/hooks/charmhelpers/contrib/openstack/ha/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/openstack/ha/utils.py b/hooks/charmhelpers/contrib/openstack/ha/utils.py deleted file mode 100644 index 6060ae5..0000000 --- a/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ /dev/null @@ -1,265 +0,0 @@ -# Copyright 2014-2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2016 Canonical Ltd. -# -# Authors: -# Openstack Charmers < -# - -""" -Helpers for high availability. -""" - -import json - -import re - -from charmhelpers.core.hookenv import ( - log, - relation_set, - charm_name, - config, - status_set, - DEBUG, - WARNING, -) - -from charmhelpers.core.host import ( - lsb_release -) - -from charmhelpers.contrib.openstack.ip import ( - resolve_address, - is_ipv6, -) - -from charmhelpers.contrib.network.ip import ( - get_iface_for_address, - get_netmask_for_address, -) - -from charmhelpers.contrib.hahelpers.cluster import ( - get_hacluster_config -) - -JSON_ENCODE_OPTIONS = dict( - sort_keys=True, - allow_nan=False, - indent=None, - separators=(',', ':'), -) - - -class DNSHAException(Exception): - """Raised when an error occurs setting up DNS HA - """ - - pass - - -def update_dns_ha_resource_params(resources, resource_params, - relation_id=None, - crm_ocf='ocf:maas:dns'): - """ Configure DNS-HA resources based on provided configuration and - update resource dictionaries for the HA relation. - - @param resources: Pointer to dictionary of resources. 
- Usually instantiated in ha_joined(). - @param resource_params: Pointer to dictionary of resource parameters. - Usually instantiated in ha_joined() - @param relation_id: Relation ID of the ha relation - @param crm_ocf: Corosync Open Cluster Framework resource agent to use for - DNS HA - """ - _relation_data = {'resources': {}, 'resource_params': {}} - update_hacluster_dns_ha(charm_name(), - _relation_data, - crm_ocf) - resources.update(_relation_data['resources']) - resource_params.update(_relation_data['resource_params']) - relation_set(relation_id=relation_id, groups=_relation_data['groups']) - - -def assert_charm_supports_dns_ha(): - """Validate prerequisites for DNS HA - The MAAS client is only available on Xenial or greater - - :raises DNSHAException: if release is < 16.04 - """ - if lsb_release().get('DISTRIB_RELEASE') < '16.04': - msg = ('DNS HA is only supported on 16.04 and greater ' - 'versions of Ubuntu.') - status_set('blocked', msg) - raise DNSHAException(msg) - return True - - -def expect_ha(): - """ Determine if the unit expects to be in HA - - Check for VIP or dns-ha settings which indicate the unit should expect to - be related to hacluster. - - @returns boolean - """ - return config('vip') or config('dns-ha') - - -def generate_ha_relation_data(service): - """ Generate relation data for ha relation - - Based on configuration options and unit interfaces, generate a json - encoded dict of relation data items for the hacluster relation, - providing configuration for DNS HA or VIP's + haproxy clone sets. - - @returns dict: json encoded data for use with relation_set - """ - _haproxy_res = 'res_{}_haproxy'.format(service) - _relation_data = { - 'resources': { - _haproxy_res: 'lsb:haproxy', - }, - 'resource_params': { - _haproxy_res: 'op monitor interval="5s"' - }, - 'init_services': { - _haproxy_res: 'haproxy' - }, - 'clones': { - 'cl_{}_haproxy'.format(service): _haproxy_res - }, - } - - if config('dns-ha'): - update_hacluster_dns_ha(service, _relation_data) - else: - update_hacluster_vip(service, _relation_data) - - return { - 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS) - for k, v in _relation_data.items() if v - } - - -def update_hacluster_dns_ha(service, relation_data, - crm_ocf='ocf:maas:dns'): - """ Configure DNS-HA resources based on provided configuration - - @param service: Name of the service being configured - @param relation_data: Pointer to dictionary of relation data. - @param crm_ocf: Corosync Open Cluster Framework resource agent to use for - DNS HA - """ - # Validate the charm environment for DNS HA - assert_charm_supports_dns_ha() - - settings = ['os-admin-hostname', 'os-internal-hostname', - 'os-public-hostname', 'os-access-hostname'] - - # Check which DNS settings are set and update dictionaries - hostname_group = [] - for setting in settings: - hostname = config(setting) - if hostname is None: - log('DNS HA: Hostname setting {} is None. Ignoring.' - ''.format(setting), - DEBUG) - continue - m = re.search('os-(.+?)-hostname', setting) - if m: - endpoint_type = m.group(1) - # resolve_address's ADDRESS_MAP uses 'int' not 'internal' - if endpoint_type == 'internal': - endpoint_type = 'int' - else: - msg = ('Unexpected DNS hostname setting: {}. 
' - 'Cannot determine endpoint_type name' - ''.format(setting)) - status_set('blocked', msg) - raise DNSHAException(msg) - - hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type) - if hostname_key in hostname_group: - log('DNS HA: Resource {}: {} already exists in ' - 'hostname group - skipping'.format(hostname_key, hostname), - DEBUG) - continue - - hostname_group.append(hostname_key) - relation_data['resources'][hostname_key] = crm_ocf - relation_data['resource_params'][hostname_key] = ( - 'params fqdn="{}" ip_address="{}"' - .format(hostname, resolve_address(endpoint_type=endpoint_type, - override=False))) - - if len(hostname_group) >= 1: - log('DNS HA: Hostname group is set with {} as members. ' - 'Informing the ha relation'.format(' '.join(hostname_group)), - DEBUG) - relation_data['groups'] = { - 'grp_{}_hostnames'.format(service): ' '.join(hostname_group) - } - else: - msg = 'DNS HA: Hostname group has no members.' - status_set('blocked', msg) - raise DNSHAException(msg) - - -def update_hacluster_vip(service, relation_data): - """ Configure VIP resources based on provided configuration - - @param service: Name of the service being configured - @param relation_data: Pointer to dictionary of relation data. - """ - cluster_config = get_hacluster_config() - vip_group = [] - for vip in cluster_config['vip'].split(): - if is_ipv6(vip): - res_neutron_vip = 'ocf:heartbeat:IPv6addr' - vip_params = 'ipv6addr' - else: - res_neutron_vip = 'ocf:heartbeat:IPaddr2' - vip_params = 'ip' - - iface = (get_iface_for_address(vip) or - config('vip_iface')) - netmask = (get_netmask_for_address(vip) or - config('vip_cidr')) - - if iface is not None: - vip_key = 'res_{}_{}_vip'.format(service, iface) - if vip_key in vip_group: - if vip not in relation_data['resource_params'][vip_key]: - vip_key = '{}_{}'.format(vip_key, vip_params) - else: - log("Resource '%s' (vip='%s') already exists in " - "vip group - skipping" % (vip_key, vip), WARNING) - continue - - relation_data['resources'][vip_key] = res_neutron_vip - relation_data['resource_params'][vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}" ' - 'nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) - vip_group.append(vip_key) - - if len(vip_group) >= 1: - relation_data['groups'] = { - 'grp_{}_vips'.format(service): ' '.join(vip_group) - } diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py deleted file mode 100644 index 73102af..0000000 --- a/hooks/charmhelpers/contrib/openstack/ip.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
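For orientation while reading the deletions above: update_hacluster_vip() and generate_ha_relation_data(), removed in the ha/utils.py hunk, assemble the dictionaries that a principal charm sends over the ha relation to hacluster. A minimal sketch of the shape of that data, using purely illustrative values (service name 'neutron-api', VIP '10.5.0.100', netmask '255.255.255.0', interface 'eth0' appear nowhere in the diff):

    # Illustrative only: approximate relation data built by
    # update_hacluster_vip('neutron-api', relation_data) for a single
    # IPv4 VIP bound to eth0.
    relation_data = {
        'resources': {
            # one IPaddr2 (or IPv6addr) resource per VIP/interface pair
            'res_neutron-api_eth0_vip': 'ocf:heartbeat:IPaddr2',
        },
        'resource_params': {
            'res_neutron-api_eth0_vip': (
                'params ip="10.5.0.100" '
                'cidr_netmask="255.255.255.0" nic="eth0"'
            ),
        },
        'groups': {
            # all VIP resources are collected into one resource group
            'grp_neutron-api_vips': 'res_neutron-api_eth0_vip',
        },
    }

    # generate_ha_relation_data() then JSON-encodes each non-empty entry
    # under a 'json_'-prefixed key before handing it to relation_set().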
- -from charmhelpers.core.hookenv import ( - config, - unit_get, - service_name, - network_get_primary_address, -) -from charmhelpers.contrib.network.ip import ( - get_address_in_network, - is_address_in_network, - is_ipv6, - get_ipv6_addr, - resolve_network_cidr, -) -from charmhelpers.contrib.hahelpers.cluster import is_clustered - -PUBLIC = 'public' -INTERNAL = 'int' -ADMIN = 'admin' -ACCESS = 'access' - -ADDRESS_MAP = { - PUBLIC: { - 'binding': 'public', - 'config': 'os-public-network', - 'fallback': 'public-address', - 'override': 'os-public-hostname', - }, - INTERNAL: { - 'binding': 'internal', - 'config': 'os-internal-network', - 'fallback': 'private-address', - 'override': 'os-internal-hostname', - }, - ADMIN: { - 'binding': 'admin', - 'config': 'os-admin-network', - 'fallback': 'private-address', - 'override': 'os-admin-hostname', - }, - ACCESS: { - 'binding': 'access', - 'config': 'access-network', - 'fallback': 'private-address', - 'override': 'os-access-hostname', - }, -} - - -def canonical_url(configs, endpoint_type=PUBLIC): - """Returns the correct HTTP URL to this host given the state of HTTPS - configuration, hacluster and charm configuration. - - :param configs: OSTemplateRenderer config templating object to inspect - for a complete https context. - :param endpoint_type: str endpoint type to resolve. - :param returns: str base URL for services on the current service unit. - """ - scheme = _get_scheme(configs) - - address = resolve_address(endpoint_type) - if is_ipv6(address): - address = "[{}]".format(address) - - return '%s://%s' % (scheme, address) - - -def _get_scheme(configs): - """Returns the scheme to use for the url (either http or https) - depending upon whether https is in the configs value. - - :param configs: OSTemplateRenderer config templating object to inspect - for a complete https context. - :returns: either 'http' or 'https' depending on whether https is - configured within the configs context. - """ - scheme = 'http' - if configs and 'https' in configs.complete_contexts(): - scheme = 'https' - return scheme - - -def _get_address_override(endpoint_type=PUBLIC): - """Returns any address overrides that the user has defined based on the - endpoint type. - - Note: this function allows for the service name to be inserted into the - address if the user specifies {service_name}.somehost.org. - - :param endpoint_type: the type of endpoint to retrieve the override - value for. - :returns: any endpoint address or hostname that the user has overridden - or None if an override is not present. - """ - override_key = ADDRESS_MAP[endpoint_type]['override'] - addr_override = config(override_key) - if not addr_override: - return None - else: - return addr_override.format(service_name=service_name()) - - -def resolve_address(endpoint_type=PUBLIC, override=True): - """Return unit address depending on net config. - - If unit is clustered with vip(s) and has net splits defined, return vip on - correct network. If clustered with no nets defined, return primary vip. - - If not clustered, return unit address ensuring address is on configured net - split if one is configured, or a Juju 2.0 extra-binding has been used. 
-
-    :param endpoint_type: Network endpoint type
-    :param override: Accept hostname overrides or not
-    """
-    resolved_address = None
-    if override:
-        resolved_address = _get_address_override(endpoint_type)
-        if resolved_address:
-            return resolved_address
-
-    vips = config('vip')
-    if vips:
-        vips = vips.split()
-
-    net_type = ADDRESS_MAP[endpoint_type]['config']
-    net_addr = config(net_type)
-    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
-    binding = ADDRESS_MAP[endpoint_type]['binding']
-    clustered = is_clustered()
-
-    if clustered and vips:
-        if net_addr:
-            for vip in vips:
-                if is_address_in_network(net_addr, vip):
-                    resolved_address = vip
-                    break
-        else:
-            # NOTE: endeavour to check vips against network space
-            #       bindings
-            try:
-                bound_cidr = resolve_network_cidr(
-                    network_get_primary_address(binding)
-                )
-                for vip in vips:
-                    if is_address_in_network(bound_cidr, vip):
-                        resolved_address = vip
-                        break
-            except NotImplementedError:
-                # If no net-splits are configured and there is no support
-                # for extra bindings/network spaces, we expect a single vip
-                resolved_address = vips[0]
-    else:
-        if config('prefer-ipv6'):
-            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
-        else:
-            fallback_addr = unit_get(net_fallback)
-
-        if net_addr:
-            resolved_address = get_address_in_network(net_addr, fallback_addr)
-        else:
-            # NOTE: only try to use extra bindings if legacy network
-            #       configuration is not in use
-            try:
-                resolved_address = network_get_primary_address(binding)
-            except NotImplementedError:
-                resolved_address = fallback_addr
-
-    if resolved_address is None:
-        raise ValueError("Unable to resolve a suitable IP address based on "
-                         "charm state and configuration. (net_type=%s, "
-                         "clustered=%s)" % (net_type, clustered))
-
-    return resolved_address
-
-
-def get_vip_in_network(network):
-    matching_vip = None
-    vips = config('vip')
-    if vips:
-        for vip in vips.split():
-            if is_address_in_network(network, vip):
-                matching_vip = vip
-    return matching_vip
diff --git a/hooks/charmhelpers/contrib/openstack/keystone.py b/hooks/charmhelpers/contrib/openstack/keystone.py
deleted file mode 100644
index d7e02cc..0000000
--- a/hooks/charmhelpers/contrib/openstack/keystone.py
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2017 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
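The resolve_address() helper deleted just above was the single place where unit addresses were computed, and its precedence is easy to lose in the nested conditionals. A condensed restatement for reference (an illustrative summary, not code from the original module):

    # Illustrative only: the lookup order implemented by the deleted
    # resolve_address(endpoint_type, override=True).
    RESOLUTION_ORDER = [
        ('override requested and os-*-hostname set',
         '_get_address_override()'),
        ('clustered, vip(s), net split configured',
         'VIP that falls inside os-*-network'),
        ('clustered, vip(s), no net split',
         'VIP inside the bound network space, else the first VIP'),
        ('not clustered, net split configured',
         'unit address inside os-*-network'),
        ('not clustered, extra-binding supported',
         'network_get_primary_address(binding)'),
        ('otherwise',
         'unit_get(fallback), or get_ipv6_addr() when prefer-ipv6 is set'),
    ]

    for condition, source in RESOLUTION_ORDER:
        print('{:45} -> {}'.format(condition, source))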
-
-import six
-from charmhelpers.fetch import apt_install
-from charmhelpers.contrib.openstack.context import IdentityServiceContext
-from charmhelpers.core.hookenv import (
-    log,
-    ERROR,
-)
-
-
-def get_api_suffix(api_version):
-    """Return the formatted api suffix for the given version
-    @param api_version: version of the keystone endpoint
-    @returns the api suffix formatted according to the given api
-             version
-    """
-    return 'v2.0' if api_version in (2, "2", "2.0") else 'v3'
-
-
-def format_endpoint(schema, addr, port, api_version):
-    """Return a formatted keystone endpoint
-    @param schema: http or https
-    @param addr: ipv4/ipv6 host of the keystone service
-    @param port: port of the keystone service
-    @param api_version: 2 or 3
-    @returns a fully formatted keystone endpoint
-    """
-    return '{}://{}:{}/{}/'.format(schema, addr, port,
-                                   get_api_suffix(api_version))
-
-
-def get_keystone_manager(endpoint, api_version, **kwargs):
-    """Return a keystonemanager for the correct API version
-
-    @param endpoint: the keystone endpoint to point client at
-    @param api_version: version of the keystone api the client should use
-    @param kwargs: token or username/tenant/password information
-    @returns keystonemanager class used for interrogating keystone
-    """
-    if api_version == 2:
-        return KeystoneManager2(endpoint, **kwargs)
-    if api_version == 3:
-        return KeystoneManager3(endpoint, **kwargs)
-    raise ValueError('No manager found for api version {}'.format(api_version))
-
-
-def get_keystone_manager_from_identity_service_context():
-    """Return a keystonemanager generated from an
-    instance of charmhelpers.contrib.openstack.context.IdentityServiceContext
-    @returns keystonemanager instance
-    """
-    context = IdentityServiceContext()()
-    if not context:
-        msg = "Identity service context cannot be generated"
-        log(msg, level=ERROR)
-        raise ValueError(msg)
-
-    endpoint = format_endpoint(context['service_protocol'],
-                               context['service_host'],
-                               context['service_port'],
-                               context['api_version'])
-
-    if context['api_version'] in (2, "2.0"):
-        api_version = 2
-    else:
-        api_version = 3
-
-    return get_keystone_manager(endpoint, api_version,
-                                username=context['admin_user'],
-                                password=context['admin_password'],
-                                tenant_name=context['admin_tenant_name'])
-
-
-class KeystoneManager(object):
-
-    def resolve_service_id(self, service_name=None, service_type=None):
-        """Find the service_id of a given service"""
-        services = [s._info for s in self.api.services.list()]
-
-        service_name = service_name.lower()
-        for s in services:
-            name = s['name'].lower()
-            if service_type and service_name:
-                if (service_name == name and service_type == s['type']):
-                    return s['id']
-            elif service_name and service_name == name:
-                return s['id']
-            elif service_type and service_type == s['type']:
-                return s['id']
-        return None
-
-    def service_exists(self, service_name=None, service_type=None):
-        """Determine if the given service exists on the service list"""
-        return self.resolve_service_id(service_name, service_type) is not None
-
-
-class KeystoneManager2(KeystoneManager):
-
-    def __init__(self, endpoint, **kwargs):
-        try:
-            from keystoneclient.v2_0 import client
-            from keystoneclient.auth.identity import v2
-            from keystoneclient import session
-        except ImportError:
-            if six.PY2:
-                apt_install(["python-keystoneclient"], fatal=True)
-            else:
-                apt_install(["python3-keystoneclient"], fatal=True)
-
-            from keystoneclient.v2_0 import client
-            from keystoneclient.auth.identity import v2
-            from keystoneclient import session
-
-        self.api_version = 2
-
-        token = kwargs.get("token", None)
-        if token:
-            api = client.Client(endpoint=endpoint, token=token)
-        else:
-            auth = v2.Password(username=kwargs.get("username"),
-                               password=kwargs.get("password"),
-                               tenant_name=kwargs.get("tenant_name"),
-                               auth_url=endpoint)
-            sess = session.Session(auth=auth)
-            api = client.Client(session=sess)
-
-        self.api = api
-
-
-class KeystoneManager3(KeystoneManager):
-
-    def __init__(self, endpoint, **kwargs):
-        try:
-            from keystoneclient.v3 import client
-            from keystoneclient.auth import token_endpoint
-            from keystoneclient import session
-            from keystoneclient.auth.identity import v3
-        except ImportError:
-            if six.PY2:
-                apt_install(["python-keystoneclient"], fatal=True)
-            else:
-                apt_install(["python3-keystoneclient"], fatal=True)
-
-            from keystoneclient.v3 import client
-            from keystoneclient.auth import token_endpoint
-            from keystoneclient import session
-            from keystoneclient.auth.identity import v3
-
-        self.api_version = 3
-
-        token = kwargs.get("token", None)
-        if token:
-            auth = token_endpoint.Token(endpoint=endpoint,
-                                        token=token)
-            sess = session.Session(auth=auth)
-        else:
-            auth = v3.Password(auth_url=endpoint,
-                               user_id=kwargs.get("username"),
-                               password=kwargs.get("password"),
-                               project_id=kwargs.get("tenant_name"))
-            sess = session.Session(auth=auth)
-
-        self.api = client.Client(session=sess)
diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py
deleted file mode 100644
index 0f847f5..0000000
--- a/hooks/charmhelpers/contrib/openstack/neutron.py
+++ /dev/null
@@ -1,354 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Various utilities for dealing with Neutron and the renaming from Quantum.
-
-import six
-from subprocess import check_output
-
-from charmhelpers.core.hookenv import (
-    config,
-    log,
-    ERROR,
-)
-
-from charmhelpers.contrib.openstack.utils import (
-    os_release,
-    CompareOpenStackReleases,
-)
-
-
-def headers_package():
-    """Ensures correct linux-headers for running kernel are installed,
-    for building DKMS package"""
-    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
-    return 'linux-headers-%s' % kver
-
-
-QUANTUM_CONF_DIR = '/etc/quantum'
-
-
-def kernel_version():
-    """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
-    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
-    kver = kver.split('.')
-    return (int(kver[0]), int(kver[1]))
-
-
-def determine_dkms_package():
-    """ Determine which DKMS package should be used based on kernel version """
-    # NOTE: 3.13 kernels have support for GRE and VXLAN native
-    if kernel_version() >= (3, 13):
-        return []
-    else:
-        return [headers_package(), 'openvswitch-datapath-dkms']
-
-
-# legacy
-
-
-def quantum_plugins():
-    return {
-        'ovs': {
-            'config': '/etc/quantum/plugins/openvswitch/'
-                      'ovs_quantum_plugin.ini',
-            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
- 'OVSQuantumPluginV2', - 'contexts': [], - 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': [determine_dkms_package(), - ['quantum-plugin-openvswitch-agent']], - 'server_packages': ['quantum-server', - 'quantum-plugin-openvswitch'], - 'server_services': ['quantum-server'] - }, - 'nvp': { - 'config': '/etc/quantum/plugins/nicira/nvp.ini', - 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' - 'QuantumPlugin.NvpPluginV2', - 'contexts': [], - 'services': [], - 'packages': [], - 'server_packages': ['quantum-server', - 'quantum-plugin-nicira'], - 'server_services': ['quantum-server'] - } - } - - -NEUTRON_CONF_DIR = '/etc/neutron' - - -def neutron_plugins(): - release = os_release('nova-common') - plugins = { - 'ovs': { - 'config': '/etc/neutron/plugins/openvswitch/' - 'ovs_neutron_plugin.ini', - 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' - 'OVSNeutronPluginV2', - 'contexts': [], - 'services': ['neutron-plugin-openvswitch-agent'], - 'packages': [determine_dkms_package(), - ['neutron-plugin-openvswitch-agent']], - 'server_packages': ['neutron-server', - 'neutron-plugin-openvswitch'], - 'server_services': ['neutron-server'] - }, - 'nvp': { - 'config': '/etc/neutron/plugins/nicira/nvp.ini', - 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' - 'NeutronPlugin.NvpPluginV2', - 'contexts': [], - 'services': [], - 'packages': [], - 'server_packages': ['neutron-server', - 'neutron-plugin-nicira'], - 'server_services': ['neutron-server'] - }, - 'nsx': { - 'config': '/etc/neutron/plugins/vmware/nsx.ini', - 'driver': 'vmware', - 'contexts': [], - 'services': [], - 'packages': [], - 'server_packages': ['neutron-server', - 'neutron-plugin-vmware'], - 'server_services': ['neutron-server'] - }, - 'n1kv': { - 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', - 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', - 'contexts': [], - 'services': [], - 'packages': [determine_dkms_package(), - ['neutron-plugin-cisco']], - 'server_packages': ['neutron-server', - 'neutron-plugin-cisco'], - 'server_services': ['neutron-server'] - }, - 'Calico': { - 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', - 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', - 'contexts': [], - 'services': ['calico-felix', - 'bird', - 'neutron-dhcp-agent', - 'nova-api-metadata', - 'etcd'], - 'packages': [determine_dkms_package(), - ['calico-compute', - 'bird', - 'neutron-dhcp-agent', - 'nova-api-metadata', - 'etcd']], - 'server_packages': ['neutron-server', 'calico-control', 'etcd'], - 'server_services': ['neutron-server', 'etcd'] - }, - 'vsp': { - 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', - 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', - 'contexts': [], - 'services': [], - 'packages': [], - 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], - 'server_services': ['neutron-server'] - }, - 'plumgrid': { - 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', - 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' - '.plumgrid_plugin.NeutronPluginPLUMgridV2'), - 'contexts': [], - 'services': [], - 'packages': ['plumgrid-lxc', - 'iovisor-dkms'], - 'server_packages': ['neutron-server', - 'neutron-plugin-plumgrid'], - 'server_services': ['neutron-server'] - }, - 'midonet': { - 'config': '/etc/neutron/plugins/midonet/midonet.ini', - 'driver': 'midonet.neutron.plugin.MidonetPluginV2', - 'contexts': [], - 'services': [], - 'packages': [determine_dkms_package()], - 'server_packages': ['neutron-server', - 'python-neutron-plugin-midonet'], - 'server_services': 
['neutron-server'] - } - } - if CompareOpenStackReleases(release) >= 'icehouse': - # NOTE: patch in ml2 plugin for icehouse onwards - plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' - plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' - plugins['ovs']['server_packages'] = ['neutron-server', - 'neutron-plugin-ml2'] - # NOTE: patch in vmware renames nvp->nsx for icehouse onwards - plugins['nvp'] = plugins['nsx'] - if CompareOpenStackReleases(release) >= 'kilo': - plugins['midonet']['driver'] = ( - 'neutron.plugins.midonet.plugin.MidonetPluginV2') - if CompareOpenStackReleases(release) >= 'liberty': - plugins['midonet']['driver'] = ( - 'midonet.neutron.plugin_v1.MidonetPluginV2') - plugins['midonet']['server_packages'].remove( - 'python-neutron-plugin-midonet') - plugins['midonet']['server_packages'].append( - 'python-networking-midonet') - plugins['plumgrid']['driver'] = ( - 'networking_plumgrid.neutron.plugins' - '.plugin.NeutronPluginPLUMgridV2') - plugins['plumgrid']['server_packages'].remove( - 'neutron-plugin-plumgrid') - if CompareOpenStackReleases(release) >= 'mitaka': - plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') - plugins['nsx']['server_packages'].append('python-vmware-nsx') - plugins['nsx']['config'] = '/etc/neutron/nsx.ini' - plugins['vsp']['driver'] = ( - 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') - return plugins - - -def neutron_plugin_attribute(plugin, attr, net_manager=None): - manager = net_manager or network_manager() - if manager == 'quantum': - plugins = quantum_plugins() - elif manager == 'neutron': - plugins = neutron_plugins() - else: - log("Network manager '%s' does not support plugins." % (manager), - level=ERROR) - raise Exception - - try: - _plugin = plugins[plugin] - except KeyError: - log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) - raise Exception - - try: - return _plugin[attr] - except KeyError: - return None - - -def network_manager(): - ''' - Deals with the renaming of Quantum to Neutron in H and any situations - that require compatability (eg, deploying H with network-manager=quantum, - upgrading from G). - ''' - release = os_release('nova-common') - manager = config('network-manager').lower() - - if manager not in ['quantum', 'neutron']: - return manager - - if release in ['essex']: - # E does not support neutron - log('Neutron networking not supported in Essex.', level=ERROR) - raise Exception - elif release in ['folsom', 'grizzly']: - # neutron is named quantum in F and G - return 'quantum' - else: - # ensure accurate naming for all releases post-H - return 'neutron' - - -def parse_mappings(mappings, key_rvalue=False): - """By default mappings are lvalue keyed. - - If key_rvalue is True, the mapping will be reversed to allow multiple - configs for the same lvalue. - """ - parsed = {} - if mappings: - mappings = mappings.split() - for m in mappings: - p = m.partition(':') - - if key_rvalue: - key_index = 2 - val_index = 0 - # if there is no rvalue skip to next - if not p[1]: - continue - else: - key_index = 0 - val_index = 2 - - key = p[key_index].strip() - parsed[key] = p[val_index].strip() - - return parsed - - -def parse_bridge_mappings(mappings): - """Parse bridge mappings. - - Mappings must be a space-delimited list of provider:bridge mappings. - - Returns dict of the form {provider:bridge}. - """ - return parse_mappings(mappings) - - -def parse_data_port_mappings(mappings, default_bridge='br-data'): - """Parse data port mappings. 
-
-    Mappings must be a space-delimited list of bridge:port.
-
-    Returns dict of the form {port:bridge} where ports may be mac addresses or
-    interface names.
-    """
-
-    # NOTE(dosaboy): we use the rvalue as the key to allow multiple values
-    # to be proposed, since a port may be a mac address, which will differ
-    # across units, thus allowing the first known-good value to be chosen.
-    _mappings = parse_mappings(mappings, key_rvalue=True)
-    if not _mappings or list(_mappings.values()) == ['']:
-        if not mappings:
-            return {}
-
-        # For backwards-compatibility we need to support port-only provided in
-        # config.
-        _mappings = {mappings.split()[0]: default_bridge}
-
-    ports = _mappings.keys()
-    if len(set(ports)) != len(ports):
-        raise Exception("It is not allowed to have the same port configured "
-                        "on more than one bridge")
-
-    return _mappings
-
-
-def parse_vlan_range_mappings(mappings):
-    """Parse vlan range mappings.
-
-    Mappings must be a space-delimited list of provider:start:end mappings.
-
-    The start:end range is optional and may be omitted.
-
-    Returns dict of the form {provider: (start, end)}.
-    """
-    _mappings = parse_mappings(mappings)
-    if not _mappings:
-        return {}
-
-    mappings = {}
-    for p, r in six.iteritems(_mappings):
-        mappings[p] = tuple(r.split(':'))
-
-    return mappings
diff --git a/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/hooks/charmhelpers/contrib/openstack/templates/__init__.py
deleted file mode 100644
index 9df5f74..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
deleted file mode 100644
index a11ce8a..0000000
--- a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# ceph configuration file maintained by Juju
-# local changes may be overwritten.
-############################################################################### -[global] -{% if auth -%} -auth_supported = {{ auth }} -keyring = /etc/ceph/$cluster.$name.keyring -mon host = {{ mon_hosts }} -{% endif -%} -log to syslog = {{ use_syslog }} -err to syslog = {{ use_syslog }} -clog to syslog = {{ use_syslog }} -{% if rbd_features %} -rbd default features = {{ rbd_features }} -{% endif %} - -[client] -{% if rbd_client_cache_settings -%} -{% for key, value in rbd_client_cache_settings.items() -%} -{{ key }} = {{ value }} -{% endfor -%} -{%- endif %} diff --git a/hooks/charmhelpers/contrib/openstack/templates/git.upstart b/hooks/charmhelpers/contrib/openstack/templates/git.upstart deleted file mode 100644 index 4bed404..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/git.upstart +++ /dev/null @@ -1,17 +0,0 @@ -description "{{ service_description }}" -author "Juju {{ service_name }} Charm " - -start on runlevel [2345] -stop on runlevel [!2345] - -respawn - -exec start-stop-daemon --start --chuid {{ user_name }} \ - --chdir {{ start_dir }} --name {{ process_name }} \ - --exec {{ executable_name }} -- \ - {% for config_file in config_files -%} - --config-file={{ config_file }} \ - {% endfor -%} - {% if log_file -%} - --log-file={{ log_file }} - {% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg deleted file mode 100644 index d36af2a..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ /dev/null @@ -1,77 +0,0 @@ -global - log /var/lib/haproxy/dev/log local0 - log /var/lib/haproxy/dev/log local1 notice - maxconn 20000 - user haproxy - group haproxy - spread-checks 0 - stats socket /var/run/haproxy/admin.sock mode 600 level admin - stats timeout 2m - -defaults - log global - mode tcp - option tcplog - option dontlognull - retries 3 -{%- if haproxy_queue_timeout %} - timeout queue {{ haproxy_queue_timeout }} -{%- else %} - timeout queue 9000 -{%- endif %} -{%- if haproxy_connect_timeout %} - timeout connect {{ haproxy_connect_timeout }} -{%- else %} - timeout connect 9000 -{%- endif %} -{%- if haproxy_client_timeout %} - timeout client {{ haproxy_client_timeout }} -{%- else %} - timeout client 90000 -{%- endif %} -{%- if haproxy_server_timeout %} - timeout server {{ haproxy_server_timeout }} -{%- else %} - timeout server 90000 -{%- endif %} - -listen stats - bind {{ local_host }}:{{ stat_port }} - mode http - stats enable - stats hide-version - stats realm Haproxy\ Statistics - stats uri / - stats auth admin:{{ stat_password }} - -{% if frontends -%} -{% for service, ports in service_ports.items() -%} -frontend tcp-in_{{ service }} - bind *:{{ ports[0] }} - {% if ipv6_enabled -%} - bind :::{{ ports[0] }} - {% endif -%} - {% for frontend in frontends -%} - acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} - use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} - {% endfor -%} - default_backend {{ service }}_{{ default_backend }} - -{% for frontend in frontends -%} -backend {{ service }}_{{ frontend }} - balance leastconn - {% if backend_options -%} - {% if backend_options[service] -%} - {% for option in backend_options[service] -%} - {% for key, value in option.items() -%} - {{ key }} {{ value }} - {% endfor -%} - {% endfor -%} - {% endif -%} - {% endif -%} - {% for unit, address in frontends[frontend]['backends'].items() -%} - server {{ unit }} {{ address }}:{{ ports[1] }} check - {% endfor %} -{% endfor -%} -{% endfor -%} -{% endif 
-%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/memcached.conf b/hooks/charmhelpers/contrib/openstack/templates/memcached.conf deleted file mode 100644 index 26cb037..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/memcached.conf +++ /dev/null @@ -1,53 +0,0 @@ -############################################################################### -# [ WARNING ] -# memcached configuration file maintained by Juju -# local changes may be overwritten. -############################################################################### - -# memcached default config file -# 2003 - Jay Bonci -# This configuration file is read by the start-memcached script provided as -# part of the Debian GNU/Linux distribution. - -# Run memcached as a daemon. This command is implied, and is not needed for the -# daemon to run. See the README.Debian that comes with this package for more -# information. --d - -# Log memcached's output to /var/log/memcached -logfile /var/log/memcached.log - -# Be verbose -# -v - -# Be even more verbose (print client commands as well) -# -vv - -# Start with a cap of 64 megs of memory. It's reasonable, and the daemon default -# Note that the daemon will grow to this size, but does not start out holding this much -# memory --m 64 - -# Default connection port is 11211 --p {{ memcache_port }} - -# Run the daemon as root. The start-memcached will default to running as root if no -# -u command is present in this config file --u memcache - -# Specify which IP address to listen on. The default is to listen on all IP addresses -# This parameter is one of the only security measures that memcached has, so make sure -# it's listening on a firewalled interface. --l {{ memcache_server }} - -# Limit the number of simultaneous incoming connections. The daemon default is 1024 -# -c 1024 - -# Lock down all paged memory. 
Consult with the README and homepage before you do this -# -k - -# Return error when memory is exhausted (rather than removing items) -# -M - -# Maximize core file limit -# -r diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend deleted file mode 100644 index f614b3f..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ /dev/null @@ -1,29 +0,0 @@ -{% if endpoints -%} -{% for ext_port in ext_ports -%} -Listen {{ ext_port }} -{% endfor -%} -{% for address, endpoint, ext, int in endpoints -%} - - ServerName {{ endpoint }} - SSLEngine on - SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2 - SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM - SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} - # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8 - SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} - SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} - ProxyPass / http://localhost:{{ int }}/ - ProxyPassReverse / http://localhost:{{ int }}/ - ProxyPreserveHost on - RequestHeader set X-Forwarded-Proto "https" - -{% endfor -%} - - Order deny,allow - Allow from all - - - Order allow,deny - Allow from all - -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf deleted file mode 100644 index f614b3f..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf +++ /dev/null @@ -1,29 +0,0 @@ -{% if endpoints -%} -{% for ext_port in ext_ports -%} -Listen {{ ext_port }} -{% endfor -%} -{% for address, endpoint, ext, int in endpoints -%} - - ServerName {{ endpoint }} - SSLEngine on - SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2 - SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM - SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} - # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8 - SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} - SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} - ProxyPass / http://localhost:{{ int }}/ - ProxyPassReverse / http://localhost:{{ int }}/ - ProxyPreserveHost on - RequestHeader set X-Forwarded-Proto "https" - -{% endfor -%} - - Order deny,allow - Allow from all - - - Order allow,deny - Allow from all - -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken deleted file mode 100644 index 5dcebe7..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken +++ /dev/null @@ -1,12 +0,0 @@ -{% if auth_host -%} -[keystone_authtoken] -auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} -auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} -auth_plugin = password -project_domain_id = default -user_domain_id = default -project_name = {{ admin_tenant_name }} -username = {{ admin_user }} -password = {{ admin_password }} -signing_dir = {{ signing_dir }} -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy deleted file mode 100644 index 9356b2b..0000000 --- 
a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy +++ /dev/null @@ -1,10 +0,0 @@ -{% if auth_host -%} -[keystone_authtoken] -# Juno specific config (Bug #1557223) -auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} -identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} -admin_tenant_name = {{ admin_tenant_name }} -admin_user = {{ admin_user }} -admin_password = {{ admin_password }} -signing_dir = {{ signing_dir }} -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka deleted file mode 100644 index 8e6889e..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka +++ /dev/null @@ -1,20 +0,0 @@ -{% if auth_host -%} -[keystone_authtoken] -auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} -auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} -auth_type = password -{% if api_version == "3" -%} -project_domain_name = {{ admin_domain_name }} -user_domain_name = {{ admin_domain_name }} -{% else -%} -project_domain_name = default -user_domain_name = default -{% endif -%} -project_name = {{ admin_tenant_name }} -username = {{ admin_user }} -password = {{ admin_password }} -signing_dir = {{ signing_dir }} -{% if use_memcache == true %} -memcached_servers = {{ memcache_url }} -{% endif -%} -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache deleted file mode 100644 index e056a32..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache +++ /dev/null @@ -1,6 +0,0 @@ -[cache] -{% if memcache_url %} -enabled = true -backend = oslo_cache.memcache_pool -memcache_servers = {{ memcache_url }} -{% endif %} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware deleted file mode 100644 index dd73230..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware +++ /dev/null @@ -1,5 +0,0 @@ -[oslo_middleware] - -# Bug #1758675 -enable_proxy_headers_parsing = true - diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications deleted file mode 100644 index 021a3c2..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications +++ /dev/null @@ -1,11 +0,0 @@ -{% if transport_url -%} -[oslo_messaging_notifications] -driver = messagingv2 -transport_url = {{ transport_url }} -{% if notification_topics -%} -topics = {{ notification_topics }} -{% endif -%} -{% if notification_format -%} -notification_format = {{ notification_format }} -{% endif -%} -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo deleted file mode 100644 index b444c9c..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo +++ /dev/null @@ -1,22 +0,0 @@ -{% if rabbitmq_host or rabbitmq_hosts -%} -[oslo_messaging_rabbit] -rabbit_userid = {{ rabbitmq_user }} -rabbit_virtual_host = {{ rabbitmq_virtual_host }} -rabbit_password = {{ rabbitmq_password }} -{% if rabbitmq_hosts -%} -rabbit_hosts = {{ rabbitmq_hosts }} -{% if 
rabbitmq_ha_queues -%} -rabbit_ha_queues = True -rabbit_durable_queues = False -{% endif -%} -{% else -%} -rabbit_host = {{ rabbitmq_host }} -{% endif -%} -{% if rabbit_ssl_port -%} -rabbit_use_ssl = True -rabbit_port = {{ rabbit_ssl_port }} -{% if rabbit_ssl_ca -%} -kombu_ssl_ca_certs = {{ rabbit_ssl_ca }} -{% endif -%} -{% endif -%} -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-zeromq b/hooks/charmhelpers/contrib/openstack/templates/section-zeromq deleted file mode 100644 index 95f1a76..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/section-zeromq +++ /dev/null @@ -1,14 +0,0 @@ -{% if zmq_host -%} -# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) -rpc_backend = zmq -rpc_zmq_host = {{ zmq_host }} -{% if zmq_redis_address -%} -rpc_zmq_matchmaker = redis -matchmaker_heartbeat_freq = 15 -matchmaker_heartbeat_ttl = 30 -[matchmaker_redis] -host = {{ zmq_redis_address }} -{% else -%} -rpc_zmq_matchmaker = ring -{% endif -%} -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf deleted file mode 100644 index e2e73b2..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf +++ /dev/null @@ -1,91 +0,0 @@ -# Configuration file maintained by Juju. Local changes may be overwritten. - -{% if port -%} -Listen {{ port }} -{% endif -%} - -{% if admin_port -%} -Listen {{ admin_port }} -{% endif -%} - -{% if public_port -%} -Listen {{ public_port }} -{% endif -%} - -{% if port -%} - - WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ - display-name=%{GROUP} - WSGIProcessGroup {{ service_name }} - WSGIScriptAlias / {{ script }} - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/apache2/{{ service_name }}_error.log - CustomLog /var/log/apache2/{{ service_name }}_access.log combined - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - -{% endif -%} - -{% if admin_port -%} - - WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ - display-name=%{GROUP} - WSGIProcessGroup {{ service_name }}-admin - WSGIScriptAlias / {{ admin_script }} - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/apache2/{{ service_name }}_error.log - CustomLog /var/log/apache2/{{ service_name }}_access.log combined - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - -{% endif -%} - -{% if public_port -%} - - WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ - display-name=%{GROUP} - WSGIProcessGroup {{ service_name }}-public - WSGIScriptAlias / {{ public_script }} - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/apache2/{{ service_name }}_error.log - CustomLog /var/log/apache2/{{ service_name }}_access.log combined - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py deleted file mode 100644 index a623315..0000000 --- 
a/hooks/charmhelpers/contrib/openstack/templating.py
+++ /dev/null
@@ -1,379 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import six
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
-    log,
-    ERROR,
-    INFO,
-    TRACE
-)
-from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
-
-try:
-    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-except ImportError:
-    apt_update(fatal=True)
-    if six.PY2:
-        apt_install('python-jinja2', fatal=True)
-    else:
-        apt_install('python3-jinja2', fatal=True)
-    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-
-
-class OSConfigException(Exception):
-    pass
-
-
-def get_loader(templates_dir, os_release):
-    """
-    Create a jinja2.ChoiceLoader containing template dirs up to
-    and including os_release. If a release-specific template directory
-    is missing from templates_dir, it will be omitted from the loader.
-    templates_dir is added to the bottom of the search list as a base
-    loading dir.
-
-    A charm may also ship a templates dir with this module
-    and it will be appended to the bottom of the search list, eg::
-
-        hooks/charmhelpers/contrib/openstack/templates
-
-    :param templates_dir (str): Base template directory containing release
-        sub-directories.
-    :param os_release (str): OpenStack release codename to construct template
-        loader.
-    :returns: jinja2.ChoiceLoader constructed with a list of
-        jinja2.FilesystemLoaders, ordered in descending
-        order by OpenStack release.
-    """
-    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
-                 for rel in six.itervalues(OPENSTACK_CODENAMES)]
-
-    if not os.path.isdir(templates_dir):
-        log('Templates directory not found @ %s.' % templates_dir,
-            level=ERROR)
-        raise OSConfigException
-
-    # the bottom contains templates_dir and possibly a common templates dir
-    # shipped with the helper.
-    loaders = [FileSystemLoader(templates_dir)]
-    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
-    if os.path.isdir(helper_templates):
-        loaders.append(FileSystemLoader(helper_templates))
-
-    for rel, tmpl_dir in tmpl_dirs:
-        if os.path.isdir(tmpl_dir):
-            loaders.insert(0, FileSystemLoader(tmpl_dir))
-        if rel == os_release:
-            break
-    # demote this log to the lowest level; we don't really need to see these
-    # logs in production even when debugging.
-    log('Creating choice loader with dirs: %s' %
-        [l.searchpath for l in loaders], level=TRACE)
-    return ChoiceLoader(loaders)
-
-
-class OSConfigTemplate(object):
-    """
-    Associates a config file template with a list of context generators.
-    Responsible for constructing a template context based on those generators.
-    """
-
-    def __init__(self, config_file, contexts, config_template=None):
-        self.config_file = config_file
-
-        if hasattr(contexts, '__call__'):
-            self.contexts = [contexts]
-        else:
-            self.contexts = contexts
-
-        self._complete_contexts = []
-
-        self.config_template = config_template
-
-    def context(self):
-        ctxt = {}
-        for context in self.contexts:
-            _ctxt = context()
-            if _ctxt:
-                ctxt.update(_ctxt)
-                # track interfaces for every complete context.
-                [self._complete_contexts.append(interface)
-                 for interface in context.interfaces
-                 if interface not in self._complete_contexts]
-        return ctxt
-
-    def complete_contexts(self):
-        '''
-        Return a list of interfaces that have satisfied contexts.
-        '''
-        if self._complete_contexts:
-            return self._complete_contexts
-        self.context()
-        return self._complete_contexts
-
-    @property
-    def is_string_template(self):
-        """:returns: Boolean if this instance is a template initialised with a string"""
-        return self.config_template is not None
-
-
-class OSConfigRenderer(object):
-    """
-    This class provides a common templating system to be used by OpenStack
-    charms. It is intended to help charms share common code and templates,
-    and ease the burden of managing config templates across multiple OpenStack
-    releases.
-
-    Basic usage::
-
-        # import some common context generators from charmhelpers
-        from charmhelpers.contrib.openstack import context
-
-        # Create a renderer object for a specific OS release.
-        configs = OSConfigRenderer(templates_dir='/tmp/templates',
-                                   openstack_release='grizzly')
-        # register some config files with context generators.
-        configs.register(config_file='/etc/nova/nova.conf',
-                         contexts=[context.SharedDBContext(),
-                                   context.AMQPContext()])
-        configs.register(config_file='/etc/nova/api-paste.ini',
-                         contexts=[context.IdentityServiceContext()])
-        configs.register(config_file='/etc/haproxy/haproxy.conf',
-                         contexts=[context.HAProxyContext()])
-        configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
-                         contexts=[context.ExtraPolicyContext(),
                                   context.KeystoneContext()],
-                         config_template=hookenv.config('extra-policy'))
-        # write out a single config
-        configs.write('/etc/nova/nova.conf')
-        # write out all registered configs
-        configs.write_all()
-
-    **OpenStack Releases and template loading**
-
-    When the object is instantiated, it is associated with a specific OS
-    release. This dictates how the template loader will be constructed.
-
-    The constructed loader attempts to load the template from several places
-    in the following order:
-    - from the most recent OS release-specific template dir (if one exists)
-    - the base templates_dir
-    - a template directory shipped in the charm with this helper file.
-
-    For the example above, '/tmp/templates' contains the following structure::
-
-        /tmp/templates/nova.conf
-        /tmp/templates/api-paste.ini
-        /tmp/templates/grizzly/api-paste.ini
-        /tmp/templates/havana/api-paste.ini
-
-    Since it was registered with the grizzly release, it first searches
-    the grizzly directory for nova.conf, then the templates dir.
-
-    When writing api-paste.ini, it will find the template in the grizzly
-    directory.
-
-    If the object were created with folsom, it would fall back to the
-    base templates dir for its api-paste.ini template.
- - This system should help manage changes in config files through - openstack releases, allowing charms to fall back to the most recently - updated config template for a given release - - The haproxy.conf, since it is not shipped in the templates dir, will - be loaded from the module directory's template directory, eg - $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows - us to ship common templates (haproxy, apache) with the helpers. - - **Context generators** - - Context generators are used to generate template contexts during hook - execution. Doing so may require inspecting service relations, charm - config, etc. When registered, a config file is associated with a list - of generators. When a template is rendered and written, all context - generates are called in a chain to generate the context dictionary - passed to the jinja2 template. See context.py for more info. - """ - def __init__(self, templates_dir, openstack_release): - if not os.path.isdir(templates_dir): - log('Could not locate templates dir %s' % templates_dir, - level=ERROR) - raise OSConfigException - - self.templates_dir = templates_dir - self.openstack_release = openstack_release - self.templates = {} - self._tmpl_env = None - - if None in [Environment, ChoiceLoader, FileSystemLoader]: - # if this code is running, the object is created pre-install hook. - # jinja2 shouldn't get touched until the module is reloaded on next - # hook execution, with proper jinja2 bits successfully imported. - if six.PY2: - apt_install('python-jinja2') - else: - apt_install('python3-jinja2') - - def register(self, config_file, contexts, config_template=None): - """ - Register a config file with a list of context generators to be called - during rendering. - config_template can be used to load a template from a string instead of - using template loaders and template files. - :param config_file (str): a path where a config file will be rendered - :param contexts (list): a list of context dictionaries with kv pairs - :param config_template (str): an optional template string to use - """ - self.templates[config_file] = OSConfigTemplate( - config_file=config_file, - contexts=contexts, - config_template=config_template - ) - log('Registered config file: {}'.format(config_file), - level=INFO) - - def _get_tmpl_env(self): - if not self._tmpl_env: - loader = get_loader(self.templates_dir, self.openstack_release) - self._tmpl_env = Environment(loader=loader) - - def _get_template(self, template): - self._get_tmpl_env() - template = self._tmpl_env.get_template(template) - log('Loaded template from {}'.format(template.filename), - level=INFO) - return template - - def _get_template_from_string(self, ostmpl): - ''' - Get a jinja2 template object from a string. - :param ostmpl: OSConfigTemplate to use as a data source. 
- ''' - self._get_tmpl_env() - template = self._tmpl_env.from_string(ostmpl.config_template) - log('Loaded a template from a string for {}'.format( - ostmpl.config_file), - level=INFO) - return template - - def render(self, config_file): - if config_file not in self.templates: - log('Config not registered: {}'.format(config_file), level=ERROR) - raise OSConfigException - - ostmpl = self.templates[config_file] - ctxt = ostmpl.context() - - if ostmpl.is_string_template: - template = self._get_template_from_string(ostmpl) - log('Rendering from a string template: ' - '{}'.format(config_file), - level=INFO) - else: - _tmpl = os.path.basename(config_file) - try: - template = self._get_template(_tmpl) - except exceptions.TemplateNotFound: - # if no template is found with basename, try looking - # for it using a munged full path, eg: - # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf - _tmpl = '_'.join(config_file.split('/')[1:]) - try: - template = self._get_template(_tmpl) - except exceptions.TemplateNotFound as e: - log('Could not load template from {} by {} or {}.' - ''.format( - self.templates_dir, - os.path.basename(config_file), - _tmpl - ), - level=ERROR) - raise e - - log('Rendering from template: {}'.format(config_file), - level=INFO) - return template.render(ctxt) - - def write(self, config_file): - """ - Write a single config file, raises if config file is not registered. - """ - if config_file not in self.templates: - log('Config not registered: %s' % config_file, level=ERROR) - raise OSConfigException - - _out = self.render(config_file) - if six.PY3: - _out = _out.encode('UTF-8') - - with open(config_file, 'wb') as out: - out.write(_out) - - log('Wrote template %s.' % config_file, level=INFO) - - def write_all(self): - """ - Write out all registered config files. - """ - [self.write(k) for k in six.iterkeys(self.templates)] - - def set_release(self, openstack_release): - """ - Resets the template environment and generates a new template loader - based on a the new openstack release. - """ - self._tmpl_env = None - self.openstack_release = openstack_release - self._get_tmpl_env() - - def complete_contexts(self): - ''' - Returns a list of context interfaces that yield a complete context. - ''' - interfaces = [] - [interfaces.extend(i.complete_contexts()) - for i in six.itervalues(self.templates)] - return interfaces - - def get_incomplete_context_data(self, interfaces): - ''' - Return dictionary of relation status of interfaces and any missing - required context data. 
Example: - {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, - 'zeromq-configuration': {'related': False}} - ''' - incomplete_context_data = {} - - for i in six.itervalues(self.templates): - for context in i.contexts: - for interface in interfaces: - related = False - if interface in context.interfaces: - related = context.get_related() - missing_data = context.missing_data - if missing_data: - incomplete_context_data[interface] = {'missing_data': missing_data} - if related: - if incomplete_context_data.get(interface): - incomplete_context_data[interface].update({'related': True}) - else: - incomplete_context_data[interface] = {'related': True} - else: - incomplete_context_data[interface] = {'related': False} - return incomplete_context_data diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py deleted file mode 100644 index 0180e55..0000000 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ /dev/null @@ -1,1691 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Common python helper functions used for OpenStack charms. -from collections import OrderedDict -from functools import wraps - -import subprocess -import json -import os -import sys -import re -import itertools -import functools - -import six -import traceback -import uuid -import yaml - -from charmhelpers import deprecate - -from charmhelpers.contrib.network import ip - -from charmhelpers.core import unitdata - -from charmhelpers.core.hookenv import ( - action_fail, - action_set, - config, - log as juju_log, - charm_dir, - INFO, - ERROR, - related_units, - relation_ids, - relation_set, - status_set, - hook_name, - application_version_set, - cached, -) - -from charmhelpers.core.strutils import BasicStringComparator - -from charmhelpers.contrib.storage.linux.lvm import ( - deactivate_lvm_volume_group, - is_lvm_physical_volume, - remove_lvm_physical_volume, -) - -from charmhelpers.contrib.network.ip import ( - get_ipv6_addr, - is_ipv6, - port_has_listener, -) - -from charmhelpers.core.host import ( - lsb_release, - mounts, - umount, - service_running, - service_pause, - service_resume, - restart_on_change_helper, -) -from charmhelpers.fetch import ( - apt_cache, - import_key as fetch_import_key, - add_source as fetch_add_source, - SourceConfigError, - GPGKeyError, - get_upstream_version -) - -from charmhelpers.fetch.snap import ( - snap_install, - snap_refresh, - valid_snap_channel, -) - -from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk -from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device -from charmhelpers.contrib.openstack.exceptions import OSContextError - -CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" -CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' - -DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' - 'restricted main multiverse universe') - -OPENSTACK_RELEASES = ( - 'diablo', - 'essex', - 'folsom', - 'grizzly', 
- 'havana', - 'icehouse', - 'juno', - 'kilo', - 'liberty', - 'mitaka', - 'newton', - 'ocata', - 'pike', - 'queens', - 'rocky', -) - -UBUNTU_OPENSTACK_RELEASE = OrderedDict([ - ('oneiric', 'diablo'), - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), - ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), -]) - - -OPENSTACK_CODENAMES = OrderedDict([ - ('2011.2', 'diablo'), - ('2012.1', 'essex'), - ('2012.2', 'folsom'), - ('2013.1', 'grizzly'), - ('2013.2', 'havana'), - ('2014.1', 'icehouse'), - ('2014.2', 'juno'), - ('2015.1', 'kilo'), - ('2015.2', 'liberty'), - ('2016.1', 'mitaka'), - ('2016.2', 'newton'), - ('2017.1', 'ocata'), - ('2017.2', 'pike'), - ('2018.1', 'queens'), - ('2018.2', 'rocky'), -]) - -# The ugly duckling - must list releases oldest to newest -SWIFT_CODENAMES = OrderedDict([ - ('diablo', - ['1.4.3']), - ('essex', - ['1.4.8']), - ('folsom', - ['1.7.4']), - ('grizzly', - ['1.7.6', '1.7.7', '1.8.0']), - ('havana', - ['1.9.0', '1.9.1', '1.10.0']), - ('icehouse', - ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), - ('juno', - ['2.0.0', '2.1.0', '2.2.0']), - ('kilo', - ['2.2.1', '2.2.2']), - ('liberty', - ['2.3.0', '2.4.0', '2.5.0']), - ('mitaka', - ['2.5.0', '2.6.0', '2.7.0']), - ('newton', - ['2.8.0', '2.9.0', '2.10.0']), - ('ocata', - ['2.11.0', '2.12.0', '2.13.0']), - ('pike', - ['2.13.0', '2.15.0']), - ('queens', - ['2.16.0', '2.17.0']), - ('rocky', - ['2.18.0']), -]) - -# >= Liberty version->codename mapping -PACKAGE_CODENAMES = { - 'nova-common': OrderedDict([ - ('12', 'liberty'), - ('13', 'mitaka'), - ('14', 'newton'), - ('15', 'ocata'), - ('16', 'pike'), - ('17', 'queens'), - ('18', 'rocky'), - ]), - 'neutron-common': OrderedDict([ - ('7', 'liberty'), - ('8', 'mitaka'), - ('9', 'newton'), - ('10', 'ocata'), - ('11', 'pike'), - ('12', 'queens'), - ('13', 'rocky'), - ]), - 'cinder-common': OrderedDict([ - ('7', 'liberty'), - ('8', 'mitaka'), - ('9', 'newton'), - ('10', 'ocata'), - ('11', 'pike'), - ('12', 'queens'), - ('13', 'rocky'), - ]), - 'keystone': OrderedDict([ - ('8', 'liberty'), - ('9', 'mitaka'), - ('10', 'newton'), - ('11', 'ocata'), - ('12', 'pike'), - ('13', 'queens'), - ('14', 'rocky'), - ]), - 'horizon-common': OrderedDict([ - ('8', 'liberty'), - ('9', 'mitaka'), - ('10', 'newton'), - ('11', 'ocata'), - ('12', 'pike'), - ('13', 'queens'), - ('14', 'rocky'), - ]), - 'ceilometer-common': OrderedDict([ - ('5', 'liberty'), - ('6', 'mitaka'), - ('7', 'newton'), - ('8', 'ocata'), - ('9', 'pike'), - ('10', 'queens'), - ('11', 'rocky'), - ]), - 'heat-common': OrderedDict([ - ('5', 'liberty'), - ('6', 'mitaka'), - ('7', 'newton'), - ('8', 'ocata'), - ('9', 'pike'), - ('10', 'queens'), - ('11', 'rocky'), - ]), - 'glance-common': OrderedDict([ - ('11', 'liberty'), - ('12', 'mitaka'), - ('13', 'newton'), - ('14', 'ocata'), - ('15', 'pike'), - ('16', 'queens'), - ('17', 'rocky'), - ]), - 'openstack-dashboard': OrderedDict([ - ('8', 'liberty'), - ('9', 'mitaka'), - ('10', 'newton'), - ('11', 'ocata'), - ('12', 'pike'), - ('13', 'queens'), - ('14', 'rocky'), - ]), -} - -DEFAULT_LOOPBACK_SIZE = '5G' - - -class CompareOpenStackReleases(BasicStringComparator): - """Provide comparisons of OpenStack releases. 
- - Use in the form of - - if CompareOpenStackReleases(release) > 'mitaka': - # do something with mitaka - """ - _list = OPENSTACK_RELEASES - - -def error_out(msg): - juju_log("FATAL ERROR: %s" % msg, level='ERROR') - sys.exit(1) - - -def get_os_codename_install_source(src): - '''Derive OpenStack release codename from a given installation source.''' - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - rel = '' - if src is None: - return rel - if src in ['distro', 'distro-proposed']: - try: - rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] - except KeyError: - e = 'Could not derive openstack release for '\ - 'this Ubuntu release: %s' % ubuntu_rel - error_out(e) - return rel - - if src.startswith('cloud:'): - ca_rel = src.split(':')[1] - ca_rel = ca_rel.split('-')[1].split('/')[0] - return ca_rel - - # Best guess match based on deb string provided - if (src.startswith('deb') or - src.startswith('ppa') or - src.startswith('snap')): - for v in OPENSTACK_CODENAMES.values(): - if v in src: - return v - - -def get_os_version_install_source(src): - codename = get_os_codename_install_source(src) - return get_os_version_codename(codename) - - -def get_os_codename_version(vers): - '''Determine OpenStack codename from version number.''' - try: - return OPENSTACK_CODENAMES[vers] - except KeyError: - e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) - - -def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): - '''Determine OpenStack version number from codename.''' - for k, v in six.iteritems(version_map): - if v == codename: - return k - e = 'Could not derive OpenStack version for '\ - 'codename: %s' % codename - error_out(e) - - -def get_os_version_codename_swift(codename): - '''Determine OpenStack version number of swift from codename.''' - for k, v in six.iteritems(SWIFT_CODENAMES): - if k == codename: - return v[-1] - e = 'Could not derive swift version for '\ - 'codename: %s' % codename - error_out(e) - - -def get_swift_codename(version): - '''Determine OpenStack codename that corresponds to swift version.''' - codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] - - if len(codenames) > 1: - # If more than one release codename contains this version we determine - # the actual codename based on the highest available install source. 
- for codename in reversed(codenames): - releases = UBUNTU_OPENSTACK_RELEASE - release = [k for k, v in six.iteritems(releases) if codename in v] - ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) - if six.PY3: - ret = ret.decode('UTF-8') - if codename in ret or release[0] in ret: - return codename - elif len(codenames) == 1: - return codenames[0] - - # NOTE: fallback - attempt to match with just major.minor version - match = re.match('^(\d+)\.(\d+)', version) - if match: - major_minor_version = match.group(0) - for codename, versions in six.iteritems(SWIFT_CODENAMES): - for release_version in versions: - if release_version.startswith(major_minor_version): - return codename - - return None - - -def get_os_codename_package(package, fatal=True): - '''Derive OpenStack release codename from an installed package.''' - - if snap_install_requested(): - cmd = ['snap', 'list', package] - try: - out = subprocess.check_output(cmd) - if six.PY3: - out = out.decode('UTF-8') - except subprocess.CalledProcessError as e: - return None - lines = out.split('\n') - for line in lines: - if package in line: - # Second item in list is Version - return line.split()[1] - - import apt_pkg as apt - - cache = apt_cache() - - try: - pkg = cache[package] - except Exception: - if not fatal: - return None - # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation '\ - 'candidate: %s' % package - error_out(e) - - if not pkg.current_ver: - if not fatal: - return None - # package is known, but no version is currently installed. - e = 'Could not determine version of uninstalled package: %s' % package - error_out(e) - - vers = apt.upstream_version(pkg.current_ver.ver_str) - if 'swift' in pkg.name: - # Fully x.y.z match for swift versions - match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) - else: - # x.y match only for 20XX.X - # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) - - if match: - vers = match.group(0) - - # Generate a major version number for newer semantic - # versions of openstack projects - major_vers = vers.split('.')[0] - # >= Liberty independent project versions - if (package in PACKAGE_CODENAMES and - major_vers in PACKAGE_CODENAMES[package]): - return PACKAGE_CODENAMES[package][major_vers] - else: - # < Liberty co-ordinated project versions - try: - if 'swift' in pkg.name: - return get_swift_codename(vers) - else: - return OPENSTACK_CODENAMES[vers] - except KeyError: - if not fatal: - return None - e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) - - -def get_os_version_package(pkg, fatal=True): - '''Derive OpenStack version number from an installed package.''' - codename = get_os_codename_package(pkg, fatal=fatal) - - if not codename: - return None - - if 'swift' in pkg: - vers_map = SWIFT_CODENAMES - for cname, version in six.iteritems(vers_map): - if cname == codename: - return version[-1] - else: - vers_map = OPENSTACK_CODENAMES - for version, cname in six.iteritems(vers_map): - if cname == codename: - return version - # e = "Could not determine OpenStack version for package: %s" % pkg - # error_out(e) - - -# Module local cache variable for the os_release. -_os_rel = None - - -def reset_os_release(): - '''Unset the cached os_release version''' - global _os_rel - _os_rel = None - - -def os_release(package, base='essex', reset_cache=False): - ''' - Returns OpenStack release codename from a cached global. 
- - If reset_cache then unset the cached os_release version and return the - freshly determined version. - - If the codename can not be determined from either an installed package or - the installation source, the earliest release supported by the charm should - be returned. - ''' - global _os_rel - if reset_cache: - reset_os_release() - if _os_rel: - return _os_rel - _os_rel = ( - get_os_codename_package(package, fatal=False) or - get_os_codename_install_source(config('openstack-origin')) or - base) - return _os_rel - - -@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log) -def import_key(keyid): - """Import a key, either ASCII armored, or a GPG key id. - - @param keyid: the key in ASCII armor format, or a GPG key id. - @raises SystemExit() via sys.exit() on failure. - """ - try: - return fetch_import_key(keyid) - except GPGKeyError as e: - error_out("Could not import key: {}".format(str(e))) - - -def get_source_and_pgp_key(source_and_key): - """Look for a pgp key ID or ascii-armor key in the given input. - - :param source_and_key: Sting, "source_spec|keyid" where '|keyid' is - optional. - :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id - if there was no '|' in the source_and_key string. - """ - try: - source, key = source_and_key.split('|', 2) - return source, key or None - except ValueError: - return source_and_key, None - - -@deprecate("use charmhelpers.fetch.add_source() instead.", - "2017-07", log=juju_log) -def configure_installation_source(source_plus_key): - """Configure an installation source. - - The functionality is provided by charmhelpers.fetch.add_source() - The difference between the two functions is that add_source() signature - requires the key to be passed directly, whereas this function passes an - optional key by appending '|' to the end of the source specificiation - 'source'. - - Another difference from add_source() is that the function calls sys.exit(1) - if the configuration fails, whereas add_source() raises - SourceConfigurationError(). Another difference, is that add_source() - silently fails (with a juju_log command) if there is no matching source to - configure, whereas this function fails with a sys.exit(1) - - :param source: String_plus_key -- see above for details. - - Note that the behaviour on error is to log the error to the juju log and - then call sys.exit(1). - """ - if source_plus_key.startswith('snap'): - # Do nothing for snap installs - return - # extract the key if there is one, denoted by a '|' in the rel - source, key = get_source_and_pgp_key(source_plus_key) - - # handle the ordinary sources via add_source - try: - fetch_add_source(source, key, fail_invalid=True) - except SourceConfigError as se: - error_out(str(se)) - - -def config_value_changed(option): - """ - Determine if config value changed since last call to this function. - """ - hook_data = unitdata.HookData() - with hook_data(): - db = unitdata.kv() - current = config(option) - saved = db.get(option) - db.set(option, current) - if saved is None: - return False - return current != saved - - -def save_script_rc(script_path="scripts/scriptrc", **env_vars): - """ - Write an rc file in the charm-delivered directory containing - exported environment variables provided by env_vars. Any charm scripts run - outside the juju hook environment can source this scriptrc to obtain - updated config information necessary to perform health checks or - service changes. 
- """ - juju_rc_path = "%s/%s" % (charm_dir(), script_path) - if not os.path.exists(os.path.dirname(juju_rc_path)): - os.mkdir(os.path.dirname(juju_rc_path)) - with open(juju_rc_path, 'wt') as rc_script: - rc_script.write( - "#!/bin/bash\n") - [rc_script.write('export %s=%s\n' % (u, p)) - for u, p in six.iteritems(env_vars) if u != "script_path"] - - -def openstack_upgrade_available(package): - """ - Determines if an OpenStack upgrade is available from installation - source, based on version of installed package. - - :param package: str: Name of installed package. - - :returns: bool: : Returns True if configured installation source offers - a newer version of package. - """ - - import apt_pkg as apt - src = config('openstack-origin') - cur_vers = get_os_version_package(package) - if not cur_vers: - # The package has not been installed yet do not attempt upgrade - return False - if "swift" in package: - codename = get_os_codename_install_source(src) - avail_vers = get_os_version_codename_swift(codename) - else: - avail_vers = get_os_version_install_source(src) - apt.init() - return apt.version_compare(avail_vers, cur_vers) == 1 - - -def ensure_block_device(block_device): - ''' - Confirm block_device, create as loopback if necessary. - - :param block_device: str: Full path of block device to ensure. - - :returns: str: Full path of ensured block device. - ''' - _none = ['None', 'none', None] - if (block_device in _none): - error_out('prepare_storage(): Missing required input: block_device=%s.' - % block_device) - - if block_device.startswith('/dev/'): - bdev = block_device - elif block_device.startswith('/'): - _bd = block_device.split('|') - if len(_bd) == 2: - bdev, size = _bd - else: - bdev = block_device - size = DEFAULT_LOOPBACK_SIZE - bdev = ensure_loopback_device(bdev, size) - else: - bdev = '/dev/%s' % block_device - - if not is_block_device(bdev): - error_out('Failed to locate valid block device at %s' % bdev) - - return bdev - - -def clean_storage(block_device): - ''' - Ensures a block device is clean. That is: - - unmounted - - any lvm volume groups are deactivated - - any lvm physical device signatures removed - - partition table wiped - - :param block_device: str: Full path to block device to clean. - ''' - for mp, d in mounts(): - if d == block_device: - juju_log('clean_storage(): %s is mounted @ %s, unmounting.' 
% - (d, mp), level=INFO) - umount(mp, persist=True) - - if is_lvm_physical_volume(block_device): - deactivate_lvm_volume_group(block_device) - remove_lvm_physical_volume(block_device) - else: - zap_disk(block_device) - - -is_ip = ip.is_ip -ns_query = ip.ns_query -get_host_ip = ip.get_host_ip -get_hostname = ip.get_hostname - - -def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): - mm_map = {} - if os.path.isfile(mm_file): - with open(mm_file, 'r') as f: - mm_map = json.load(f) - return mm_map - - -def sync_db_with_multi_ipv6_addresses(database, database_user, - relation_prefix=None): - hosts = get_ipv6_addr(dynamic_only=False) - - if config('vip'): - vips = config('vip').split() - for vip in vips: - if vip and is_ipv6(vip): - hosts.append(vip) - - kwargs = {'database': database, - 'username': database_user, - 'hostname': json.dumps(hosts)} - - if relation_prefix: - for key in list(kwargs.keys()): - kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] - del kwargs[key] - - for rid in relation_ids('shared-db'): - relation_set(relation_id=rid, **kwargs) - - -def os_requires_version(ostack_release, pkg): - """ - Decorator for hook to specify minimum supported release - """ - def wrap(f): - @wraps(f) - def wrapped_f(*args): - if os_release(pkg) < ostack_release: - raise Exception("This hook is not supported on releases" - " before %s" % ostack_release) - f(*args) - return wrapped_f - return wrap - - -def os_workload_status(configs, required_interfaces, charm_func=None): - """ - Decorator to set workload status based on complete contexts - """ - def wrap(f): - @wraps(f) - def wrapped_f(*args, **kwargs): - # Run the original function first - f(*args, **kwargs) - # Set workload status now that contexts have been - # acted on - set_os_workload_status(configs, required_interfaces, charm_func) - return wrapped_f - return wrap - - -def set_os_workload_status(configs, required_interfaces, charm_func=None, - services=None, ports=None): - """Set the state of the workload status for the charm. - - This calls _determine_os_workload_status() to get the new state, message - and sets the status using status_set() - - @param configs: a templating.OSConfigRenderer() object - @param required_interfaces: {generic: [specific, specific2, ...]} - @param charm_func: a callable function that returns state, message. The - signature is charm_func(configs) -> (state, message) - @param services: list of strings OR dictionary specifying services/ports - @param ports: OPTIONAL list of port numbers. - @returns state, message: the new workload status, user message - """ - state, message = _determine_os_workload_status( - configs, required_interfaces, charm_func, services, ports) - status_set(state, message) - - -def _determine_os_workload_status( - configs, required_interfaces, charm_func=None, - services=None, ports=None): - """Determine the state of the workload status for the charm. - - This function returns the new workload status for the charm based - on the state of the interfaces, the paused state and whether the - services are actually running and any specified ports are open. - - This checks: - - 1. if the unit should be paused, that it is actually paused. If so the - state is 'maintenance' + message, else 'broken'. - 2. that the interfaces/relations are complete. If they are not then - it sets the state to either 'broken' or 'waiting' and an appropriate - message. - 3. If all the relation data is set, then it checks that the actual - services really are running. If not it sets the state to 'broken'. 
- - If everything is okay then the state returns 'active'. - - @param configs: a templating.OSConfigRenderer() object - @param required_interfaces: {generic: [specific, specific2, ...]} - @param charm_func: a callable function that returns state, message. The - signature is charm_func(configs) -> (state, message) - @param services: list of strings OR dictionary specifying services/ports - @param ports: OPTIONAL list of port numbers. - @returns state, message: the new workload status, user message - """ - state, message = _ows_check_if_paused(services, ports) - - if state is None: - state, message = _ows_check_generic_interfaces( - configs, required_interfaces) - - if state != 'maintenance' and charm_func: - # _ows_check_charm_func() may modify the state, message - state, message = _ows_check_charm_func( - state, message, lambda: charm_func(configs)) - - if state is None: - state, message = _ows_check_services_running(services, ports) - - if state is None: - state = 'active' - message = "Unit is ready" - juju_log(message, 'INFO') - - return state, message - - -def _ows_check_if_paused(services=None, ports=None): - """Check if the unit is supposed to be paused, and if so check that the - services/ports (if passed) are actually stopped/not being listened to. - - if the unit isn't supposed to be paused, just return None, None - - @param services: OPTIONAL services spec or list of service names. - @param ports: OPTIONAL list of port numbers. - @returns state, message or None, None - """ - if is_unit_paused_set(): - state, message = check_actually_paused(services=services, - ports=ports) - if state is None: - # we're paused okay, so set maintenance and return - state = "maintenance" - message = "Paused. Use 'resume' action to resume normal service." - return state, message - return None, None - - -def _ows_check_generic_interfaces(configs, required_interfaces): - """Check the complete contexts to determine the workload status. - - - Checks for missing or incomplete contexts - - juju log details of missing required data. - - determines the correct workload status - - creates an appropriate message for status_set(...) - - if there are no problems then the function returns None, None - - @param configs: a templating.OSConfigRenderer() object - @params required_interfaces: {generic_interface: [specific_interface], } - @returns state, message or None, None - """ - incomplete_rel_data = incomplete_relation_data(configs, - required_interfaces) - state = None - message = None - missing_relations = set() - incomplete_relations = set() - - for generic_interface, relations_states in incomplete_rel_data.items(): - related_interface = None - missing_data = {} - # Related or not? - for interface, relation_state in relations_states.items(): - if relation_state.get('related'): - related_interface = interface - missing_data = relation_state.get('missing_data') - break - # No relation ID for the generic_interface? - if not related_interface: - juju_log("{} relation is missing and must be related for " - "functionality. 
".format(generic_interface), 'WARN') - state = 'blocked' - missing_relations.add(generic_interface) - else: - # Relation ID eists but no related unit - if not missing_data: - # Edge case - relation ID exists but departings - _hook_name = hook_name() - if (('departed' in _hook_name or 'broken' in _hook_name) and - related_interface in _hook_name): - state = 'blocked' - missing_relations.add(generic_interface) - juju_log("{} relation's interface, {}, " - "relationship is departed or broken " - "and is required for functionality." - "".format(generic_interface, related_interface), - "WARN") - # Normal case relation ID exists but no related unit - # (joining) - else: - juju_log("{} relations's interface, {}, is related but has" - " no units in the relation." - "".format(generic_interface, related_interface), - "INFO") - # Related unit exists and data missing on the relation - else: - juju_log("{} relation's interface, {}, is related awaiting " - "the following data from the relationship: {}. " - "".format(generic_interface, related_interface, - ", ".join(missing_data)), "INFO") - if state != 'blocked': - state = 'waiting' - if generic_interface not in missing_relations: - incomplete_relations.add(generic_interface) - - if missing_relations: - message = "Missing relations: {}".format(", ".join(missing_relations)) - if incomplete_relations: - message += "; incomplete relations: {}" \ - "".format(", ".join(incomplete_relations)) - state = 'blocked' - elif incomplete_relations: - message = "Incomplete relations: {}" \ - "".format(", ".join(incomplete_relations)) - state = 'waiting' - - return state, message - - -def _ows_check_charm_func(state, message, charm_func_with_configs): - """Run a custom check function for the charm to see if it wants to - change the state. This is only run if not in 'maintenance' and - tests to see if the new state is more important that the previous - one determined by the interfaces/relations check. - - @param state: the previously determined state so far. - @param message: the user orientated message so far. - @param charm_func: a callable function that returns state, message - @returns state, message strings. - """ - if charm_func_with_configs: - charm_state, charm_message = charm_func_with_configs() - if charm_state != 'active' and charm_state != 'unknown': - state = workload_state_compare(state, charm_state) - if message: - charm_message = charm_message.replace("Incomplete relations: ", - "") - message = "{}, {}".format(message, charm_message) - else: - message = charm_message - return state, message - - -def _ows_check_services_running(services, ports): - """Check that the services that should be running are actually running - and that any ports specified are being listened to. - - @param services: list of strings OR dictionary specifying services/ports - @param ports: list of ports - @returns state, message: strings or None, None - """ - messages = [] - state = None - if services is not None: - services = _extract_services_list_helper(services) - services_running, running = _check_running_services(services) - if not all(running): - messages.append( - "Services not running that should be: {}" - .format(", ".join(_filter_tuples(services_running, False)))) - state = 'blocked' - # also verify that the ports that should be open are open - # NB, that ServiceManager objects only OPTIONALLY have ports - map_not_open, ports_open = ( - _check_listening_on_services_ports(services)) - if not all(ports_open): - # find which service has missing ports. 
They are in service - # order which makes it a bit easier. - message_parts = {service: ", ".join([str(v) for v in open_ports]) - for service, open_ports in map_not_open.items()} - message = ", ".join( - ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) - messages.append( - "Services with ports not open that should be: {}" - .format(message)) - state = 'blocked' - - if ports is not None: - # and we can also check ports which we don't know the service for - ports_open, ports_open_bools = _check_listening_on_ports_list(ports) - if not all(ports_open_bools): - messages.append( - "Ports which should be open, but are not: {}" - .format(", ".join([str(p) for p, v in ports_open - if not v]))) - state = 'blocked' - - if state is not None: - message = "; ".join(messages) - return state, message - - return None, None - - -def _extract_services_list_helper(services): - """Extract a OrderedDict of {service: [ports]} of the supplied services - for use by the other functions. - - The services object can either be: - - None : no services were passed (an empty dict is returned) - - a list of strings - - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} - - An array of [{'service': service_name, ...}, ...] - - @param services: see above - @returns OrderedDict(service: [ports], ...) - """ - if services is None: - return {} - if isinstance(services, dict): - services = services.values() - # either extract the list of services from the dictionary, or if - # it is a simple string, use that. i.e. works with mixed lists. - _s = OrderedDict() - for s in services: - if isinstance(s, dict) and 'service' in s: - _s[s['service']] = s.get('ports', []) - if isinstance(s, str): - _s[s] = [] - return _s - - -def _check_running_services(services): - """Check that the services dict provided is actually running and provide - a list of (service, boolean) tuples for each service. - - Returns both a zipped list of (service, boolean) and a list of booleans - in the same order as the services. - - @param services: OrderedDict of strings: [ports], one for each service to - check. - @returns [(service, boolean), ...], : results for checks - [boolean] : just the result of the service checks - """ - services_running = [service_running(s) for s in services] - return list(zip(services, services_running)), services_running - - -def _check_listening_on_services_ports(services, test=False): - """Check that the unit is actually listening (has the port open) on the - ports that the service specifies are open. If test is True then the - function returns the services with ports that are open rather than - closed. - - Returns an OrderedDict of service: ports and a list of booleans - - @param services: OrderedDict(service: [port, ...], ...) - @param test: default=False, if False, test for closed, otherwise open. 
- @returns OrderedDict(service: [port-not-open, ...]...), [boolean] - """ - test = not(not(test)) # ensure test is True or False - all_ports = list(itertools.chain(*services.values())) - ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] - map_ports = OrderedDict() - matched_ports = [p for p, opened in zip(all_ports, ports_states) - if opened == test] # essentially opened xor test - for service, ports in services.items(): - set_ports = set(ports).intersection(matched_ports) - if set_ports: - map_ports[service] = set_ports - return map_ports, ports_states - - -def _check_listening_on_ports_list(ports): - """Check that the ports list given are being listened to - - Returns a list of ports being listened to and a list of the - booleans. - - @param ports: LIST or port numbers. - @returns [(port_num, boolean), ...], [boolean] - """ - ports_open = [port_has_listener('0.0.0.0', p) for p in ports] - return zip(ports, ports_open), ports_open - - -def _filter_tuples(services_states, state): - """Return a simple list from a list of tuples according to the condition - - @param services_states: LIST of (string, boolean): service and running - state. - @param state: Boolean to match the tuple against. - @returns [LIST of strings] that matched the tuple RHS. - """ - return [s for s, b in services_states if b == state] - - -def workload_state_compare(current_workload_state, workload_state): - """ Return highest priority of two states""" - hierarchy = {'unknown': -1, - 'active': 0, - 'maintenance': 1, - 'waiting': 2, - 'blocked': 3, - } - - if hierarchy.get(workload_state) is None: - workload_state = 'unknown' - if hierarchy.get(current_workload_state) is None: - current_workload_state = 'unknown' - - # Set workload_state based on hierarchy of statuses - if hierarchy.get(current_workload_state) > hierarchy.get(workload_state): - return current_workload_state - else: - return workload_state - - -def incomplete_relation_data(configs, required_interfaces): - """Check complete contexts against required_interfaces - Return dictionary of incomplete relation data. - - configs is an OSConfigRenderer object with configs registered - - required_interfaces is a dictionary of required general interfaces - with dictionary values of possible specific interfaces. - Example: - required_interfaces = {'database': ['shared-db', 'pgsql-db']} - - The interface is said to be satisfied if anyone of the interfaces in the - list has a complete context. - - Return dictionary of incomplete or missing required contexts with relation - status of interfaces and any missing data points. Example: - {'message': - {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, - 'zeromq-configuration': {'related': False}}, - 'identity': - {'identity-service': {'related': False}}, - 'database': - {'pgsql-db': {'related': False}, - 'shared-db': {'related': True}}} - """ - complete_ctxts = configs.complete_contexts() - incomplete_relations = [ - svc_type - for svc_type, interfaces in required_interfaces.items() - if not set(interfaces).intersection(complete_ctxts)] - return { - i: configs.get_incomplete_context_data(required_interfaces[i]) - for i in incomplete_relations} - - -def do_action_openstack_upgrade(package, upgrade_callback, configs): - """Perform action-managed OpenStack upgrade. - - Upgrades packages to the configured openstack-origin version and sets - the corresponding action status as a result. - - If the charm was installed from source we cannot upgrade it. 
- For backwards compatibility a config flag (action-managed-upgrade) must - be set for this code to run, otherwise a full service level upgrade will - fire on config-changed. - - @param package: package name for determining if upgrade available - @param upgrade_callback: function callback to charm's upgrade function - @param configs: templating object derived from OSConfigRenderer class - - @return: True if upgrade successful; False if upgrade failed or skipped - """ - ret = False - - if openstack_upgrade_available(package): - if config('action-managed-upgrade'): - juju_log('Upgrading OpenStack release') - - try: - upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed.'}) - ret = True - except Exception: - action_set({'outcome': 'upgrade failed, see traceback.'}) - action_set({'traceback': traceback.format_exc()}) - action_fail('do_openstack_upgrade resulted in an ' - 'unexpected error') - else: - action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade.'}) - else: - action_set({'outcome': 'no upgrade available.'}) - - return ret - - -def remote_restart(rel_name, remote_service=None): - trigger = { - 'restart-trigger': str(uuid.uuid4()), - } - if remote_service: - trigger['remote-service'] = remote_service - for rid in relation_ids(rel_name): - # This subordinate can be related to two seperate services using - # different subordinate relations so only issue the restart if - # the principle is conencted down the relation we think it is - if related_units(relid=rid): - relation_set(relation_id=rid, - relation_settings=trigger, - ) - - -def check_actually_paused(services=None, ports=None): - """Check that services listed in the services object and and ports - are actually closed (not listened to), to verify that the unit is - properly paused. - - @param services: See _extract_services_list_helper - @returns status, : string for status (None if okay) - message : string for problem for status_set - """ - state = None - message = None - messages = [] - if services is not None: - services = _extract_services_list_helper(services) - services_running, services_states = _check_running_services(services) - if any(services_states): - # there shouldn't be any running so this is a problem - messages.append("these services running: {}" - .format(", ".join( - _filter_tuples(services_running, True)))) - state = "blocked" - ports_open, ports_open_bools = ( - _check_listening_on_services_ports(services, True)) - if any(ports_open_bools): - message_parts = {service: ", ".join([str(v) for v in open_ports]) - for service, open_ports in ports_open.items()} - message = ", ".join( - ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) - messages.append( - "these service:ports are open: {}".format(message)) - state = 'blocked' - if ports is not None: - ports_open, bools = _check_listening_on_ports_list(ports) - if any(bools): - messages.append( - "these ports which should be closed, but are open: {}" - .format(", ".join([str(p) for p, v in ports_open if v]))) - state = 'blocked' - if messages: - message = ("Services should be paused but {}" - .format(", ".join(messages))) - return state, message - - -def set_unit_paused(): - """Set the unit to a paused state in the local kv() store. 
- This does NOT actually pause the unit - """ - with unitdata.HookData()() as t: - kv = t[0] - kv.set('unit-paused', True) - - -def clear_unit_paused(): - """Clear the unit from a paused state in the local kv() store - This does NOT actually restart any services - it only clears the - local state. - """ - with unitdata.HookData()() as t: - kv = t[0] - kv.set('unit-paused', False) - - -def is_unit_paused_set(): - """Return the state of the kv().get('unit-paused'). - This does NOT verify that the unit really is paused. - - To help with units that don't have HookData() (testing) - if it excepts, return False - """ - try: - with unitdata.HookData()() as t: - kv = t[0] - # transform something truth-y into a Boolean. - return not(not(kv.get('unit-paused'))) - except Exception: - return False - - -def pause_unit(assess_status_func, services=None, ports=None, - charm_func=None): - """Pause a unit by stopping the services and setting 'unit-paused' - in the local kv() store. - - Also checks that the services have stopped and ports are no longer - being listened to. - - An optional charm_func() can be called that can either raise an - Exception or return non None, None to indicate that the unit - didn't pause cleanly. - - The signature for charm_func is: - charm_func() -> message: string - - charm_func() is executed after any services are stopped, if supplied. - - The services object can either be: - - None : no services were passed (an empty dict is returned) - - a list of strings - - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} - - An array of [{'service': service_name, ...}, ...] - - @param assess_status_func: (f() -> message: string | None) or None - @param services: OPTIONAL see above - @param ports: OPTIONAL list of port - @param charm_func: function to run for custom charm pausing. - @returns None - @raises Exception(message) on an error for action_fail(). - """ - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - stopped = service_pause(service) - if not stopped: - messages.append("{} didn't stop cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) - set_unit_paused() - if assess_status_func: - message = assess_status_func() - if message: - messages.append(message) - if messages: - raise Exception("Couldn't pause: {}".format("; ".join(messages))) - - -def resume_unit(assess_status_func, services=None, ports=None, - charm_func=None): - """Resume a unit by starting the services and clearning 'unit-paused' - in the local kv() store. - - Also checks that the services have started and ports are being listened to. - - An optional charm_func() can be called that can either raise an - Exception or return non None to indicate that the unit - didn't resume cleanly. - - The signature for charm_func is: - charm_func() -> message: string - - charm_func() is executed after any services are started, if supplied. - - The services object can either be: - - None : no services were passed (an empty dict is returned) - - a list of strings - - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} - - An array of [{'service': service_name, ...}, ...] - - @param assess_status_func: (f() -> message: string | None) or None - @param services: OPTIONAL see above - @param ports: OPTIONAL list of port - @param charm_func: function to run for custom charm resuming. 
- @returns None - @raises Exception(message) on an error for action_fail(). - """ - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - started = service_resume(service) - if not started: - messages.append("{} didn't start cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) - clear_unit_paused() - if assess_status_func: - message = assess_status_func() - if message: - messages.append(message) - if messages: - raise Exception("Couldn't resume: {}".format("; ".join(messages))) - - -def make_assess_status_func(*args, **kwargs): - """Creates an assess_status_func() suitable for handing to pause_unit() - and resume_unit(). - - This uses the _determine_os_workload_status(...) function to determine - what the workload_status should be for the unit. If the unit is - not in maintenance or active states, then the message is returned to - the caller. This is so an action that doesn't result in either a - complete pause or complete resume can signal failure with an action_fail() - """ - def _assess_status_func(): - state, message = _determine_os_workload_status(*args, **kwargs) - status_set(state, message) - if state not in ['maintenance', 'active']: - return message - return None - - return _assess_status_func - - -def pausable_restart_on_change(restart_map, stopstart=False, - restart_functions=None): - """A restart_on_change decorator that checks to see if the unit is - paused. If it is paused then the decorated function doesn't fire. - - This is provided as a helper, as the @restart_on_change(...) decorator - is in core.host, yet the openstack specific helpers are in this file - (contrib.openstack.utils). Thus, this needs to be an optional feature - for openstack charms (or charms that wish to use the openstack - pause/resume type features). - - It is used as follows: - - from contrib.openstack.utils import ( - pausable_restart_on_change as restart_on_change) - - @restart_on_change(restart_map, stopstart=) - def some_hook(...): - pass - - see core.utils.restart_on_change() for more details. - - @param f: the function to decorate - @param restart_map: the restart map {conf_file: [services]} - @param stopstart: DEFAULT false; whether to stop, start or just restart - @returns decorator to use a restart_on_change with pausability - """ - def wrap(f): - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - if is_unit_paused_set(): - return f(*args, **kwargs) - # otherwise, normal restart_on_change functionality - return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) - return wrapped_f - return wrap - - -def ordered(orderme): - """Converts the provided dictionary into a collections.OrderedDict. - - The items in the returned OrderedDict will be inserted based on the - natural sort order of the keys. Nested dictionaries will also be sorted - in order to ensure fully predictable ordering. - - :param orderme: the dict to order - :return: collections.OrderedDict - :raises: ValueError: if `orderme` isn't a dict instance. 
- """ - if not isinstance(orderme, dict): - raise ValueError('argument must be a dict type') - - result = OrderedDict() - for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): - if isinstance(v, dict): - result[k] = ordered(v) - else: - result[k] = v - - return result - - -def config_flags_parser(config_flags): - """Parses config flags string into dict. - - This parsing method supports a few different formats for the config - flag values to be parsed: - - 1. A string in the simple format of key=value pairs, with the possibility - of specifying multiple key value pairs within the same string. For - example, a string in the format of 'key1=value1, key2=value2' will - return a dict of: - - {'key1': 'value1', 'key2': 'value2'}. - - 2. A string in the above format, but supporting a comma-delimited list - of values for the same key. For example, a string in the format of - 'key1=value1, key2=value3,value4,value5' will return a dict of: - - {'key1': 'value1', 'key2': 'value2,value3,value4'} - - 3. A string containing a colon character (:) prior to an equal - character (=) will be treated as yaml and parsed as such. This can be - used to specify more complex key value pairs. For example, - a string in the format of 'key1: subkey1=value1, subkey2=value2' will - return a dict of: - - {'key1', 'subkey1=value1, subkey2=value2'} - - The provided config_flags string may be a list of comma-separated values - which themselves may be comma-separated list of values. - """ - # If we find a colon before an equals sign then treat it as yaml. - # Note: limit it to finding the colon first since this indicates assignment - # for inline yaml. - colon = config_flags.find(':') - equals = config_flags.find('=') - if colon > 0: - if colon < equals or equals < 0: - return ordered(yaml.safe_load(config_flags)) - - if config_flags.find('==') >= 0: - juju_log("config_flags is not in expected format (key=value)", - level=ERROR) - raise OSContextError - - # strip the following from each value. - post_strippers = ' ,' - # we strip any leading/trailing '=' or ' ' from the string then - # split on '='. - split = config_flags.strip(' =').split('=') - limit = len(split) - flags = OrderedDict() - for i in range(0, limit - 1): - current = split[i] - next = split[i + 1] - vindex = next.rfind(',') - if (i == limit - 2) or (vindex < 0): - value = next - else: - value = next[:vindex] - - if i == 0: - key = current - else: - # if this not the first entry, expect an embedded key. - index = current.rfind(',') - if index < 0: - juju_log("Invalid config value(s) at index %s" % (i), - level=ERROR) - raise OSContextError - key = current[index + 1:] - - # Add to collection. - flags[key.strip(post_strippers)] = value.rstrip(post_strippers) - - return flags - - -def os_application_version_set(package): - '''Set version of application for Juju 2.0 and later''' - application_version = get_upstream_version(package) - # NOTE(jamespage) if not able to figure out package version, fallback to - # openstack codename version detection. 
- if not application_version: - application_version_set(os_release(package)) - else: - application_version_set(application_version) - - -def enable_memcache(source=None, release=None, package=None): - """Determine if memcache should be enabled on the local unit - - @param release: release of OpenStack currently deployed - @param package: package to derive OpenStack version deployed - @returns boolean Whether memcache should be enabled - """ - _release = None - if release: - _release = release - else: - _release = os_release(package, base='icehouse') - if not _release: - _release = get_os_codename_install_source(source) - - return CompareOpenStackReleases(_release) >= 'mitaka' - - -def token_cache_pkgs(source=None, release=None): - """Determine additional packages needed for token caching - - @param source: source string for charm - @param release: release of OpenStack currently deployed - @returns List of package to enable token caching - """ - packages = [] - if enable_memcache(source=source, release=release): - packages.extend(['memcached', 'python-memcache']) - return packages - - -def update_json_file(filename, items): - """Updates the json `filename` with a given dict. - :param filename: path to json file (e.g. /etc/glance/policy.json) - :param items: dict of items to update - """ - if not items: - return - - with open(filename) as fd: - policy = json.load(fd) - - # Compare before and after and if nothing has changed don't write the file - # since that could cause unnecessary service restarts. - before = json.dumps(policy, indent=4, sort_keys=True) - policy.update(items) - after = json.dumps(policy, indent=4, sort_keys=True) - if before == after: - return - - with open(filename, "w") as fd: - fd.write(after) - - -@cached -def snap_install_requested(): - """ Determine if installing from snaps - - If openstack-origin is of the form snap:track/channel[/branch] - and channel is in SNAPS_CHANNELS return True. - """ - origin = config('openstack-origin') or "" - if not origin.startswith('snap:'): - return False - - _src = origin[5:] - if '/' in _src: - channel = _src.split('/')[1] - else: - # Handle snap:track with no channel - channel = 'stable' - return valid_snap_channel(channel) - - -def get_snaps_install_info_from_origin(snaps, src, mode='classic'): - """Generate a dictionary of snap install information from origin - - @param snaps: List of snaps - @param src: String of openstack-origin or source of the form - snap:track/channel - @param mode: String classic, devmode or jailmode - @returns: Dictionary of snaps with channels and modes - """ - - if not src.startswith('snap:'): - juju_log("Snap source is not a snap origin", 'WARN') - return {} - - _src = src[5:] - channel = '--channel={}'.format(_src) - - return {snap: {'channel': channel, 'mode': mode} - for snap in snaps} - - -def install_os_snaps(snaps, refresh=False): - """Install OpenStack snaps from channel and with mode - - @param snaps: Dictionary of snaps with channels and modes of the form: - {'snap_name': {'channel': 'snap_channel', - 'mode': 'snap_mode'}} - Where channel is a snapstore channel and mode is --classic, --devmode - or --jailmode. 
- @param post_snap_install: Callback function to run after snaps have been - installed - """ - - def _ensure_flag(flag): - if flag.startswith('--'): - return flag - return '--{}'.format(flag) - - if refresh: - for snap in snaps.keys(): - snap_refresh(snap, - _ensure_flag(snaps[snap]['channel']), - _ensure_flag(snaps[snap]['mode'])) - else: - for snap in snaps.keys(): - snap_install(snap, - _ensure_flag(snaps[snap]['channel']), - _ensure_flag(snaps[snap]['mode'])) diff --git a/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/hooks/charmhelpers/contrib/openstack/vaultlocker.py deleted file mode 100644 index a8e4bf8..0000000 --- a/hooks/charmhelpers/contrib/openstack/vaultlocker.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2018 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import os - -import charmhelpers.contrib.openstack.alternatives as alternatives -import charmhelpers.contrib.openstack.context as context - -import charmhelpers.core.hookenv as hookenv -import charmhelpers.core.host as host -import charmhelpers.core.templating as templating -import charmhelpers.core.unitdata as unitdata - -VAULTLOCKER_BACKEND = 'charm-vaultlocker' - - -class VaultKVContext(context.OSContextGenerator): - """Vault KV context for interaction with vault-kv interfaces""" - interfaces = ['secrets-storage'] - - def __init__(self, secret_backend=None): - super(context.OSContextGenerator, self).__init__() - self.secret_backend = ( - secret_backend or 'charm-{}'.format(hookenv.service_name()) - ) - - def __call__(self): - db = unitdata.kv() - last_token = db.get('last-token') - secret_id = db.get('secret-id') - for relation_id in hookenv.relation_ids(self.interfaces[0]): - for unit in hookenv.related_units(relation_id): - data = hookenv.relation_get(unit=unit, - rid=relation_id) - vault_url = data.get('vault_url') - role_id = data.get('{}_role_id'.format(hookenv.local_unit())) - token = data.get('{}_token'.format(hookenv.local_unit())) - - if all([vault_url, role_id, token]): - token = json.loads(token) - vault_url = json.loads(vault_url) - - # Tokens may change when secret_id's are being - # reissued - if so use token to get new secret_id - if token != last_token: - secret_id = retrieve_secret_id( - url=vault_url, - token=token - ) - db.set('secret-id', secret_id) - db.set('last-token', token) - db.flush() - - ctxt = { - 'vault_url': vault_url, - 'role_id': json.loads(role_id), - 'secret_id': secret_id, - 'secret_backend': self.secret_backend, - } - vault_ca = data.get('vault_ca') - if vault_ca: - ctxt['vault_ca'] = json.loads(vault_ca) - self.complete = True - return ctxt - return {} - - -def write_vaultlocker_conf(context, priority=100): - """Write vaultlocker configuration to disk and install alternative - - :param context: Dict of data from vault-kv relation - :ptype: context: dict - :param priority: Priority of alternative configuration - :ptype: priority: int""" - charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format( - hookenv.service_name() - ) - 
host.mkdir(os.path.dirname(charm_vl_path), perms=0o700) - templating.render(source='vaultlocker.conf.j2', - target=charm_vl_path, - context=context, perms=0o600), - alternatives.install_alternative('vaultlocker.conf', - '/etc/vaultlocker/vaultlocker.conf', - charm_vl_path, priority) - - -def vault_relation_complete(backend=None): - """Determine whether vault relation is complete - - :param backend: Name of secrets backend requested - :ptype backend: string - :returns: whether the relation to vault is complete - :rtype: bool""" - vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) - vault_kv() - return vault_kv.complete - - -# TODO: contrib a high level unwrap method to hvac that works -def retrieve_secret_id(url, token): - """Retrieve a response-wrapped secret_id from Vault - - :param url: URL to Vault Server - :ptype url: str - :param token: One shot Token to use - :ptype token: str - :returns: secret_id to use for Vault Access - :rtype: str""" - import hvac - client = hvac.Client(url=url, token=token) - response = client._post('/v1/sys/wrapping/unwrap') - if response.status_code == 200: - data = response.json() - return data['data']['secret_id'] diff --git a/hooks/charmhelpers/contrib/python/__init__.py b/hooks/charmhelpers/contrib/python/__init__.py deleted file mode 100644 index d7567b8..0000000 --- a/hooks/charmhelpers/contrib/python/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/python/packages.py b/hooks/charmhelpers/contrib/python/packages.py deleted file mode 100644 index 6e95028..0000000 --- a/hooks/charmhelpers/contrib/python/packages.py +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import six -import subprocess -import sys - -from charmhelpers.fetch import apt_install, apt_update -from charmhelpers.core.hookenv import charm_dir, log - -__author__ = "Jorge Niedbalski " - - -def pip_execute(*args, **kwargs): - """Overriden pip_execute() to stop sys.path being changed. - - The act of importing main from the pip module seems to cause add wheels - from the /usr/share/python-wheels which are installed by various tools. - This function ensures that sys.path remains the same after the call is - executed. 
- """ - try: - _path = sys.path - try: - from pip import main as _pip_execute - except ImportError: - apt_update() - if six.PY2: - apt_install('python-pip') - else: - apt_install('python3-pip') - from pip import main as _pip_execute - _pip_execute(*args, **kwargs) - finally: - sys.path = _path - - -def parse_options(given, available): - """Given a set of options, check if available""" - for key, value in sorted(given.items()): - if not value: - continue - if key in available: - yield "--{0}={1}".format(key, value) - - -def pip_install_requirements(requirements, constraints=None, **options): - """Install a requirements file. - - :param constraints: Path to pip constraints file. - http://pip.readthedocs.org/en/stable/user_guide/#constraints-files - """ - command = ["install"] - - available_options = ('proxy', 'src', 'log', ) - for option in parse_options(options, available_options): - command.append(option) - - command.append("-r {0}".format(requirements)) - if constraints: - command.append("-c {0}".format(constraints)) - log("Installing from file: {} with constraints {} " - "and options: {}".format(requirements, constraints, command)) - else: - log("Installing from file: {} with options: {}".format(requirements, - command)) - pip_execute(command) - - -def pip_install(package, fatal=False, upgrade=False, venv=None, - constraints=None, **options): - """Install a python package""" - if venv: - venv_python = os.path.join(venv, 'bin/pip') - command = [venv_python, "install"] - else: - command = ["install"] - - available_options = ('proxy', 'src', 'log', 'index-url', ) - for option in parse_options(options, available_options): - command.append(option) - - if upgrade: - command.append('--upgrade') - - if constraints: - command.extend(['-c', constraints]) - - if isinstance(package, list): - command.extend(package) - else: - command.append(package) - - log("Installing {} package with options: {}".format(package, - command)) - if venv: - subprocess.check_call(command) - else: - pip_execute(command) - - -def pip_uninstall(package, **options): - """Uninstall a python package""" - command = ["uninstall", "-q", "-y"] - - available_options = ('proxy', 'log', ) - for option in parse_options(options, available_options): - command.append(option) - - if isinstance(package, list): - command.extend(package) - else: - command.append(package) - - log("Uninstalling {} package with options: {}".format(package, - command)) - pip_execute(command) - - -def pip_list(): - """Returns the list of current python installed packages - """ - return pip_execute(["list"]) - - -def pip_create_virtualenv(path=None): - """Create an isolated Python environment.""" - if six.PY2: - apt_install('python-virtualenv') - else: - apt_install('python3-virtualenv') - - if path: - venv_path = path - else: - venv_path = os.path.join(charm_dir(), 'venv') - - if not os.path.exists(venv_path): - subprocess.check_call(['virtualenv', venv_path]) diff --git a/hooks/charmhelpers/contrib/storage/__init__.py b/hooks/charmhelpers/contrib/storage/__init__.py deleted file mode 100644 index d7567b8..0000000 --- a/hooks/charmhelpers/contrib/storage/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/storage/linux/__init__.py b/hooks/charmhelpers/contrib/storage/linux/__init__.py deleted file mode 100644 index d7567b8..0000000 --- a/hooks/charmhelpers/contrib/storage/linux/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/storage/linux/bcache.py b/hooks/charmhelpers/contrib/storage/linux/bcache.py deleted file mode 100644 index 605991e..0000000 --- a/hooks/charmhelpers/contrib/storage/linux/bcache.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2017 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os -import json - -from charmhelpers.core.hookenv import log - -stats_intervals = ['stats_day', 'stats_five_minute', - 'stats_hour', 'stats_total'] - -SYSFS = '/sys' - - -class Bcache(object): - """Bcache behaviour - """ - - def __init__(self, cachepath): - self.cachepath = cachepath - - @classmethod - def fromdevice(cls, devname): - return cls('{}/block/{}/bcache'.format(SYSFS, devname)) - - def __str__(self): - return self.cachepath - - def get_stats(self, interval): - """Get cache stats - """ - intervaldir = 'stats_{}'.format(interval) - path = "{}/{}".format(self.cachepath, intervaldir) - out = dict() - for elem in os.listdir(path): - out[elem] = open('{}/{}'.format(path, elem)).read().strip() - return out - - -def get_bcache_fs(): - """Return all cache sets - """ - cachesetroot = "{}/fs/bcache".format(SYSFS) - try: - dirs = os.listdir(cachesetroot) - except OSError: - log("No bcache fs found") - return [] - cacheset = set([Bcache('{}/{}'.format(cachesetroot, d)) for d in dirs if not d.startswith('register')]) - return cacheset - - -def get_stats_action(cachespec, interval): - """Action for getting bcache statistics for a given cachespec. - Cachespec can either be a device name, eg. 
'sdb', which will retrieve - cache stats for the given device, or 'global', which will retrieve stats - for all cachesets - """ - if cachespec == 'global': - caches = get_bcache_fs() - else: - caches = [Bcache.fromdevice(cachespec)] - res = dict((c.cachepath, c.get_stats(interval)) for c in caches) - return json.dumps(res, indent=4, separators=(',', ': ')) diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py deleted file mode 100644 index 7682820..0000000 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ /dev/null @@ -1,1472 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2012 Canonical Ltd. -# -# This file is sourced from lp:openstack-charm-helpers -# -# Authors: -# James Page -# Adam Gandelman -# - -import errno -import hashlib -import math -import six - -import os -import shutil -import json -import time -import uuid - -from subprocess import ( - check_call, - check_output, - CalledProcessError, -) -from charmhelpers.core.hookenv import ( - config, - service_name, - local_unit, - relation_get, - relation_ids, - relation_set, - related_units, - log, - DEBUG, - INFO, - WARNING, - ERROR, -) -from charmhelpers.core.host import ( - mount, - mounts, - service_start, - service_stop, - service_running, - umount, -) -from charmhelpers.fetch import ( - apt_install, -) -from charmhelpers.core.unitdata import kv - -from charmhelpers.core.kernel import modprobe -from charmhelpers.contrib.openstack.utils import config_flags_parser - -KEYRING = '/etc/ceph/ceph.client.{}.keyring' -KEYFILE = '/etc/ceph/ceph.client.{}.key' - -CEPH_CONF = """[global] -auth supported = {auth} -keyring = {keyring} -mon host = {mon_hosts} -log to syslog = {use_syslog} -err to syslog = {use_syslog} -clog to syslog = {use_syslog} -""" - -# The number of placement groups per OSD to target for placement group -# calculations. This number is chosen as 100 due to the ceph PG Calc -# documentation recommending to choose 100 for clusters which are not -# expected to increase in the foreseeable future. Since the majority of the -# calculations are done on deployment, target the case of non-expanding -# clusters as the default. -DEFAULT_PGS_PER_OSD_TARGET = 100 -DEFAULT_POOL_WEIGHT = 10.0 -LEGACY_PG_COUNT = 200 -DEFAULT_MINIMUM_PGS = 2 - - -def validator(value, valid_type, valid_range=None): - """ - Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values - Example input: - validator(value=1, - valid_type=int, - valid_range=[0, 2]) - This says I'm testing value=1. It must be an int inclusive in [0,2] - - :param value: The value to validate - :param valid_type: The type that value should be. - :param valid_range: A range of values that value can assume. 
- :return: - """ - assert isinstance(value, valid_type), "{} is not a {}".format( - value, - valid_type) - if valid_range is not None: - assert isinstance(valid_range, list), \ - "valid_range must be a list, was given {}".format(valid_range) - # If we're dealing with strings - if isinstance(value, six.string_types): - assert value in valid_range, \ - "{} is not in the list {}".format(value, valid_range) - # Integer, float should have a min and max - else: - if len(valid_range) != 2: - raise ValueError( - "Invalid valid_range list of {} for {}. " - "List must be [min,max]".format(valid_range, value)) - assert value >= valid_range[0], \ - "{} is less than minimum allowed value of {}".format( - value, valid_range[0]) - assert value <= valid_range[1], \ - "{} is greater than maximum allowed value of {}".format( - value, valid_range[1]) - - -class PoolCreationError(Exception): - """ - A custom error to inform the caller that a pool creation failed. Provides an error message - """ - - def __init__(self, message): - super(PoolCreationError, self).__init__(message) - - -class Pool(object): - """ - An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. - Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). - """ - - def __init__(self, service, name): - self.service = service - self.name = name - - # Create the pool if it doesn't exist already - # To be implemented by subclasses - def create(self): - pass - - def add_cache_tier(self, cache_pool, mode): - """ - Adds a new cache tier to an existing pool. - :param cache_pool: six.string_types. The cache tier pool name to add. - :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"] - :return: None - """ - # Check the input types and values - validator(value=cache_pool, valid_type=six.string_types) - validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) - - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) - check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) - - def remove_cache_tier(self, cache_pool): - """ - Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. - :param cache_pool: six.string_types. The cache tier pool name to remove. 
- :return: None - """ - # read-only is easy, writeback is much harder - mode = get_cache_mode(self.service, cache_pool) - version = ceph_version() - if mode == 'readonly': - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - - elif mode == 'writeback': - pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', - 'cache-mode', cache_pool, 'forward'] - if version >= '10.1': - # Jewel added a mandatory flag - pool_forward_cmd.append('--yes-i-really-mean-it') - - check_call(pool_forward_cmd) - # Flush the cache and wait for it to return - check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) - check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - - def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): - """Return the number of placement groups to use when creating the pool. - - Returns the number of placement groups which should be specified when - creating the pool. This is based upon the calculation guidelines - provided by the Ceph Placement Group Calculator (located online at - http://ceph.com/pgcalc/). - - The number of placement groups are calculated using the following: - - (Target PGs per OSD) * (OSD #) * (%Data) - ---------------------------------------- - (Pool size) - - Per the upstream guidelines, the OSD # should really be considered - based on the number of OSDs which are eligible to be selected by the - pool. Since the pool creation doesn't specify any of CRUSH set rules, - the default rule will be dependent upon the type of pool being - created (replicated or erasure). - - This code makes no attempt to determine the number of OSDs which can be - selected for the specific rule, rather it is left to the user to tune - in the form of 'expected-osd-count' config option. - - :param pool_size: int. pool_size is either the number of replicas for - replicated pools or the K+M sum for erasure coded pools - :param percent_data: float. the percentage of data that is expected to - be contained in the pool for the specific OSD set. Default value - is to assume 10% of the data is for this pool, which is a - relatively low % of the data but allows for the pg_num to be - increased. NOTE: the default is primarily to handle the scenario - where related charms requiring pools has not been upgraded to - include an update to indicate their relative usage of the pools. - :return: int. The number of pgs to use. - """ - - # Note: This calculation follows the approach that is provided - # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. - validator(value=pool_size, valid_type=int) - - # Ensure that percent data is set to something - even with a default - # it can be set to None, which would wreak havoc below. - if percent_data is None: - percent_data = DEFAULT_POOL_WEIGHT - - # If the expected-osd-count is specified, then use the max between - # the expected-osd-count and the actual osd_count - osd_list = get_osds(self.service) - expected = config('expected-osd-count') or 0 - - if osd_list: - osd_count = max(expected, len(osd_list)) - - # Log a message to provide some insight if the calculations claim - # to be off because someone is setting the expected count and - # there are more OSDs in reality. Try to make a proper guess - # based upon the cluster itself. 
- if expected and osd_count != expected: - log("Found more OSDs than provided expected count. " - "Using the actual count instead", INFO) - elif expected: - # Use the expected-osd-count in older ceph versions to allow for - # a more accurate pg calculations - osd_count = expected - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - return LEGACY_PG_COUNT - - percent_data /= 100.0 - target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET - num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size - - # NOTE: ensure a sane minimum number of PGS otherwise we don't get any - # reasonable data distribution in minimal OSD configurations - if num_pg < DEFAULT_MINIMUM_PGS: - num_pg = DEFAULT_MINIMUM_PGS - - # The CRUSH algorithm has a slight optimization for placement groups - # with powers of 2 so find the nearest power of 2. If the nearest - # power of 2 is more than 25% below the original value, the next - # highest value is used. To do this, find the nearest power of 2 such - # that 2^n <= num_pg, check to see if its within the 25% tolerance. - exponent = math.floor(math.log(num_pg, 2)) - nearest = 2 ** exponent - if (num_pg - nearest) > (num_pg * 0.25): - # Choose the next highest power of 2 since the nearest is more - # than 25% below the original value. - return int(nearest * 2) - else: - return int(nearest) - - -class ReplicatedPool(Pool): - def __init__(self, service, name, pg_num=None, replicas=2, - percent_data=10.0, app_name=None): - super(ReplicatedPool, self).__init__(service=service, name=name) - self.replicas = replicas - if pg_num: - # Since the number of placement groups were specified, ensure - # that there aren't too many created. - max_pgs = self.get_pgs(self.replicas, 100.0) - self.pg_num = min(pg_num, max_pgs) - else: - self.pg_num = self.get_pgs(self.replicas, percent_data) - if app_name: - self.app_name = app_name - else: - self.app_name = 'unknown' - - def create(self): - if not pool_exists(self.service, self.name): - # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num)] - try: - check_call(cmd) - # Set the pool replica size - update_pool(client=self.service, - pool=self.name, - settings={'size': str(self.replicas)}) - try: - set_app_name_for_pool(client=self.service, - pool=self.name, - name=self.app_name) - except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name, level=WARNING)) - except CalledProcessError: - raise - - -# Default jerasure erasure coded pool -class ErasurePool(Pool): - def __init__(self, service, name, erasure_code_profile="default", - percent_data=10.0, app_name=None): - super(ErasurePool, self).__init__(service=service, name=name) - self.erasure_code_profile = erasure_code_profile - self.percent_data = percent_data - if app_name: - self.app_name = app_name - else: - self.app_name = 'unknown' - - def create(self): - if not pool_exists(self.service, self.name): - # Try to find the erasure profile information in order to properly - # size the number of placement groups. The size of an erasure - # coded placement group is calculated as k+m. 
- erasure_profile = get_erasure_profile(self.service, - self.erasure_code_profile) - - # Check for errors - if erasure_profile is None: - msg = ("Failed to discover erasure profile named " - "{}".format(self.erasure_code_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - if 'k' not in erasure_profile or 'm' not in erasure_profile: - # Error - msg = ("Unable to find k (data chunks) or m (coding chunks) " - "in erasure profile {}".format(erasure_profile)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - - k = int(erasure_profile['k']) - m = int(erasure_profile['m']) - pgs = self.get_pgs(k + m, self.percent_data) - # Create it - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(pgs), str(pgs), - 'erasure', self.erasure_code_profile] - try: - check_call(cmd) - try: - set_app_name_for_pool(client=self.service, - pool=self.name, - name=self.app_name) - except CalledProcessError: - log('Could not set app name for pool {}'.format(self.name, level=WARNING)) - except CalledProcessError: - raise - - """Get an existing erasure code profile if it already exists. - Returns json formatted output""" - - -def get_mon_map(service): - """ - Returns the current monitor map. - :param service: six.string_types. The Ceph user name to run the command under - :return: json string. :raise: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails - """ - try: - mon_status = check_output(['ceph', '--id', service, - 'mon_status', '--format=json']) - if six.PY3: - mon_status = mon_status.decode('UTF-8') - try: - return json.loads(mon_status) - except ValueError as v: - log("Unable to parse mon_status json: {}. Error: {}" - .format(mon_status, str(v))) - raise - except CalledProcessError as e: - log("mon_status command failed with message: {}" - .format(str(e))) - raise - - -def hash_monitor_names(service): - """ - Uses the get_mon_map() function to get information about the monitor - cluster. - Hash the name of each monitor. Return a sorted list of monitor hashes - in an ascending order. - :param service: six.string_types. The Ceph user name to run the command under - :rtype : dict. json dict of monitor name, ip address and rank - example: { - 'name': 'ip-172-31-13-165', - 'rank': 0, - 'addr': '172.31.13.165:6789/0'} - """ - try: - hash_list = [] - monitor_list = get_mon_map(service=service) - if monitor_list['monmap']['mons']: - for mon in monitor_list['monmap']['mons']: - hash_list.append( - hashlib.sha224(mon['name'].encode('utf-8')).hexdigest()) - return sorted(hash_list) - else: - return None - except (ValueError, CalledProcessError): - raise - - -def monitor_key_delete(service, key): - """ - Delete a key and value pair from the monitor cluster - :param service: six.string_types. The Ceph user name to run the command under - Deletes a key value pair on the monitor cluster. - :param key: six.string_types. The key to delete. - """ - try: - check_output( - ['ceph', '--id', service, - 'config-key', 'del', str(key)]) - except CalledProcessError as e: - log("Monitor config-key put failed with message: {}".format( - e.output)) - raise - - -def monitor_key_set(service, key, value): - """ - Sets a key value pair on the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command under - :param key: six.string_types. The key to set. - :param value: The value to set. 
This will be converted to a string - before setting - """ - try: - check_output( - ['ceph', '--id', service, - 'config-key', 'put', str(key), str(value)]) - except CalledProcessError as e: - log("Monitor config-key put failed with message: {}".format( - e.output)) - raise - - -def monitor_key_get(service, key): - """ - Gets the value of an existing key in the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command under - :param key: six.string_types. The key to search for. - :return: Returns the value of that key or None if not found. - """ - try: - output = check_output( - ['ceph', '--id', service, - 'config-key', 'get', str(key)]).decode('UTF-8') - return output - except CalledProcessError as e: - log("Monitor config-key get failed with message: {}".format( - e.output)) - return None - - -def monitor_key_exists(service, key): - """ - Searches for the existence of a key in the monitor cluster. - :param service: six.string_types. The Ceph user name to run the command under - :param key: six.string_types. The key to search for - :return: Returns True if the key exists, False if not and raises an - exception if an unknown error occurs. :raise: CalledProcessError if - an unknown error occurs - """ - try: - check_call( - ['ceph', '--id', service, - 'config-key', 'exists', str(key)]) - # I can return true here regardless because Ceph returns - # ENOENT if the key wasn't found - return True - except CalledProcessError as e: - if e.returncode == errno.ENOENT: - return False - else: - log("Unknown error from ceph config-get exists: {} {}".format( - e.returncode, e.output)) - raise - - -def get_erasure_profile(service, name): - """ - :param service: six.string_types. The Ceph user name to run the command under - :param name: - :return: - """ - try: - out = check_output(['ceph', '--id', service, - 'osd', 'erasure-code-profile', 'get', - name, '--format=json']) - if six.PY3: - out = out.decode('UTF-8') - return json.loads(out) - except (CalledProcessError, OSError, ValueError): - return None - - -def pool_set(service, pool_name, key, value): - """ - Sets a value for a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param key: six.string_types - :param value: - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, - str(value).lower()] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def snapshot_pool(service, pool_name, snapshot_name): - """ - Snapshots a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def remove_pool_snapshot(service, pool_name, snapshot_name): - """ - Remove a snapshot from a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param snapshot_name: six.string_types - :return: None. 
Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] - try: - check_call(cmd) - except CalledProcessError: - raise - - -# max_bytes should be an int or long -def set_pool_quota(service, pool_name, max_bytes): - """ - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :param max_bytes: int or long - :return: None. Can raise CalledProcessError - """ - # Set a byte quota on a RADOS pool in ceph. - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, - 'max_bytes', str(max_bytes)] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def remove_pool_quota(service, pool_name): - """ - Set a byte quota on a RADOS pool in ceph. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def remove_erasure_profile(service, profile_name): - """ - Create a new erasure code profile if one does not already exist for it. Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command under - :param profile_name: six.string_types - :return: None. Can raise CalledProcessError - """ - cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', - profile_name] - try: - check_call(cmd) - except CalledProcessError: - raise - - -def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', - failure_domain='host', - data_chunks=2, coding_chunks=1, - locality=None, durability_estimator=None): - """ - Create a new erasure code profile if one does not already exist for it. Updates - the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ - for more details - :param service: six.string_types. The Ceph user name to run the command under - :param profile_name: six.string_types - :param erasure_plugin_name: six.string_types - :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', - 'room', 'root', 'row']) - :param data_chunks: int - :param coding_chunks: int - :param locality: int - :param durability_estimator: int - :return: None. 
Can raise CalledProcessError - """ - version = ceph_version() - - # Ensure this failure_domain is allowed by Ceph - validator(failure_domain, six.string_types, - ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) - - cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, - 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks) - ] - if locality is not None and durability_estimator is not None: - raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") - - # failure_domain changed in luminous - if version and version >= '12.0.0': - cmd.append('crush-failure-domain=' + failure_domain) - else: - cmd.append('ruleset-failure-domain=' + failure_domain) - - # Add plugin specific information - if locality is not None: - # For local erasure codes - cmd.append('l=' + str(locality)) - if durability_estimator is not None: - # For Shec erasure codes - cmd.append('c=' + str(durability_estimator)) - - if erasure_profile_exists(service, profile_name): - cmd.append('--force') - - try: - check_call(cmd) - except CalledProcessError: - raise - - -def rename_pool(service, old_name, new_name): - """ - Rename a Ceph pool from old_name to new_name - :param service: six.string_types. The Ceph user name to run the command under - :param old_name: six.string_types - :param new_name: six.string_types - :return: None - """ - validator(value=old_name, valid_type=six.string_types) - validator(value=new_name, valid_type=six.string_types) - - cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] - check_call(cmd) - - -def erasure_profile_exists(service, name): - """ - Check to see if an Erasure code profile already exists. - :param service: six.string_types. The Ceph user name to run the command under - :param name: six.string_types - :return: int or None - """ - validator(value=name, valid_type=six.string_types) - try: - check_call(['ceph', '--id', service, - 'osd', 'erasure-code-profile', 'get', - name]) - return True - except CalledProcessError: - return False - - -def get_cache_mode(service, pool_name): - """ - Find the current caching mode of the pool_name given. - :param service: six.string_types. The Ceph user name to run the command under - :param pool_name: six.string_types - :return: int or None - """ - validator(value=service, valid_type=six.string_types) - validator(value=pool_name, valid_type=six.string_types) - out = check_output(['ceph', '--id', service, - 'osd', 'dump', '--format=json']) - if six.PY3: - out = out.decode('UTF-8') - try: - osd_json = json.loads(out) - for pool in osd_json['pools']: - if pool['pool_name'] == pool_name: - return pool['cache_mode'] - return None - except ValueError: - raise - - -def pool_exists(service, name): - """Check to see if a RADOS pool already exists.""" - try: - out = check_output(['rados', '--id', service, 'lspools']) - if six.PY3: - out = out.decode('UTF-8') - except CalledProcessError: - return False - - return name in out.split() - - -def get_osds(service): - """Return a list of all Ceph Object Storage Daemons currently in the - cluster. 
- """ - version = ceph_version() - if version and version >= '0.56': - out = check_output(['ceph', '--id', service, - 'osd', 'ls', - '--format=json']) - if six.PY3: - out = out.decode('UTF-8') - return json.loads(out) - - return None - - -def install(): - """Basic Ceph client installation.""" - ceph_dir = "/etc/ceph" - if not os.path.exists(ceph_dir): - os.mkdir(ceph_dir) - - apt_install('ceph-common', fatal=True) - - -def rbd_exists(service, pool, rbd_img): - """Check to see if a RADOS block device exists.""" - try: - out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]) - if six.PY3: - out = out.decode('UTF-8') - except CalledProcessError: - return False - - return rbd_img in out - - -def create_rbd_image(service, pool, image, sizemb): - """Create a new RADOS block device.""" - cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, - '--pool', pool] - check_call(cmd) - - -def update_pool(client, pool, settings): - cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in six.iteritems(settings): - cmd.append(k) - cmd.append(v) - - check_call(cmd) - - -def set_app_name_for_pool(client, pool, name): - """ - Calls `osd pool application enable` for the specified pool name - - :param client: Name of the ceph client to use - :type client: str - :param pool: Pool to set app name for - :type pool: str - :param name: app name for the specified pool - :type name: str - - :raises: CalledProcessError if ceph call fails - """ - if ceph_version() >= '12.0.0': - cmd = ['ceph', '--id', client, 'osd', 'pool', - 'application', 'enable', pool, name] - check_call(cmd) - - -def create_pool(service, name, replicas=3, pg_num=None): - """Create a new RADOS pool.""" - if pool_exists(service, name): - log("Ceph pool {} already exists, skipping creation".format(name), - level=WARNING) - return - - if not pg_num: - # Calculate the number of placement groups based - # on upstream recommended best practices. - osds = get_osds(service) - if osds: - pg_num = (len(osds) * 100 // replicas) - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - pg_num = 200 - - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] - check_call(cmd) - - update_pool(service, name, settings={'size': str(replicas)}) - - -def delete_pool(service, name): - """Delete a RADOS pool from ceph.""" - cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, - '--yes-i-really-really-mean-it'] - check_call(cmd) - - -def _keyfile_path(service): - return KEYFILE.format(service) - - -def _keyring_path(service): - return KEYRING.format(service) - - -def create_keyring(service, key): - """Create a new Ceph keyring containing key.""" - keyring = _keyring_path(service) - if os.path.exists(keyring): - log('Ceph keyring exists at %s.' % keyring, level=WARNING) - return - - cmd = ['ceph-authtool', keyring, '--create-keyring', - '--name=client.{}'.format(service), '--add-key={}'.format(key)] - check_call(cmd) - log('Created new ceph keyring at %s.' % keyring, level=DEBUG) - - -def delete_keyring(service): - """Delete an existing Ceph keyring.""" - keyring = _keyring_path(service) - if not os.path.exists(keyring): - log('Keyring does not exist at %s' % keyring, level=WARNING) - return - - os.remove(keyring) - log('Deleted ring at %s.' % keyring, level=INFO) - - -def create_key_file(service, key): - """Create a file containing key.""" - keyfile = _keyfile_path(service) - if os.path.exists(keyfile): - log('Keyfile exists at %s.' 
% keyfile, level=WARNING) - return - - with open(keyfile, 'w') as fd: - fd.write(key) - - log('Created new keyfile at %s.' % keyfile, level=INFO) - - -def get_ceph_nodes(relation='ceph'): - """Query named relation to determine current nodes.""" - hosts = [] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - hosts.append(relation_get('private-address', unit=unit, rid=r_id)) - - return hosts - - -def configure(service, key, auth, use_syslog): - """Perform basic configuration of Ceph.""" - create_keyring(service, key) - create_key_file(service, key) - hosts = get_ceph_nodes() - with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: - ceph_conf.write(CEPH_CONF.format(auth=auth, - keyring=_keyring_path(service), - mon_hosts=",".join(map(str, hosts)), - use_syslog=use_syslog)) - modprobe('rbd') - - -def image_mapped(name): - """Determine whether a RADOS block device is mapped locally.""" - try: - out = check_output(['rbd', 'showmapped']) - if six.PY3: - out = out.decode('UTF-8') - except CalledProcessError: - return False - - return name in out - - -def map_block_storage(service, pool, image): - """Map a RADOS block device for local use.""" - cmd = [ - 'rbd', - 'map', - '{}/{}'.format(pool, image), - '--user', - service, - '--secret', - _keyfile_path(service), - ] - check_call(cmd) - - -def filesystem_mounted(fs): - """Determine whether a filesytems is already mounted.""" - return fs in [f for f, m in mounts()] - - -def make_filesystem(blk_device, fstype='ext4', timeout=10): - """Make a new filesystem on the specified block device.""" - count = 0 - e_noent = os.errno.ENOENT - while not os.path.exists(blk_device): - if count >= timeout: - log('Gave up waiting on block device %s' % blk_device, - level=ERROR) - raise IOError(e_noent, os.strerror(e_noent), blk_device) - - log('Waiting for block device %s to appear' % blk_device, - level=DEBUG) - count += 1 - time.sleep(1) - else: - log('Formatting block device %s as filesystem %s.' % - (blk_device, fstype), level=INFO) - check_call(['mkfs', '-t', fstype, blk_device]) - - -def place_data_on_block_device(blk_device, data_src_dst): - """Migrate data in data_src_dst to blk_device and then remount.""" - # mount block device into /mnt - mount(blk_device, '/mnt') - # copy data to /mnt - copy_files(data_src_dst, '/mnt') - # umount block device - umount('/mnt') - # Grab user/group ID's from original source - _dir = os.stat(data_src_dst) - uid = _dir.st_uid - gid = _dir.st_gid - # re-mount where the data should originally be - # TODO: persist is currently a NO-OP in core.host - mount(blk_device, data_src_dst, persist=True) - # ensure original ownership of new mount. - os.chown(data_src_dst, uid, gid) - - -def copy_files(src, dst, symlinks=False, ignore=None): - """Copy files from src to dst.""" - for item in os.listdir(src): - s = os.path.join(src, item) - d = os.path.join(dst, item) - if os.path.isdir(s): - shutil.copytree(s, d, symlinks, ignore) - else: - shutil.copy2(s, d) - - -def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, - blk_device, fstype, system_services=[], - replicas=3): - """NOTE: This function must only be called from a single service unit for - the same rbd_img otherwise data loss will occur. - - Ensures given pool and RBD image exists, is mapped to a block device, - and the device is formatted and mounted at the given mount_point. - - If formatting a device for the first time, data existing at mount_point - will be migrated to the RBD device before being re-mounted. 
- - All services listed in system_services will be stopped prior to data - migration and restarted when complete. - """ - # Ensure pool, RBD image, RBD mappings are in place. - if not pool_exists(service, pool): - log('Creating new pool {}.'.format(pool), level=INFO) - create_pool(service, pool, replicas=replicas) - - if not rbd_exists(service, pool, rbd_img): - log('Creating RBD image ({}).'.format(rbd_img), level=INFO) - create_rbd_image(service, pool, rbd_img, sizemb) - - if not image_mapped(rbd_img): - log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), - level=INFO) - map_block_storage(service, pool, rbd_img) - - # make file system - # TODO: What happens if for whatever reason this is run again and - # the data is already in the rbd device and/or is mounted?? - # When it is mounted already, it will fail to make the fs - # XXX: This is really sketchy! Need to at least add an fstab entry - # otherwise this hook will blow away existing data if its executed - # after a reboot. - if not filesystem_mounted(mount_point): - make_filesystem(blk_device, fstype) - - for svc in system_services: - if service_running(svc): - log('Stopping services {} prior to migrating data.' - .format(svc), level=DEBUG) - service_stop(svc) - - place_data_on_block_device(blk_device, mount_point) - - for svc in system_services: - log('Starting service {} after migrating data.' - .format(svc), level=DEBUG) - service_start(svc) - - -def ensure_ceph_keyring(service, user=None, group=None, - relation='ceph', key=None): - """Ensures a ceph keyring is created for a named service and optionally - ensures user and group ownership. - - @returns boolean: Flag to indicate whether a key was successfully written - to disk based on either relation data or a supplied key - """ - if not key: - for rid in relation_ids(relation): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break - - if not key: - return False - - create_keyring(service=service, key=key) - keyring = _keyring_path(service) - if user and group: - check_call(['chown', '%s.%s' % (user, group), keyring]) - - return True - - -def ceph_version(): - """Retrieve the local version of ceph.""" - if os.path.exists('/usr/bin/ceph'): - cmd = ['ceph', '-v'] - output = check_output(cmd) - if six.PY3: - output = output.decode('UTF-8') - output = output.split() - if len(output) > 3: - return output[2] - else: - return None - else: - return None - - -class CephBrokerRq(object): - """Ceph broker request. - - Multiple operations can be added to a request and sent to the Ceph broker - to be executed. - - Request is json-encoded for sending over the wire. - - The API is versioned and defaults to version 1. - """ - - def __init__(self, api_version=1, request_id=None): - self.api_version = api_version - if request_id: - self.request_id = request_id - else: - self.request_id = str(uuid.uuid1()) - self.ops = [] - - def add_op_request_access_to_group(self, name, namespace=None, - permission=None, key_name=None, - object_prefix_permissions=None): - """ - Adds the requested permissions to the current service's Ceph key, - allowing the key to access only the specified pools or - object prefixes. object_prefix_permissions should be a dictionary - keyed on the permission with the corresponding value being a list - of prefixes to apply that permission to. 
- { - 'rwx': ['prefix1', 'prefix2'], - 'class-read': ['prefix3']} - """ - self.ops.append({ - 'op': 'add-permissions-to-key', 'group': name, - 'namespace': namespace, - 'name': key_name or service_name(), - 'group-permission': permission, - 'object-prefix-permissions': object_prefix_permissions}) - - def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None, group=None, namespace=None): - """Adds an operation to create a pool. - - @param pg_num setting: optional setting. If not provided, this value - will be calculated by the broker based on how many OSDs are in the - cluster at the time of creation. Note that, if provided, this value - will be capped at the current available maximum. - @param weight: the percentage of data the pool makes up - """ - if pg_num and weight: - raise ValueError('pg_num and weight are mutually exclusive') - - self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight, 'group': group, - 'group-namespace': namespace}) - - def set_ops(self, ops): - """Set request ops to provided value. - - Useful for injecting ops that come from a previous request - to allow comparisons to ensure validity. - """ - self.ops = ops - - @property - def request(self): - return json.dumps({'api-version': self.api_version, 'ops': self.ops, - 'request-id': self.request_id}) - - def _ops_equal(self, other): - if len(self.ops) == len(other.ops): - for req_no in range(0, len(self.ops)): - for key in [ - 'replicas', 'name', 'op', 'pg_num', 'weight', - 'group', 'group-namespace', 'group-permission', - 'object-prefix-permissions']: - if self.ops[req_no].get(key) != other.ops[req_no].get(key): - return False - else: - return False - return True - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - if self.api_version == other.api_version and \ - self._ops_equal(other): - return True - else: - return False - - def __ne__(self, other): - return not self.__eq__(other) - - -class CephBrokerRsp(object): - """Ceph broker response. - - Response is json-decoded and contents provided as methods/properties. - - The API is versioned and defaults to version 1. - """ - - def __init__(self, encoded_rsp): - self.api_version = None - self.rsp = json.loads(encoded_rsp) - - @property - def request_id(self): - return self.rsp.get('request-id') - - @property - def exit_code(self): - return self.rsp.get('exit-code') - - @property - def exit_msg(self): - return self.rsp.get('stderr') - - -# Ceph Broker Conversation: -# If a charm needs an action to be taken by ceph it can create a CephBrokerRq -# and send that request to ceph via the ceph relation. The CephBrokerRq has a -# unique id so that the client can identity which CephBrokerRsp is associated -# with the request. Ceph will also respond to each client unit individually -# creating a response key per client unit eg glance/0 will get a CephBrokerRsp -# via key broker-rsp-glance-0 -# -# To use this the charm can just do something like: -# -# from charmhelpers.contrib.storage.linux.ceph import ( -# send_request_if_needed, -# is_request_complete, -# CephBrokerRq, -# ) -# -# @hooks.hook('ceph-relation-changed') -# def ceph_changed(): -# rq = CephBrokerRq() -# rq.add_op_create_pool(name='poolname', replica_count=3) -# -# if is_request_complete(rq): -# -# else: -# send_request_if_needed(get_ceph_request()) -# -# CephBrokerRq and CephBrokerRsp are serialized into JSON. 
Below is an example -# of glance having sent a request to ceph which ceph has successfully processed -# 'ceph:8': { -# 'ceph/0': { -# 'auth': 'cephx', -# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', -# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', -# 'ceph-public-address': '10.5.44.103', -# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', -# 'private-address': '10.5.44.103', -# }, -# 'glance/0': { -# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' -# '"ops": [{"replicas": 3, "name": "glance", ' -# '"op": "create-pool"}]}'), -# 'private-address': '10.5.44.109', -# }, -# } - -def get_previous_request(rid): - """Return the last ceph broker request sent on a given relation - - @param rid: Relation id to query for request - """ - request = None - broker_req = relation_get(attribute='broker_req', rid=rid, - unit=local_unit()) - if broker_req: - request_data = json.loads(broker_req) - request = CephBrokerRq(api_version=request_data['api-version'], - request_id=request_data['request-id']) - request.set_ops(request_data['ops']) - - return request - - -def get_request_states(request, relation='ceph'): - """Return a dict of requests per relation id with their corresponding - completion state. - - This allows a charm, which has a request for ceph, to see whether there is - an equivalent request already being processed and if so what state that - request is in. - - @param request: A CephBrokerRq object - """ - complete = [] - requests = {} - for rid in relation_ids(relation): - complete = False - previous_request = get_previous_request(rid) - if request == previous_request: - sent = True - complete = is_request_complete_for_rid(previous_request, rid) - else: - sent = False - complete = False - - requests[rid] = { - 'sent': sent, - 'complete': complete, - } - - return requests - - -def is_request_sent(request, relation='ceph'): - """Check to see if a functionally equivalent request has already been sent - - Returns True if a similair request has been sent - - @param request: A CephBrokerRq object - """ - states = get_request_states(request, relation=relation) - for rid in states.keys(): - if not states[rid]['sent']: - return False - - return True - - -def is_request_complete(request, relation='ceph'): - """Check to see if a functionally equivalent request has already been - completed - - Returns True if a similair request has been completed - - @param request: A CephBrokerRq object - """ - states = get_request_states(request, relation=relation) - for rid in states.keys(): - if not states[rid]['complete']: - return False - - return True - - -def is_request_complete_for_rid(request, rid): - """Check if a given request has been completed on the given relation - - @param request: A CephBrokerRq object - @param rid: Relation ID - """ - broker_key = get_broker_rsp_key() - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - if rdata.get(broker_key): - rsp = CephBrokerRsp(rdata.get(broker_key)) - if rsp.request_id == request.request_id: - if not rsp.exit_code: - return True - else: - # The remote unit sent no reply targeted at this unit so either the - # remote ceph cluster does not support unit targeted replies or it - # has not processed our request yet. 
- if rdata.get('broker_rsp'): - request_data = json.loads(rdata['broker_rsp']) - if request_data.get('request-id'): - log('Ignoring legacy broker_rsp without unit key as remote ' - 'service supports unit specific replies', level=DEBUG) - else: - log('Using legacy broker_rsp as remote service does not ' - 'supports unit specific replies', level=DEBUG) - rsp = CephBrokerRsp(rdata['broker_rsp']) - if not rsp.exit_code: - return True - - return False - - -def get_broker_rsp_key(): - """Return broker response key for this unit - - This is the key that ceph is going to use to pass request status - information back to this unit - """ - return 'broker-rsp-' + local_unit().replace('/', '-') - - -def send_request_if_needed(request, relation='ceph'): - """Send broker request if an equivalent request has not already been sent - - @param request: A CephBrokerRq object - """ - if is_request_sent(request, relation=relation): - log('Request already sent but not complete, not sending new request', - level=DEBUG) - else: - for rid in relation_ids(relation): - log('Sending request {}'.format(request.request_id), level=DEBUG) - relation_set(relation_id=rid, broker_req=request.request) - - -def is_broker_action_done(action, rid=None, unit=None): - """Check whether broker action has completed yet. - - @param action: name of action to be performed - @returns True if action complete otherwise False - """ - rdata = relation_get(rid, unit) or {} - broker_rsp = rdata.get(get_broker_rsp_key()) - if not broker_rsp: - return False - - rsp = CephBrokerRsp(broker_rsp) - unit_name = local_unit().partition('/')[2] - key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) - kvstore = kv() - val = kvstore.get(key=key) - if val and val == rsp.request_id: - return True - - return False - - -def mark_broker_action_done(action, rid=None, unit=None): - """Mark action as having been completed. - - @param action: name of action to be performed - @returns None - """ - rdata = relation_get(rid, unit) or {} - broker_rsp = rdata.get(get_broker_rsp_key()) - if not broker_rsp: - return - - rsp = CephBrokerRsp(broker_rsp) - unit_name = local_unit().partition('/')[2] - key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) - kvstore = kv() - kvstore.set(key=key, value=rsp.request_id) - kvstore.flush() - - -class CephConfContext(object): - """Ceph config (ceph.conf) context. - - Supports user-provided Ceph configuration settings. Use can provide a - dictionary as the value for the config-flags charm option containing - Ceph configuration settings keyede by their section in ceph.conf. 
- """ - def __init__(self, permitted_sections=None): - self.permitted_sections = permitted_sections or [] - - def __call__(self): - conf = config('config-flags') - if not conf: - return {} - - conf = config_flags_parser(conf) - if not isinstance(conf, dict): - log("Provided config-flags is not a dictionary - ignoring", - level=WARNING) - return {} - - permitted = self.permitted_sections - if permitted: - diff = set(conf.keys()).difference(set(permitted)) - if diff: - log("Config-flags contains invalid keys '%s' - they will be " - "ignored" % (', '.join(diff)), level=WARNING) - - ceph_conf = {} - for key in conf: - if permitted and key not in permitted: - log("Ignoring key '%s'" % key, level=WARNING) - continue - - ceph_conf[key] = conf[key] - - return ceph_conf diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py deleted file mode 100644 index 1d6ae6f..0000000 --- a/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import re -from subprocess import ( - check_call, - check_output, -) - -import six - - -################################################## -# loopback device helpers. -################################################## -def loopback_devices(): - ''' - Parse through 'losetup -a' output to determine currently mapped - loopback devices. Output is expected to look like: - - /dev/loop0: [0807]:961814 (/tmp/my.img) - - :returns: dict: a dict mapping {loopback_dev: backing_file} - ''' - loopbacks = {} - cmd = ['losetup', '-a'] - devs = [d.strip().split(' ') for d in - check_output(cmd).splitlines() if d != ''] - for dev, _, f in devs: - loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] - return loopbacks - - -def create_loopback(file_path): - ''' - Create a loopback device for a given backing file. - - :returns: str: Full path to new loopback device (eg, /dev/loop0) - ''' - file_path = os.path.abspath(file_path) - check_call(['losetup', '--find', file_path]) - for d, f in six.iteritems(loopback_devices()): - if f == file_path: - return d - - -def ensure_loopback_device(path, size): - ''' - Ensure a loopback device exists for a given backing file path and size. - If it a loopback device is not mapped to file, a new one will be created. - - TODO: Confirm size of found loopback device. - - :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) - ''' - for d, f in six.iteritems(loopback_devices()): - if f == path: - return d - - if not os.path.exists(path): - cmd = ['truncate', '--size', size, path] - check_call(cmd) - - return create_loopback(path) - - -def is_mapped_loopback_device(device): - """ - Checks if a given device name is an existing/mapped loopback device. - :param device: str: Full path to the device (eg, /dev/loop1). 
- :returns: str: Path to the backing file if is a loopback device - empty string otherwise - """ - return loopback_devices().get(device, "") diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py deleted file mode 100644 index c8bde69..0000000 --- a/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools -from subprocess import ( - CalledProcessError, - check_call, - check_output, - Popen, - PIPE, -) - - -################################################## -# LVM helpers. -################################################## -def deactivate_lvm_volume_group(block_device): - ''' - Deactivate any volume gruop associated with an LVM physical volume. - - :param block_device: str: Full path to LVM physical volume - ''' - vg = list_lvm_volume_group(block_device) - if vg: - cmd = ['vgchange', '-an', vg] - check_call(cmd) - - -def is_lvm_physical_volume(block_device): - ''' - Determine whether a block device is initialized as an LVM PV. - - :param block_device: str: Full path of block device to inspect. - - :returns: boolean: True if block device is a PV, False if not. - ''' - try: - check_output(['pvdisplay', block_device]) - return True - except CalledProcessError: - return False - - -def remove_lvm_physical_volume(block_device): - ''' - Remove LVM PV signatures from a given block device. - - :param block_device: str: Full path of block device to scrub. - ''' - p = Popen(['pvremove', '-ff', block_device], - stdin=PIPE) - p.communicate(input='y\n') - - -def list_lvm_volume_group(block_device): - ''' - List LVM volume group associated with a given block device. - - Assumes block device is a valid LVM PV. - - :param block_device: str: Full path of block device to inspect. - - :returns: str: Name of volume group associated with block device or None - ''' - vg = None - pvd = check_output(['pvdisplay', block_device]).splitlines() - for lvm in pvd: - lvm = lvm.decode('UTF-8') - if lvm.strip().startswith('VG Name'): - vg = ' '.join(lvm.strip().split()[2:]) - return vg - - -def create_lvm_physical_volume(block_device): - ''' - Initialize a block device as an LVM physical volume. - - :param block_device: str: Full path of block device to initialize. - - ''' - check_call(['pvcreate', block_device]) - - -def create_lvm_volume_group(volume_group, block_device): - ''' - Create an LVM volume group backed by a given block device. - - Assumes block device has already been initialized as an LVM PV. - - :param volume_group: str: Name of volume group to create. - :block_device: str: Full path of PV-initialized block device. 
- ''' - check_call(['vgcreate', volume_group, block_device]) - - -def list_logical_volumes(select_criteria=None, path_mode=False): - ''' - List logical volumes - - :param select_criteria: str: Limit list to those volumes matching this - criteria (see 'lvs -S help' for more details) - :param path_mode: bool: return logical volume name in 'vg/lv' format, this - format is required for some commands like lvextend - :returns: [str]: List of logical volumes - ''' - lv_diplay_attr = 'lv_name' - if path_mode: - # Parsing output logic relies on the column order - lv_diplay_attr = 'vg_name,' + lv_diplay_attr - cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings'] - if select_criteria: - cmd.extend(['--select', select_criteria]) - lvs = [] - for lv in check_output(cmd).decode('UTF-8').splitlines(): - if not lv: - continue - if path_mode: - lvs.append('/'.join(lv.strip().split())) - else: - lvs.append(lv.strip()) - return lvs - - -list_thin_logical_volume_pools = functools.partial( - list_logical_volumes, - select_criteria='lv_attr =~ ^t') - -list_thin_logical_volumes = functools.partial( - list_logical_volumes, - select_criteria='lv_attr =~ ^V') - - -def extend_logical_volume_by_device(lv_name, block_device): - ''' - Extends the size of logical volume lv_name by the amount of free space on - physical volume block_device. - - :param lv_name: str: name of logical volume to be extended (vg/lv format) - :param block_device: str: name of block_device to be allocated to lv_name - ''' - cmd = ['lvextend', lv_name, block_device] - check_call(cmd) - - -def create_logical_volume(lv_name, volume_group, size=None): - ''' - Create a new logical volume in an existing volume group - - :param lv_name: str: name of logical volume to be created. - :param volume_group: str: Name of volume group to use for the new volume. - :param size: str: Size of logical volume to create (100% if not supplied) - :raises subprocess.CalledProcessError: in the event that the lvcreate fails. - ''' - if size: - check_call([ - 'lvcreate', - '--yes', - '-L', - '{}'.format(size), - '-n', lv_name, volume_group - ]) - # create the lv with all the space available, this is needed because the - # system call is different for LVM - else: - check_call([ - 'lvcreate', - '--yes', - '-l', - '100%FREE', - '-n', lv_name, volume_group - ]) diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py deleted file mode 100644 index 6f846b0..0000000 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import re -from stat import S_ISBLK - -from subprocess import ( - check_call, - check_output, - call -) - - -def is_block_device(path): - ''' - Confirm device at path is a valid block device node. - - :returns: boolean: True if path is a block device, False if not. 
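The logical-volume helpers build on that; a minimal sketch (volume, group, and device names are illustrative), noting that path_mode=True yields the 'vg/lv' form that lvextend expects:

    from charmhelpers.contrib.storage.linux.lvm import (
        create_logical_volume,
        extend_logical_volume_by_device,
        list_logical_volumes,
    )

    # Carve a 5G volume out of 'myvg', then grow it with the free
    # space contributed by an extra physical volume.
    create_logical_volume('data', 'myvg', size='5G')
    if 'myvg/data' in list_logical_volumes(path_mode=True):
        extend_logical_volume_by_device('myvg/data', '/dev/sdc')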
- ''' - if not os.path.exists(path): - return False - return S_ISBLK(os.stat(path).st_mode) - - -def zap_disk(block_device): - ''' - Clear a block device of partition table. Relies on sgdisk, which is - installed as part of the 'gdisk' package in Ubuntu. - - :param block_device: str: Full path of block device to clean. - ''' - # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b - # sometimes sgdisk exits non-zero; this is OK, dd will clean up - call(['sgdisk', '--zap-all', '--', block_device]) - call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) - dev_end = check_output(['blockdev', '--getsz', - block_device]).decode('UTF-8') - gpt_end = int(dev_end.split()[0]) - 100 - check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), - 'bs=1M', 'count=1']) - check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), - 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) - - -def is_device_mounted(device): - '''Given a device path, return True if that device is mounted, and False - if it isn't. - - :param device: str: Full path of the device to check. - :returns: boolean: True if the path represents a mounted device, False if - it doesn't. - ''' - try: - out = check_output(['lsblk', '-P', device]).decode('UTF-8') - except Exception: - return False - return bool(re.search(r'MOUNTPOINT=".+"', out)) - - -def mkfs_xfs(device, force=False): - """Format device with XFS filesystem. - - By default this should fail if the device already has a filesystem on it. - :param device: Full path to device to format - :ptype device: str - :param force: Force operation - :ptype force: boolean""" - cmd = ['mkfs.xfs'] - if force: - cmd.append("-f") - - cmd += ['-i', 'size=1024', device] - check_call(cmd) diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py deleted file mode 100644 index d7567b8..0000000 --- a/hooks/charmhelpers/core/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/core/decorators.py b/hooks/charmhelpers/core/decorators.py deleted file mode 100644 index 6ad41ee..0000000 --- a/hooks/charmhelpers/core/decorators.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2014 Canonical Ltd.
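A typical consumer of the removed storage utils gates a destructive re-format on the device being a real, unmounted block device; a hedged sketch with an illustrative device path:

    from charmhelpers.contrib.storage.linux.utils import (
        is_block_device,
        is_device_mounted,
        mkfs_xfs,
        zap_disk,
    )

    dev = '/dev/sdd'  # illustrative
    if is_block_device(dev) and not is_device_mounted(dev):
        zap_disk(dev)              # destructive: clears partition tables
        mkfs_xfs(dev, force=True)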
-# -# Authors: -# Edward Hope-Morley -# - -import time - -from charmhelpers.core.hookenv import ( - log, - INFO, -) - - -def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): - """If the decorated function raises exception exc_type, allow num_retries - retry attempts before raising the exception. - """ - def _retry_on_exception_inner_1(f): - def _retry_on_exception_inner_2(*args, **kwargs): - retries = num_retries - multiplier = 1 - while True: - try: - return f(*args, **kwargs) - except exc_type: - if not retries: - raise - - delay = base_delay * multiplier - multiplier += 1 - log("Retrying '%s' %d more times (delay=%s)" % - (f.__name__, retries, delay), level=INFO) - retries -= 1 - if delay: - time.sleep(delay) - - return _retry_on_exception_inner_2 - - return _retry_on_exception_inner_1 diff --git a/hooks/charmhelpers/core/files.py b/hooks/charmhelpers/core/files.py deleted file mode 100644 index fdd82b7..0000000 --- a/hooks/charmhelpers/core/files.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -__author__ = 'Jorge Niedbalski ' - -import os -import subprocess - - -def sed(filename, before, after, flags='g'):Âť - """ - Search and replace the given pattern in filename. - - :param filename: relative or absolute file path. - :param before: expression to be replaced (see 'man sed') - :param after: expression to replace with (see 'man sed') - :param flags: sed-compatible regex flags; for example, to make - the search and replace case insensitive, specify ``flags="i"``. - The ``g`` flag is always specified regardless, so you do not - need to remember to include it when overriding this parameter. - :returns: If the sed command exit code was zero then return, - otherwise raise CalledProcessError. - """ - expression = r's/{0}/{1}/{2}'.format(before, - after, flags) - - return subprocess.check_call(["sed", "-i", "-r", "-e", - expression, - os.path.expanduser(filename)]) diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py deleted file mode 100644 index d9fa915..0000000 --- a/hooks/charmhelpers/core/fstab.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import os - -__author__ = 'Jorge Niedbalski R.
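The retry_on_exception decorator removed here backs off linearly (base_delay, then 2x, 3x, ...); a minimal sketch retrying an illustrative health check, sleeping 5s, 10s, then 15s between attempts:

    from subprocess import CalledProcessError, check_call

    from charmhelpers.core.decorators import retry_on_exception

    @retry_on_exception(3, base_delay=5, exc_type=CalledProcessError)
    def assert_cluster_healthy():
        check_call(['ceph', 'health'])  # illustrative command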
' - - -class Fstab(io.FileIO): - """This class extends file in order to implement a file reader/writer - for file `/etc/fstab` - """ - - class Entry(object): - """Entry class represents a non-comment line on the `/etc/fstab` file - """ - def __init__(self, device, mountpoint, filesystem, - options, d=0, p=0): - self.device = device - self.mountpoint = mountpoint - self.filesystem = filesystem - - if not options: - options = "defaults" - - self.options = options - self.d = int(d) - self.p = int(p) - - def __eq__(self, o): - return str(self) == str(o) - - def __str__(self): - return "{} {} {} {} {} {}".format(self.device, - self.mountpoint, - self.filesystem, - self.options, - self.d, - self.p) - - DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') - - def __init__(self, path=None): - if path: - self._path = path - else: - self._path = self.DEFAULT_PATH - super(Fstab, self).__init__(self._path, 'rb+') - - def _hydrate_entry(self, line): - # NOTE: use split with no arguments to split on any - # whitespace including tabs - return Fstab.Entry(*filter( - lambda x: x not in ('', None), - line.strip("\n").split())) - - @property - def entries(self): - self.seek(0) - for line in self.readlines(): - line = line.decode('us-ascii') - try: - if line.strip() and not line.strip().startswith("#"): - yield self._hydrate_entry(line) - except ValueError: - pass - - def get_entry_by_attr(self, attr, value): - for entry in self.entries: - e_attr = getattr(entry, attr) - if e_attr == value: - return entry - return None - - def add_entry(self, entry): - if self.get_entry_by_attr('device', entry.device): - return False - - self.write((str(entry) + '\n').encode('us-ascii')) - self.truncate() - return entry - - def remove_entry(self, entry): - self.seek(0) - - lines = [l.decode('us-ascii') for l in self.readlines()] - - found = False - for index, line in enumerate(lines): - if line.strip() and not line.strip().startswith("#"): - if self._hydrate_entry(line) == entry: - found = True - break - - if not found: - return False - - lines.remove(line) - - self.seek(0) - self.write(''.join(lines).encode('us-ascii')) - self.truncate() - return True - - @classmethod - def remove_by_mountpoint(cls, mountpoint, path=None): - fstab = cls(path=path) - entry = fstab.get_entry_by_attr('mountpoint', mountpoint) - if entry: - return fstab.remove_entry(entry) - return False - - @classmethod - def add(cls, device, mountpoint, filesystem, options=None, path=None): - return cls(path=path).add_entry(Fstab.Entry(device, - mountpoint, filesystem, - options=options)) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py deleted file mode 100644 index ed7af39..0000000 --- a/hooks/charmhelpers/core/hookenv.py +++ /dev/null @@ -1,1299 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. 
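The Fstab class is most often used through its classmethod conveniences; a short sketch with illustrative mount details:

    from charmhelpers.core.fstab import Fstab

    # Append an entry unless one already exists for the device.
    Fstab.add('/dev/loop0', '/mnt/data', 'xfs', options='noatime')
    # ...and remove it again by mount point.
    Fstab.remove_by_mountpoint('/mnt/data')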
-# -# Authors: -# Charm Helpers Developers - -from __future__ import print_function -import copy -from distutils.version import LooseVersion -from functools import wraps -from collections import namedtuple -import glob -import os -import json -import yaml -import re -import subprocess -import sys -import errno -import tempfile -from subprocess import CalledProcessError - -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - - -CRITICAL = "CRITICAL" -ERROR = "ERROR" -WARNING = "WARNING" -INFO = "INFO" -DEBUG = "DEBUG" -TRACE = "TRACE" -MARKER = object() - -cache = {} - - -def cached(func): - """Cache return values for multiple executions of func + args - - For example:: - - @cached - def unit_get(attribute): - pass - - unit_get('test') - - will cache the result of unit_get + 'test' for future calls. - """ - @wraps(func) - def wrapper(*args, **kwargs): - global cache - key = json.dumps((func, args, kwargs), sort_keys=True, default=str) - try: - return cache[key] - except KeyError: - pass # Drop out of the exception handler scope. - res = func(*args, **kwargs) - cache[key] = res - return res - wrapper._wrapped = func - return wrapper - - -def flush(key): - """Flushes any entries from function cache where the - key is found in the function+args """ - flush_list = [] - for item in cache: - if key in item: - flush_list.append(item) - for item in flush_list: - del cache[item] - - -def log(message, level=None): - """Write a message to the juju log""" - command = ['juju-log'] - if level: - command += ['-l', level] - if not isinstance(message, six.string_types): - message = repr(message) - command += [message] - # Missing juju-log should not cause failures in unit tests - # Send log output to stderr - try: - subprocess.call(command) - except OSError as e: - if e.errno == errno.ENOENT: - if level: - message = "{}: {}".format(level, message) - message = "juju-log: {}".format(message) - print(message, file=sys.stderr) - else: - raise - - -class Serializable(UserDict): - """Wrapper, an object that can be serialized to yaml or json""" - - def __init__(self, obj): - # wrap the object - UserDict.__init__(self) - self.data = obj - - def __getattr__(self, attr): - # See if this object has attribute. - if attr in ("json", "yaml", "data"): - return self.__dict__[attr] - # Check for attribute in wrapped object. - got = getattr(self.data, attr, MARKER) - if got is not MARKER: - return got - # Proxy to the wrapped object via dict interface. - try: - return self.data[attr] - except KeyError: - raise AttributeError(attr) - - def __getstate__(self): - # Pickle as a standard dictionary. - return self.data - - def __setstate__(self, state): - # Unpickle into our wrapper. 
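The @cached/flush pair gives hook tools a per-invocation memo table; a sketch with a hypothetical lookup function:

    from charmhelpers.core.hookenv import cached, flush

    @cached
    def expensive_lookup(name):
        pass  # imagine a slow relation or subprocess query here

    expensive_lookup('db')   # computed once per hook invocation
    expensive_lookup('db')   # served from the in-memory cache
    flush('db')              # evict every cached entry whose key mentions 'db'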
- self.data = state - - def json(self): - """Serialize the object to json""" - return json.dumps(self.data) - - def yaml(self): - """Serialize the object to yaml""" - return yaml.dump(self.data) - - -def execution_environment(): - """A convenient bundling of the current execution context""" - context = {} - context['conf'] = config() - if relation_id(): - context['reltype'] = relation_type() - context['relid'] = relation_id() - context['rel'] = relation_get() - context['unit'] = local_unit() - context['rels'] = relations() - context['env'] = os.environ - return context - - -def in_relation_hook(): - """Determine whether we're running in a relation hook""" - return 'JUJU_RELATION' in os.environ - - -def relation_type(): - """The scope for the current relation hook""" - return os.environ.get('JUJU_RELATION', None) - - -@cached -def relation_id(relation_name=None, service_or_unit=None): - """The relation ID for the current or a specified relation""" - if not relation_name and not service_or_unit: - return os.environ.get('JUJU_RELATION_ID', None) - elif relation_name and service_or_unit: - service_name = service_or_unit.split('/')[0] - for relid in relation_ids(relation_name): - remote_service = remote_service_name(relid) - if remote_service == service_name: - return relid - else: - raise ValueError('Must specify neither or both of relation_name and service_or_unit') - - -def local_unit(): - """Local unit ID""" - return os.environ['JUJU_UNIT_NAME'] - - -def remote_unit(): - """The remote unit for the current relation hook""" - return os.environ.get('JUJU_REMOTE_UNIT', None) - - -def service_name(): - """The name service group this unit belongs to""" - return local_unit().split('/')[0] - - -def principal_unit(): - """Returns the principal unit of this unit, otherwise None""" - # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT - principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) - # If it's empty, then this unit is the principal - if principal_unit == '': - return os.environ['JUJU_UNIT_NAME'] - elif principal_unit is not None: - return principal_unit - # For Juju 2.1 and below, let's try work out the principle unit by - # the various charms' metadata.yaml. - for reltype in relation_types(): - for rid in relation_ids(reltype): - for unit in related_units(rid): - md = _metadata_unit(unit) - if not md: - continue - subordinate = md.pop('subordinate', None) - if not subordinate: - return unit - return None - - -@cached -def remote_service_name(relid=None): - """The remote service name for a given relation-id (or the current relation)""" - if relid is None: - unit = remote_unit() - else: - units = related_units(relid) - unit = units[0] if units else None - return unit.split('/')[0] if unit else None - - -def hook_name(): - """The name of the currently executing hook""" - return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) - - -class Config(dict): - """A dictionary representation of the charm's config.yaml, with some - extra features: - - - See which values in the dictionary have changed since the previous hook. - - For values that have changed, see what the previous value was. - - Store arbitrary data for use in a later hook. - - NOTE: Do not instantiate this object directly - instead call - ``hookenv.config()``, which will return an instance of :class:`Config`. 
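relation_id and remote_service_name pair up to map a remote unit back to its relation; a sketch with illustrative relation and unit names:

    from charmhelpers.core.hookenv import relation_id, remote_service_name

    rid = relation_id('identity-service', 'keystone/0')
    svc = remote_service_name(rid)   # -> 'keystone'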
- - Example usage:: - - >>> # inside a hook - >>> from charmhelpers.core import hookenv - >>> config = hookenv.config() - >>> config['foo'] - 'bar' - >>> # store a new key/value for later use - >>> config['mykey'] = 'myval' - - - >>> # user runs `juju set mycharm foo=baz` - >>> # now we're inside subsequent config-changed hook - >>> config = hookenv.config() - >>> config['foo'] - 'baz' - >>> # test to see if this val has changed since last hook - >>> config.changed('foo') - True - >>> # what was the previous value? - >>> config.previous('foo') - 'bar' - >>> # keys/values that we add are preserved across hooks - >>> config['mykey'] - 'myval' - - """ - CONFIG_FILE_NAME = '.juju-persistent-config' - - def __init__(self, *args, **kw): - super(Config, self).__init__(*args, **kw) - self.implicit_save = True - self._prev_dict = None - self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path) and os.stat(self.path).st_size: - self.load_previous() - atexit(self._implicit_save) - - def load_previous(self, path=None): - """Load previous copy of config from disk. - - In normal usage you don't need to call this method directly - it - is called automatically at object initialization. - - :param path: - - File path from which to load the previous config. If `None`, - config is loaded from the default location. If `path` is - specified, subsequent `save()` calls will write to the same - path. - - """ - self.path = path or self.path - with open(self.path) as f: - try: - self._prev_dict = json.load(f) - except ValueError as e: - log('Unable to parse previous config data - {}'.format(str(e)), - level=ERROR) - for k, v in copy.deepcopy(self._prev_dict).items(): - if k not in self: - self[k] = v - - def changed(self, key): - """Return True if the current value for this key is different from - the previous value. - - """ - if self._prev_dict is None: - return True - return self.previous(key) != self.get(key) - - def previous(self, key): - """Return previous value for this key, or None if there - is no previous value. - - """ - if self._prev_dict: - return self._prev_dict.get(key) - return None - - def save(self): - """Save this config to disk. - - If the charm is using the :mod:`Services Framework ` - or :meth:'@hook ' decorator, this - is called automatically at the end of successful hook execution. - Otherwise, it should be called directly by user code. - - To disable automatic saves, set ``implicit_save=False`` on this - instance. - - """ - with open(self.path, 'w') as f: - os.fchmod(f.fileno(), 0o600) - json.dump(self, f) - - def _implicit_save(self): - if self.implicit_save: - self.save() - - -_cache_config = None - - -def config(scope=None): - """ - Get the juju charm configuration (scope==None) or individual key, - (scope=str). The returned value is a Python data structure loaded as - JSON from the Juju config command. - - :param scope: If set, return the value for the specified key. - :type scope: Optional[str] - :returns: Either the whole config as a Config, or a key from it. 
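In practice config() is called either bare or with a scope key; a minimal sketch ('debug' is an illustrative option name):

    from charmhelpers.core.hookenv import config

    all_opts = config()        # the whole Config mapping
    debug = config('debug')    # a single option's value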
- :rtype: Any - """ - global _cache_config - config_cmd_line = ['config-get', '--all', '--format=json'] - try: - # JSON Decode Exception for Python3.5+ - exc_json = json.decoder.JSONDecodeError - except AttributeError: - # JSON Decode Exception for Python2.7 through Python3.4 - exc_json = ValueError - try: - if _cache_config is None: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) - _cache_config = Config(config_data) - if scope is not None: - return _cache_config.get(scope) - return _cache_config - except (exc_json, UnicodeDecodeError) as e: - log('Unable to parse output from config-get: config_cmd_line="{}" ' - 'message="{}"' - .format(config_cmd_line, str(e)), level=ERROR) - return None - - -@cached -def relation_get(attribute=None, unit=None, rid=None): - """Get relation information""" - _args = ['relation-get', '--format=json'] - if rid: - _args.append('-r') - _args.append(rid) - _args.append(attribute or '-') - if unit: - _args.append(unit) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except CalledProcessError as e: - if e.returncode == 2: - return None - raise - - -def relation_set(relation_id=None, relation_settings=None, **kwargs): - """Set relation information for the current unit""" - relation_settings = relation_settings if relation_settings else {} - relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) - if relation_id is not None: - relation_cmd_line.extend(('-r', relation_id)) - settings = relation_settings.copy() - settings.update(kwargs) - for key, value in settings.items(): - # Force value to be a string: it always should, but some call - # sites pass in things like dicts or numbers. - if value is not None: - settings[key] = "{}".format(value) - if accepts_file: - # --file was introduced in Juju 1.23.2. Use it by default if - # available, since otherwise we'll break if the relation data is - # too big. Ideally we should tell relation-set to read the data from - # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
- with tempfile.NamedTemporaryFile(delete=False) as settings_file: - settings_file.write(yaml.safe_dump(settings).encode("utf-8")) - subprocess.check_call( - relation_cmd_line + ["--file", settings_file.name]) - os.remove(settings_file.name) - else: - for key, value in settings.items(): - if value is None: - relation_cmd_line.append('{}='.format(key)) - else: - relation_cmd_line.append('{}={}'.format(key, value)) - subprocess.check_call(relation_cmd_line) - # Flush cache of any relation-gets for local unit - flush(local_unit()) - - -def relation_clear(r_id=None): - ''' Clears any relation data already set on relation r_id ''' - settings = relation_get(rid=r_id, - unit=local_unit()) - for setting in settings: - if setting not in ['public-address', 'private-address']: - settings[setting] = None - relation_set(relation_id=r_id, - **settings) - - -@cached -def relation_ids(reltype=None): - """A list of relation_ids""" - reltype = reltype or relation_type() - relid_cmd_line = ['relation-ids', '--format=json'] - if reltype is not None: - relid_cmd_line.append(reltype) - return json.loads( - subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] - return [] - - -@cached -def related_units(relid=None): - """A list of related units""" - relid = relid or relation_id() - units_cmd_line = ['relation-list', '--format=json'] - if relid is not None: - units_cmd_line.extend(('-r', relid)) - return json.loads( - subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] - - -@cached -def relation_for_unit(unit=None, rid=None): - """Get the json represenation of a unit's relation""" - unit = unit or remote_unit() - relation = relation_get(unit=unit, rid=rid) - for key in relation: - if key.endswith('-list'): - relation[key] = relation[key].split() - relation['__unit__'] = unit - return relation - - -@cached -def relations_for_id(relid=None): - """Get relations of a specific relation ID""" - relation_data = [] - relid = relid or relation_ids() - for unit in related_units(relid): - unit_data = relation_for_unit(unit, relid) - unit_data['__relid__'] = relid - relation_data.append(unit_data) - return relation_data - - -@cached -def relations_of_type(reltype=None): - """Get relations of a specific type""" - relation_data = [] - reltype = reltype or relation_type() - for relid in relation_ids(reltype): - for relation in relations_for_id(relid): - relation['__relid__'] = relid - relation_data.append(relation) - return relation_data - - -@cached -def metadata(): - """Get the current charm metadata.yaml contents as a python object""" - with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: - return yaml.safe_load(md) - - -def _metadata_unit(unit): - """Given the name of a unit (e.g. apache2/0), get the unit charm's - metadata.yaml. Very similar to metadata() but allows us to inspect - other units. Unit needs to be co-located, such as a subordinate or - principal/primary. - - :returns: metadata.yaml as a python object. 
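The relation getters and setters above combine into the usual publish-then-read loop; a sketch with illustrative relation and setting names:

    from charmhelpers.core.hookenv import (
        related_units,
        relation_get,
        relation_ids,
        relation_set,
    )

    # Publish settings on every 'db' relation, then read each peer back.
    for rid in relation_ids('db'):
        relation_set(relation_id=rid, host='10.0.0.1', port='5432')
        for unit in related_units(rid):
            addr = relation_get('private-address', unit=unit, rid=rid)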
- - """ - basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) - unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') - if not os.path.exists(joineddir): - return None - with open(joineddir) as md: - return yaml.safe_load(md) - - -@cached -def relation_types(): - """Get a list of relation types supported by this charm""" - rel_types = [] - md = metadata() - for key in ('provides', 'requires', 'peers'): - section = md.get(key) - if section: - rel_types.extend(section.keys()) - return rel_types - - -@cached -def peer_relation_id(): - '''Get the peers relation id if a peers relation has been joined, else None.''' - md = metadata() - section = md.get('peers') - if section: - for key in section: - relids = relation_ids(key) - if relids: - return relids[0] - return None - - -@cached -def relation_to_interface(relation_name): - """ - Given the name of a relation, return the interface that relation uses. - - :returns: The interface name, or ``None``. - """ - return relation_to_role_and_interface(relation_name)[1] - - -@cached -def relation_to_role_and_interface(relation_name): - """ - Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). - - :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. - """ - _metadata = metadata() - for role in ('provides', 'requires', 'peers'): - interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') - if interface: - return role, interface - return None, None - - -@cached -def role_and_interface_to_relations(role, interface_name): - """ - Given a role and interface name, return a list of relation names for the - current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peers``). - - :returns: A list of relation names. - """ - _metadata = metadata() - results = [] - for relation_name, relation in _metadata.get(role, {}).items(): - if relation['interface'] == interface_name: - results.append(relation_name) - return results - - -@cached -def interface_to_relations(interface_name): - """ - Given an interface, return a list of relation names for the current - charm that use that interface. - - :returns: A list of relation names. - """ - results = [] - for role in ('provides', 'requires', 'peers'): - results.extend(role_and_interface_to_relations(role, interface_name)) - return results - - -@cached -def charm_name(): - """Get the name of the current charm as is specified on metadata.yaml""" - return metadata().get('name') - - -@cached -def relations(): - """Get a nested dictionary of relation data for all related units""" - rels = {} - for reltype in relation_types(): - relids = {} - for relid in relation_ids(reltype): - units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} - for unit in related_units(relid): - reldata = relation_get(unit=unit, rid=relid) - units[unit] = reldata - relids[relid] = units - rels[reltype] = relids - return rels - - -@cached -def is_relation_made(relation, keys='private-address'): - ''' - Determine whether a relation is established by checking for - presence of key(s). 
If a list of keys is provided, they - must all be present for the relation to be identified as made - ''' - if isinstance(keys, str): - keys = [keys] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - context = {} - for k in keys: - context[k] = relation_get(k, rid=r_id, - unit=unit) - if None not in context.values(): - return True - return False - - -def _port_op(op_name, port, protocol="TCP"): - """Open or close a service network port""" - _args = [op_name] - icmp = protocol.upper() == "ICMP" - if icmp: - _args.append(protocol) - else: - _args.append('{}/{}'.format(port, protocol)) - try: - subprocess.check_call(_args) - except subprocess.CalledProcessError: - # Older Juju pre 2.3 doesn't support ICMP - # so treat it as a no-op if it fails. - if not icmp: - raise - - -def open_port(port, protocol="TCP"): - """Open a service network port""" - _port_op('open-port', port, protocol) - - -def close_port(port, protocol="TCP"): - """Close a service network port""" - _port_op('close-port', port, protocol) - - -def open_ports(start, end, protocol="TCP"): - """Opens a range of service network ports""" - _args = ['open-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def close_ports(start, end, protocol="TCP"): - """Close a range of service network ports""" - _args = ['close-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def opened_ports(): - """Get the opened ports - - *Note that this will only show ports opened in a previous hook* - - :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` - """ - _args = ['opened-ports', '--format=json'] - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - - -@cached -def unit_get(attribute): - """Get the unit ID for the remote unit""" - _args = ['unit-get', '--format=json', attribute] - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -def unit_public_ip(): - """Get this unit's public IP address""" - return unit_get('public-address') - - -def unit_private_ip(): - """Get this unit's private IP address""" - return unit_get('private-address') - - -@cached -def storage_get(attribute=None, storage_id=None): - """Get storage attributes""" - _args = ['storage-get', '--format=json'] - if storage_id: - _args.extend(('-s', storage_id)) - if attribute: - _args.append(attribute) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -@cached -def storage_list(storage_name=None): - """List the storage IDs for the unit""" - _args = ['storage-list', '--format=json'] - if storage_name: - _args.append(storage_name) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except OSError as e: - import errno - if e.errno == errno.ENOENT: - # storage-list does not exist - return [] - raise - - -class UnregisteredHookError(Exception): - """Raised when an undefined hook is called""" - pass - - -class Hooks(object): - """A convenient handler for hook functions. 
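The port helpers wrap open-port/close-port; a minimal sketch (the port number is illustrative), keeping in mind that opened-ports only reflects earlier hooks:

    from charmhelpers.core.hookenv import close_port, open_port, opened_ports

    open_port(8080)         # 8080/tcp; visible once the application is exposed
    ports = opened_ports()  # note: reflects ports opened in *previous* hooks
    close_port(8080)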
- - Example:: - - hooks = Hooks() - - # register a hook, taking its name from the function name - @hooks.hook() - def install(): - pass # your code here - - # register a hook, providing a custom hook name - @hooks.hook("config-changed") - def config_changed(): - pass # your code here - - if __name__ == "__main__": - # execute a hook based on the name the program is called by - hooks.execute(sys.argv) - """ - - def __init__(self, config_save=None): - super(Hooks, self).__init__() - self._hooks = {} - - # For unknown reasons, we allow the Hooks constructor to override - # config().implicit_save. - if config_save is not None: - config().implicit_save = config_save - - def register(self, name, function): - """Register a hook""" - self._hooks[name] = function - - def execute(self, args): - """Execute a registered hook based on args[0]""" - _run_atstart() - hook_name = os.path.basename(args[0]) - if hook_name in self._hooks: - try: - self._hooks[hook_name]() - except SystemExit as x: - if x.code is None or x.code == 0: - _run_atexit() - raise - _run_atexit() - else: - raise UnregisteredHookError(hook_name) - - def hook(self, *hook_names): - """Decorator, registering them as hooks""" - def wrapper(decorated): - for hook_name in hook_names: - self.register(hook_name, decorated) - else: - self.register(decorated.__name__, decorated) - if '_' in decorated.__name__: - self.register( - decorated.__name__.replace('_', '-'), decorated) - return decorated - return wrapper - - -class NoNetworkBinding(Exception): - pass - - -def charm_dir(): - """Return the root directory of the current charm""" - d = os.environ.get('JUJU_CHARM_DIR') - if d is not None: - return d - return os.environ.get('CHARM_DIR') - - -@cached -def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" - cmd = ['action-get'] - if key is not None: - cmd.append(key) - cmd.append('--format=json') - action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) - return action_data - - -def action_set(values): - """Sets the values to be returned after the action finishes""" - cmd = ['action-set'] - for k, v in list(values.items()): - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -def action_fail(message): - """Sets the action status to failed and sets the error message. - - The results set by action_set are preserved.""" - subprocess.check_call(['action-fail', message]) - - -def action_name(): - """Get the name of the currently executing action.""" - return os.environ.get('JUJU_ACTION_NAME') - - -def action_uuid(): - """Get the UUID of the currently executing action.""" - return os.environ.get('JUJU_ACTION_UUID') - - -def action_tag(): - """Get the tag for the currently executing action.""" - return os.environ.get('JUJU_ACTION_TAG') - - -def status_set(workload_state, message): - """Set the workload state with a message - - Use status-set to set the workload state with a message which is visible - to the user via juju status. If the status-set command is not found then - assume this is juju < 1.23 and juju-log the message unstead. - - workload_state -- valid juju workload state. 
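Inside an action, the action_* helpers removed here follow a get/set/fail pattern; a sketch in which take_backup is a hypothetical charm function:

    from charmhelpers.core.hookenv import action_fail, action_get, action_set

    params = action_get()                     # all action parameters as a dict
    try:
        path = take_backup(params['target'])  # take_backup is hypothetical
        action_set({'outcome': 'success', 'path': path})
    except Exception as exc:
        action_fail('backup failed: {}'.format(exc))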
- message -- status update message - """ - valid_states = ['maintenance', 'blocked', 'waiting', 'active'] - if workload_state not in valid_states: - raise ValueError( - '{!r} is not a valid workload state'.format(workload_state) - ) - cmd = ['status-set', workload_state, message] - try: - ret = subprocess.call(cmd) - if ret == 0: - return - except OSError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'status-set failed: {} {}'.format(workload_state, - message) - log(log_message, level='INFO') - - -def status_get(): - """Retrieve the previously set juju workload state and message - - If the status-get command is not found then assume this is juju < 1.23 and - return 'unknown', "" - - """ - cmd = ['status-get', "--format=json", "--include-data"] - try: - raw_status = subprocess.check_output(cmd) - except OSError as e: - if e.errno == errno.ENOENT: - return ('unknown', "") - else: - raise - else: - status = json.loads(raw_status.decode("UTF-8")) - return (status["status"], status["message"]) - - -def translate_exc(from_exc, to_exc): - def inner_translate_exc1(f): - @wraps(f) - def inner_translate_exc2(*args, **kwargs): - try: - return f(*args, **kwargs) - except from_exc: - raise to_exc - - return inner_translate_exc2 - - return inner_translate_exc1 - - -def application_version_set(version): - """Charm authors may trigger this command from any hook to output what - version of the application is running. This could be a package version, - for instance postgres version 9.5. It could also be a build number or - version control revision identifier, for instance git sha 6fb7ba68. """ - - cmd = ['application-version-set'] - cmd.append(version) - try: - subprocess.check_call(cmd) - except OSError: - log("Application Version: {}".format(version)) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def goal_state(): - """Juju goal state values""" - cmd = ['goal-state', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def is_leader(): - """Does the current unit hold the juju leadership - - Uses juju to determine whether the current unit is the leader of its peers - """ - cmd = ['is-leader', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_get(attribute=None): - """Juju leader get value(s)""" - cmd = ['leader-get', '--format=json'] + [attribute or '-'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_set(settings=None, **kwargs): - """Juju leader set value(s)""" - # Don't log secrets. - # log("Juju leader-set '%s'" % (settings), level=DEBUG) - cmd = ['leader-set'] - settings = settings or {} - settings.update(kwargs) - for k, v in settings.items(): - if v is None: - cmd.append('{}='.format(k)) - else: - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_register(ptype, klass, pid): - """ is used while a hook is running to let Juju know that a - payload has been started.""" - cmd = ['payload-register'] - for x in [ptype, klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_unregister(klass, pid): - """ is used while a hook is running to let Juju know - that a payload has been manually stopped. 
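The leadership helpers are usually paired so only the leader writes shared state; a sketch with an illustrative key (real charms should avoid logging such values):

    from charmhelpers.core.hookenv import is_leader, leader_get, leader_set

    if is_leader():
        leader_set({'admin-password': 'example'})  # illustrative secret
    else:
        password = leader_get('admin-password')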
The and provided - must match a payload that has been previously registered with juju using - payload-register.""" - cmd = ['payload-unregister'] - for x in [klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_status_set(klass, pid, status): - """is used to update the current status of a registered payload. - The and provided must match a payload that has been previously - registered with juju using payload-register. The must be one of the - follow: starting, started, stopping, stopped""" - cmd = ['payload-status-set'] - for x in [klass, pid, status]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def resource_get(name): - """used to fetch the resource path of the given name. - - must match a name of defined resource in metadata.yaml - - returns either a path or False if resource not available - """ - if not name: - return False - - cmd = ['resource-get', name] - try: - return subprocess.check_output(cmd).decode('UTF-8') - except subprocess.CalledProcessError: - return False - - -@cached -def juju_version(): - """Full version string (eg. '1.23.3.1-trusty-amd64')""" - # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 - jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] - return subprocess.check_output([jujud, 'version'], - universal_newlines=True).strip() - - -def has_juju_version(minimum_version): - """Return True if the Juju version is at least the provided version""" - return LooseVersion(juju_version()) >= LooseVersion(minimum_version) - - -_atexit = [] -_atstart = [] - - -def atstart(callback, *args, **kwargs): - '''Schedule a callback to run before the main hook. - - Callbacks are run in the order they were added. - - This is useful for modules and classes to perform initialization - and inject behavior. In particular: - - - Run common code before all of your hooks, such as logging - the hook name or interesting relation data. - - Defer object or module initialization that requires a hook - context until we know there actually is a hook context, - making testing easier. - - Rather than requiring charm authors to include boilerplate to - invoke your helper's behavior, have it run automatically if - your object is instantiated or module imported. - - This is not at all useful after your hook framework as been launched. - ''' - global _atstart - _atstart.append((callback, args, kwargs)) - - -def atexit(callback, *args, **kwargs): - '''Schedule a callback to run on successful hook completion. - - Callbacks are run in the reverse order that they were added.''' - _atexit.append((callback, args, kwargs)) - - -def _run_atstart(): - '''Hook frameworks must invoke this before running the main hook body.''' - global _atstart - for callback, args, kwargs in _atstart: - callback(*args, **kwargs) - del _atstart[:] - - -def _run_atexit(): - '''Hook frameworks must invoke this after the main hook body has - successfully completed. Do not invoke it if the hook fails.''' - global _atexit - for callback, args, kwargs in reversed(_atexit): - callback(*args, **kwargs) - del _atexit[:] - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def network_get_primary_address(binding): - ''' - Deprecated since Juju 2.3; use network_get() - - Retrieve the primary network address for a named binding - - :param binding: string. The name of a relation of extra-binding - :return: string. 
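atstart/atexit let hook frameworks bracket the hook body; a minimal sketch using the log helper:

    from charmhelpers.core.hookenv import atexit, atstart, hook_name, log

    atstart(log, 'hook {} starting'.format(hook_name()))
    atexit(log, 'hook completed cleanly')  # runs only if the hook succeeds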
The primary IP address for the named binding - :raise: NotImplementedError if run on Juju < 2.0 - ''' - cmd = ['network-get', '--primary-address', binding] - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - if 'no network config found for binding' in e.output.decode('UTF-8'): - raise NoNetworkBinding("No network binding for {}" - .format(binding)) - else: - raise - return response - - -def network_get(endpoint, relation_id=None): - """ - Retrieve the network details for a relation endpoint - - :param endpoint: string. The name of a relation endpoint - :param relation_id: int. The ID of the relation for the current context. - :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if request not supported by the Juju version. - """ - if not has_juju_version('2.2'): - raise NotImplementedError(juju_version()) # earlier versions require --primary-address - if relation_id and not has_juju_version('2.3'): - raise NotImplementedError # 2.3 added the -r option - - cmd = ['network-get', endpoint, '--format', 'yaml'] - if relation_id: - cmd.append('-r') - cmd.append(relation_id) - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - return yaml.safe_load(response) - - -def add_metric(*args, **kwargs): - """Add metric values. Values may be expressed with keyword arguments. For - metric names containing dashes, these may be expressed as one or more - 'key=value' positional arguments. May only be called from the collect-metrics - hook.""" - _args = ['add-metric'] - _kvpairs = [] - _kvpairs.extend(args) - _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) - _args.extend(sorted(_kvpairs)) - try: - subprocess.check_call(_args) - return - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) - log(log_message, level='INFO') - - -def meter_status(): - """Get the meter status, if running in the meter-status-changed hook.""" - return os.environ.get('JUJU_METER_STATUS') - - -def meter_info(): - """Get the meter status information, if running in the meter-status-changed - hook.""" - return os.environ.get('JUJU_METER_INFO') - - -def iter_units_for_relation_name(relation_name): - """Iterate through all units in a relation - - Generator that iterates through all the units in a relation and yields - a named tuple with rid and unit field names. - - Usage: - data = [(u.rid, u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param relation_name: string relation name - :yield: Named Tuple with rid and unit field names - """ - RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') - for rid in relation_ids(relation_name): - for unit in related_units(rid): - yield RelatedUnit(rid, unit) - - -def ingress_address(rid=None, unit=None): - """ - Retrieve the ingress-address from a relation when available. - Otherwise, return the private-address. - - When used on the consuming side of the relation (unit is a remote - unit), the ingress-address is the IP address that this unit needs - to use to reach the provided service on the remote unit. - - When used on the providing side of the relation (unit == local_unit()), - the ingress-address is the IP address that is advertised to remote - units on this relation. Remote units need to use this address to - reach the local provided service on this unit. 
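network_get returns the parsed YAML from Juju's network-get; a hedged sketch (the endpoint name, and reliance on the ingress-addresses key, are assumptions about the deployment and Juju version):

    from charmhelpers.core.hookenv import network_get

    details = network_get('website')  # requires Juju >= 2.2
    ingress = details.get('ingress-addresses', [])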
- - Note that charms may document some other method to use in - preference to the ingress_address(), such as an address provided - on a different relation attribute or a service discovery mechanism. - This allows charms to redirect inbound connections to their peers - or different applications such as load balancers. - - Usage: - addresses = [ingress_address(rid=u.rid, unit=u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: string IP address - """ - settings = relation_get(rid=rid, unit=unit) - return (settings.get('ingress-address') or - settings.get('private-address')) - - -def egress_subnets(rid=None, unit=None): - """ - Retrieve the egress-subnets from a relation. - - This function is to be used on the providing side of the - relation, and provides the ranges of addresses that client - connections may come from. The result is uninteresting on - the consuming side of a relation (unit == local_unit()). - - Returns a stable list of subnets in CIDR format. - eg. ['192.168.1.0/24', '2001::F00F/128'] - - If egress-subnets is not available, falls back to using the published - ingress-address, or finally private-address. - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] - """ - def _to_range(addr): - if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: - addr += '/32' - elif ':' in addr and '/' not in addr: # IPv6 - addr += '/128' - return addr - - settings = relation_get(rid=rid, unit=unit) - if 'egress-subnets' in settings: - return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] - if 'ingress-address' in settings: - return [_to_range(settings['ingress-address'])] - if 'private-address' in settings: - return [_to_range(settings['private-address'])] - return [] # Should never happen diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py deleted file mode 100644 index e9fd38a..0000000 --- a/hooks/charmhelpers/core/host.py +++ /dev/null @@ -1,1042 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tools for working with the host system""" -# Copyright 2012 Canonical Ltd. 
-# -# Authors: -# Nick Moffitt -# Matthew Wedgwood - -import os -import re -import pwd -import glob -import grp -import random -import string -import subprocess -import hashlib -import functools -import itertools -import six - -from contextlib import contextmanager -from collections import OrderedDict -from .hookenv import log, DEBUG, local_unit -from .fstab import Fstab -from charmhelpers.osplatform import get_platform - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.host_factory.ubuntu import ( - service_available, - add_new_group, - lsb_release, - cmp_pkgrevno, - CompareHostReleases, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.host_factory.centos import ( - service_available, - add_new_group, - lsb_release, - cmp_pkgrevno, - CompareHostReleases, - ) # flake8: noqa -- ignore F401 for this import - -UPDATEDB_PATH = '/etc/updatedb.conf' - -def service_start(service_name, **kwargs): - """Start a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example stops the ceph-osd service for instance id=4: - - service_stop('ceph-osd', id=4) - - :param service_name: the name of the service to stop - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - return service('start', service_name, **kwargs) - - -def service_stop(service_name, **kwargs): - """Stop a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example stops the ceph-osd service for instance id=4: - - service_stop('ceph-osd', id=4) - - :param service_name: the name of the service to stop - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - return service('stop', service_name, **kwargs) - - -def service_restart(service_name, **kwargs): - """Restart a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. 
upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be restarted. The follow- - ing example restarts the ceph-osd service for instance id=4: - - service_restart('ceph-osd', id=4) - - :param service_name: the name of the service to restart - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems not allowing additional - parameters via the commandline (systemd). - """ - return service('restart', service_name) - - -def service_reload(service_name, restart_on_failure=False, **kwargs): - """Reload a system service, optionally falling back to restart if - reload fails. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example restarts the ceph-osd service for instance id=4: - - service_reload('ceph-osd', id=4) - - :param service_name: the name of the service to reload - :param restart_on_failure: boolean indicating whether to fallback to a - restart if the reload fails. - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems not allowing additional - parameters via the commandline (systemd). - """ - service_result = service('reload', service_name, **kwargs) - if not service_result and restart_on_failure: - service_result = service('restart', service_name, **kwargs) - return service_result - - -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", - **kwargs): - """Pause a system service. - - Stop it, and prevent it from starting again at boot. - - :param service_name: the name of the service to pause - :param init_dir: path to the upstart init directory - :param initd_dir: path to the sysv init directory - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems which do not support - key=value arguments via the commandline. 
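service_pause and service_resume are symmetric wrappers over the init-system detection above; a minimal sketch with an illustrative service:

    from charmhelpers.core.host import service_pause, service_resume

    # Take the payload out of rotation, then bring it back.
    service_pause('apache2')    # stop now and disable at boot
    service_resume('apache2')   # re-enable at boot and start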
- """ - stopped = True - if service_running(service_name, **kwargs): - stopped = service_stop(service_name, **kwargs) - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): - service('disable', service_name) - service('mask', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "disable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - return stopped - - -def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d", **kwargs): - """Resume a system service. - - Reenable starting again at boot. Start the service. - - :param service_name: the name of the service to resume - :param init_dir: the path to the init dir - :param initd dir: the path to the initd dir - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): - service('unmask', service_name) - service('enable', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "enable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - started = service_running(service_name, **kwargs) - - if not started: - started = service_start(service_name, **kwargs) - return started - - -def service(action, service_name, **kwargs): - """Control a system service. - - :param action: the action to take on the service - :param service_name: the name of the service to perform th action on - :param **kwargs: additional params to be passed to the service command in - the form of key=value. - """ - if init_is_systemd(): - cmd = ['systemctl', action, service_name] - else: - cmd = ['service', service_name, action] - for key, value in six.iteritems(kwargs): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - return subprocess.call(cmd) == 0 - - -_UPSTART_CONF = "/etc/init/{}.conf" -_INIT_D_CONF = "/etc/init.d/{}" - - -def service_running(service_name, **kwargs): - """Determine whether a system service is running. - - :param service_name: the name of the service - :param **kwargs: additional args to pass to the service command. This is - used to pass additional key=value arguments to the - service command line for managing specific instance - units (e.g. service ceph-osd status id=2). The kwargs - are ignored in systemd services. 
- """ - if init_is_systemd(): - return service('is-active', service_name) - else: - if os.path.exists(_UPSTART_CONF.format(service_name)): - try: - cmd = ['status', service_name] - for key, value in six.iteritems(kwargs): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running - # 'start/running' - if ("start/running" in output or - "is running" in output or - "up and running" in output): - return True - elif os.path.exists(_INIT_D_CONF.format(service_name)): - # Check System V scripts init script return codes - return service('status', service_name) - return False - - -SYSTEMD_SYSTEM = '/run/systemd/system' - - -def init_is_systemd(): - """Return True if the host system uses systemd, False otherwise.""" - if lsb_release()['DISTRIB_CODENAME'] == 'trusty': - return False - return os.path.isdir(SYSTEMD_SYSTEM) - - -def adduser(username, password=None, shell='/bin/bash', - system_user=False, primary_group=None, - secondary_groups=None, uid=None, home_dir=None): - """Add a user to the system. - - Will log but otherwise succeed if the user already exists. - - :param str username: Username to create - :param str password: Password for user; if ``None``, create a system user - :param str shell: The default shell for the user - :param bool system_user: Whether to create a login or system user - :param str primary_group: Primary group for user; defaults to username - :param list secondary_groups: Optional list of additional groups - :param int uid: UID for user being created - :param str home_dir: Home directory for user - - :returns: The password database entry struct, as returned by `pwd.getpwnam` - """ - try: - user_info = pwd.getpwnam(username) - log('user {0} already exists!'.format(username)) - if uid: - user_info = pwd.getpwuid(int(uid)) - log('user with uid {0} already exists!'.format(uid)) - except KeyError: - log('creating user {0}'.format(username)) - cmd = ['useradd'] - if uid: - cmd.extend(['--uid', str(uid)]) - if home_dir: - cmd.extend(['--home', str(home_dir)]) - if system_user or password is None: - cmd.append('--system') - else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) - if not primary_group: - try: - grp.getgrnam(username) - primary_group = username # avoid "group exists" error - except KeyError: - pass - if primary_group: - cmd.extend(['-g', primary_group]) - if secondary_groups: - cmd.extend(['-G', ','.join(secondary_groups)]) - cmd.append(username) - subprocess.check_call(cmd) - user_info = pwd.getpwnam(username) - return user_info - - -def user_exists(username): - """Check if a user exists""" - try: - pwd.getpwnam(username) - user_exists = True - except KeyError: - user_exists = False - return user_exists - - -def uid_exists(uid): - """Check if a uid exists""" - try: - pwd.getpwuid(uid) - uid_exists = True - except KeyError: - uid_exists = False - return uid_exists - - -def group_exists(groupname): - """Check if a group exists""" - try: - grp.getgrnam(groupname) - group_exists = True - except KeyError: - group_exists = False - return group_exists - - -def gid_exists(gid): - """Check if a gid exists""" - try: - grp.getgrgid(gid) - gid_exists = True - except KeyError: - gid_exists = False - return gid_exists - - -def add_group(group_name, system_group=False, gid=None): - 
"""Add a group to the system - - Will log but otherwise succeed if the group already exists. - - :param str group_name: group to create - :param bool system_group: Create system group - :param int gid: GID for user being created - - :returns: The password database entry struct, as returned by `grp.getgrnam` - """ - try: - group_info = grp.getgrnam(group_name) - log('group {0} already exists!'.format(group_name)) - if gid: - group_info = grp.getgrgid(gid) - log('group with gid {0} already exists!'.format(gid)) - except KeyError: - log('creating group {0}'.format(group_name)) - add_new_group(group_name, system_group, gid) - group_info = grp.getgrnam(group_name) - return group_info - - -def add_user_to_group(username, group): - """Add a user to a group""" - cmd = ['gpasswd', '-a', username, group] - log("Adding user {} to group {}".format(username, group)) - subprocess.check_call(cmd) - - -def chage(username, lastday=None, expiredate=None, inactive=None, - mindays=None, maxdays=None, root=None, warndays=None): - """Change user password expiry information - - :param str username: User to update - :param str lastday: Set when password was changed in YYYY-MM-DD format - :param str expiredate: Set when user's account will no longer be - accessible in YYYY-MM-DD format. - -1 will remove an account expiration date. - :param str inactive: Set the number of days of inactivity after a password - has expired before the account is locked. - -1 will remove an account's inactivity. - :param str mindays: Set the minimum number of days between password - changes to MIN_DAYS. - 0 indicates the password can be changed anytime. - :param str maxdays: Set the maximum number of days during which a - password is valid. - -1 as MAX_DAYS will remove checking maxdays - :param str root: Apply changes in the CHROOT_DIR directory - :param str warndays: Set the number of days of warning before a password - change is required - :raises subprocess.CalledProcessError: if call to chage fails - """ - cmd = ['chage'] - if root: - cmd.extend(['--root', root]) - if lastday: - cmd.extend(['--lastday', lastday]) - if expiredate: - cmd.extend(['--expiredate', expiredate]) - if inactive: - cmd.extend(['--inactive', inactive]) - if mindays: - cmd.extend(['--mindays', mindays]) - if maxdays: - cmd.extend(['--maxdays', maxdays]) - if warndays: - cmd.extend(['--warndays', warndays]) - cmd.append(username) - subprocess.check_call(cmd) - -remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') - -def rsync(from_path, to_path, flags='-r', options=None, timeout=None): - """Replicate the contents of a path""" - options = options or ['--delete', '--executability'] - cmd = ['/usr/bin/rsync', flags] - if timeout: - cmd = ['timeout', str(timeout)] + cmd - cmd.extend(options) - cmd.append(from_path) - cmd.append(to_path) - log(" ".join(cmd)) - return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() - - -def symlink(source, destination): - """Create a symbolic link""" - log("Symlinking {} as {}".format(source, destination)) - cmd = [ - 'ln', - '-sf', - source, - destination, - ] - subprocess.check_call(cmd) - - -def mkdir(path, owner='root', group='root', perms=0o555, force=False): - """Create a directory""" - log("Making dir {} {}:{} {:o}".format(path, owner, group, - perms)) - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - realpath = os.path.abspath(path) - path_exists = os.path.exists(realpath) - if path_exists and force: - if not 
- - -def mkdir(path, owner='root', group='root', perms=0o555, force=False): - """Create a directory""" - log("Making dir {} {}:{} {:o}".format(path, owner, group, - perms)) - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - realpath = os.path.abspath(path) - path_exists = os.path.exists(realpath) - if path_exists and force: - if not os.path.isdir(realpath): - log("Removing non-directory file {} prior to mkdir()".format(path)) - os.unlink(realpath) - os.makedirs(realpath, perms) - elif not path_exists: - os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) - os.chmod(realpath, perms) - - -def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a byte string.""" - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - # let's see if we can grab the file and compare the contents, to avoid - # doing a write. - existing_content = None - existing_uid, existing_gid = None, None - try: - with open(path, 'rb') as target: - existing_content = target.read() - stat = os.stat(path) - existing_uid, existing_gid = stat.st_uid, stat.st_gid - except (IOError, OSError): - pass - if content != existing_content: - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), - level=DEBUG) - with open(path, 'wb') as target: - os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - if six.PY3 and isinstance(content, six.string_types): - content = content.encode('UTF-8') - target.write(content) - return - # the contents were the same, but we might still need to change the - # ownership. - if existing_uid != uid: - log("Changing uid on already existing content: {} -> {}" - .format(existing_uid, uid), level=DEBUG) - os.chown(path, uid, -1) - if existing_gid != gid: - log("Changing gid on already existing content: {} -> {}" - .format(existing_gid, gid), level=DEBUG) - os.chown(path, -1, gid) - - -def fstab_remove(mp): - """Remove the given mountpoint entry from /etc/fstab""" - return Fstab.remove_by_mountpoint(mp) - - -def fstab_add(dev, mp, fs, options=None): - """Adds the given device entry to the /etc/fstab file""" - return Fstab.add(dev, mp, fs, options=options) - - -def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): - """Mount a filesystem at a particular mountpoint""" - cmd_args = ['mount'] - if options is not None: - cmd_args.extend(['-o', options]) - cmd_args.extend([device, mountpoint]) - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) - return False - - if persist: - return fstab_add(device, mountpoint, filesystem, options=options) - return True - - -def umount(mountpoint, persist=False): - """Unmount a filesystem""" - cmd_args = ['umount', mountpoint] - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error unmounting {}\n{}'.format(mountpoint, e.output)) - return False - - if persist: - return fstab_remove(mountpoint) - return True - - -def mounts(): - """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" - with open('/proc/mounts') as f: - # [['/mount/point','/dev/path'],[...]] - system_mounts = [m[1::-1] for m in [l.strip().split() - for l in f.readlines()]] - return system_mounts - - -def fstab_mount(mountpoint): - """Mount filesystem using fstab""" - cmd_args = ['mount', mountpoint] - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error mounting {}\n{}'.format(mountpoint, e.output)) - return False - return True
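A sketch of write_file(); it skips the write when the on-disk content already matches and only corrects ownership in place, which keeps repeated hook runs cheap (the path and content are illustrative):

    from charmhelpers.core.host import write_file

    rendered = b"listen 8080\n"   # illustrative content
    write_file('/etc/myapp/myapp.conf', rendered,
               owner='root', group='root', perms=0o640)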
- """ - if os.path.exists(path): - h = getattr(hashlib, hash_type)() - with open(path, 'rb') as source: - h.update(source.read()) - return h.hexdigest() - else: - return None - - -def path_hash(path): - """Generate a hash checksum of all files matching 'path'. Standard - wildcards like '*' and '?' are supported, see documentation for the 'glob' - module for more information. - - :return: dict: A { filename: hash } dictionary for all matched files. - Empty if none found. - """ - return { - filename: file_hash(filename) - for filename in glob.iglob(path) - } - - -def check_hash(path, checksum, hash_type='md5'): - """Validate a file using a cryptographic checksum. - - :param str checksum: Value of the checksum used to validate the file. - :param str hash_type: Hash algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. - :raises ChecksumError: If the file fails the checksum - - """ - actual_checksum = file_hash(path, hash_type) - if checksum != actual_checksum: - raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) - - -class ChecksumError(ValueError): - """A class derived from Value error to indicate the checksum failed.""" - pass - - -def restart_on_change(restart_map, stopstart=False, restart_functions=None): - """Restart services based on configuration files changing - - This function is used a decorator, for example:: - - @restart_on_change({ - '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] - '/etc/apache/sites-enabled/*': [ 'apache2' ] - }) - def config_changed(): - pass # your code here - - In this example, the cinder-api and cinder-volume services - would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. The apache2 service would be - restarted if any file matching the pattern got changed, created - or removed. Standard wildcards are supported, see documentation - for the 'glob' module for more information. - - @param restart_map: {path_file_name: [service_name, ...] - @param stopstart: DEFAULT false; whether to stop, start OR restart - @param restart_functions: nonstandard functions to use to restart services - {svc: func, ...} - @returns result from decorated function - """ - def wrap(f): - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) - return wrapped_f - return wrap - - -def restart_on_change_helper(lambda_f, restart_map, stopstart=False, - restart_functions=None): - """Helper function to perform the restart_on_change function. - - This is provided for decorators to restart services if files described - in the restart_map have changed after an invocation of lambda_f(). - - @param lambda_f: function to call. 
- - -def restart_on_change_helper(lambda_f, restart_map, stopstart=False, - restart_functions=None): - """Helper function to perform the restart_on_change function. - - This is provided for decorators to restart services if files described - in the restart_map have changed after an invocation of lambda_f(). - - @param lambda_f: function to call. - @param restart_map: {file: [service, ...]} - @param stopstart: whether to stop, start or restart a service - @param restart_functions: nonstandard functions to use to restart services - {svc: func, ...} - @returns result of lambda_f() - """ - if restart_functions is None: - restart_functions = {} - checksums = {path: path_hash(path) for path in restart_map} - r = lambda_f() - # create a list of lists of the services to restart - restarts = [restart_map[path] - for path in restart_map - if path_hash(path) != checksums[path]] - # create a flat list of ordered services without duplicates from lists - services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) - if services_list: - actions = ('stop', 'start') if stopstart else ('restart',) - for service_name in services_list: - if service_name in restart_functions: - restart_functions[service_name](service_name) - else: - for action in actions: - service(action, service_name) - return r - - -def pwgen(length=None): - """Generate a random password.""" - if length is None: - # A weak PRNG is acceptable for picking a random length - length = random.choice(range(35, 45)) - alphanumeric_chars = [ - l for l in (string.ascii_letters + string.digits) - if l not in 'l0QD1vAEIOUaeiou'] - # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the - # actual password - random_generator = random.SystemRandom() - random_chars = [ - random_generator.choice(alphanumeric_chars) for _ in range(length)] - return(''.join(random_chars)) - - -def is_phy_iface(interface): - """Returns True if interface is not virtual, otherwise False.""" - if interface: - sys_net = '/sys/class/net' - if os.path.isdir(sys_net): - for iface in glob.glob(os.path.join(sys_net, '*')): - if '/virtual/' in os.path.realpath(iface): - continue - - if interface == os.path.basename(iface): - return True - - return False - - -def get_bond_master(interface): - """Returns bond master if interface is bond slave otherwise None.
- - NOTE: the provided interface is expected to be physical - """ - if interface: - iface_path = '/sys/class/net/%s' % (interface) - if os.path.exists(iface_path): - if '/virtual/' in os.path.realpath(iface_path): - return None - - master = os.path.join(iface_path, 'master') - if os.path.exists(master): - master = os.path.realpath(master) - # make sure it is a bond master - if os.path.exists(os.path.join(master, 'bonding')): - return os.path.basename(master) - - return None - - -def list_nics(nic_type=None): - """Return a list of nics of given type(s)""" - if isinstance(nic_type, six.string_types): - int_types = [nic_type] - else: - int_types = nic_type - - interfaces = [] - if nic_type: - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).decode('UTF-8') - ip_output = ip_output.split('\n') - ip_output = (line for line in ip_output if line) - for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + - r'[0-9]+\.[0-9]+)@.*', line) - if matched: - iface = matched.groups()[0] - else: - iface = line.split()[1].replace(":", "") - - if iface not in interfaces: - interfaces.append(iface) - else: - cmd = ['ip', 'a'] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line.strip() for line in ip_output if line) - - key = re.compile(r'^[0-9]+:\s+(.+):') - for line in ip_output: - matched = re.search(key, line) - if matched: - iface = matched.group(1) - iface = iface.partition("@")[0] - if iface not in interfaces: - interfaces.append(iface) - - return interfaces - - -def set_nic_mtu(nic, mtu): - """Set the Maximum Transmission Unit (MTU) on a network interface.""" - cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] - subprocess.check_call(cmd) - - -def get_nic_mtu(nic): - """Return the Maximum Transmission Unit (MTU) for a network interface.""" - cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - mtu = "" - for line in ip_output: - words = line.split() - if 'mtu' in words: - mtu = words[words.index("mtu") + 1] - return mtu - - -def get_nic_hwaddr(nic): - """Return the Media Access Control (MAC) for a network interface.""" - cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8') - hwaddr = "" - words = ip_output.split() - if 'link/ether' in words: - hwaddr = words[words.index('link/ether') + 1] - return hwaddr - - -@contextmanager -def chdir(directory): - """Change the current working directory to a different directory for a code - block and return the previous directory after the block exits. Useful to - run commands from a specified directory. - - :param str directory: The directory path to change to for this context. - """ - cur = os.getcwd() - try: - yield os.chdir(directory) - finally: - os.chdir(cur)
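A sketch of the chdir() context manager (the path and command are illustrative):

    import subprocess
    from charmhelpers.core.host import chdir

    with chdir('/srv/myapp'):
        subprocess.check_call(['make', 'install'])
    # the previous working directory is restored here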
- - -def chownr(path, owner, group, follow_links=True, chowntopdir=False): - """Recursively change user and group ownership of files and directories - in given path. Doesn't chown path itself by default, only its children. - - :param str path: The string path to start changing ownership. - :param str owner: The owner string to use when looking up the uid. - :param str group: The group string to use when looking up the gid. - :param bool follow_links: Also follow and chown links if True - :param bool chowntopdir: Also chown path itself if True - """ - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - if follow_links: - chown = os.chown - else: - chown = os.lchown - - if chowntopdir: - broken_symlink = os.path.lexists(path) and not os.path.exists(path) - if not broken_symlink: - chown(path, uid, gid) - for root, dirs, files in os.walk(path, followlinks=follow_links): - for name in dirs + files: - full = os.path.join(root, name) - broken_symlink = os.path.lexists(full) and not os.path.exists(full) - if not broken_symlink: - chown(full, uid, gid) - - -def lchownr(path, owner, group): - """Recursively change user and group ownership of files and directories - in a given path, not following symbolic links. See the documentation for - 'os.lchown' for more information. - - :param str path: The string path to start changing ownership. - :param str owner: The owner string to use when looking up the uid. - :param str group: The group string to use when looking up the gid. - """ - chownr(path, owner, group, follow_links=False) - - -def owner(path): - """Returns a tuple containing the username & groupname owning the path. - - :param str path: the string path to retrieve the ownership - :return tuple(str, str): A (username, groupname) tuple containing the - name of the user and group owning the path. - :raises OSError: if the specified path does not exist - """ - stat = os.stat(path) - username = pwd.getpwuid(stat.st_uid)[0] - groupname = grp.getgrgid(stat.st_gid)[0] - return username, groupname - - -def get_total_ram(): - """The total amount of system RAM in bytes. - - This is what is reported by the OS, and may be overcommitted when - there are multiple containers hosted on the same machine. - """ - with open('/proc/meminfo', 'r') as f: - for line in f.readlines(): - if line: - key, value, unit = line.split() - if key == 'MemTotal:': - assert unit == 'kB', 'Unknown unit' - return int(value) * 1024 # Classic, not KiB. - raise NotImplementedError() - - -UPSTART_CONTAINER_TYPE = '/run/container_type' - - -def is_container(): - """Determine whether unit is running in a container - - @return: boolean indicating if unit is in a container - """ - if init_is_systemd(): - # Detect using systemd-detect-virt - return subprocess.call(['systemd-detect-virt', - '--container']) == 0 - else: - # Detect using upstart container file marker - return os.path.exists(UPSTART_CONTAINER_TYPE)
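A sketch of handing a state directory to a service account, including the top directory itself (the paths and names are illustrative):

    from charmhelpers.core.host import chownr, owner

    chownr('/var/lib/myapp', 'myapp', 'myapp', chowntopdir=True)
    assert owner('/var/lib/myapp') == ('myapp', 'myapp')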
- - -def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): - """Adds the specified path to mlocate's updatedb.conf PRUNEPATHS list. - - This method has no effect if the path specified by updatedb_path does not - exist or is not a file. - - @param path: string the path to add to the updatedb.conf PRUNEPATHS value - @param updatedb_path: the path to the updatedb.conf file - """ - if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): - # If the updatedb.conf file doesn't exist then don't attempt to update - # the file as the package providing mlocate may not be installed on - # the local system - return - - with open(updatedb_path, 'r+') as f_id: - updatedb_text = f_id.read() - output = updatedb(updatedb_text, path) - f_id.seek(0) - f_id.write(output) - f_id.truncate() - - -def updatedb(updatedb_text, new_path): - """Return updatedb_text with new_path appended to the PRUNEPATHS value.""" - lines = [line for line in updatedb_text.split("\n")] - for i, line in enumerate(lines): - if line.startswith("PRUNEPATHS="): - paths_line = line.split("=")[1].replace('"', '') - paths = paths_line.split(" ") - if new_path not in paths: - paths.append(new_path) - lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) - output = "\n".join(lines) - return output - - -def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): - """ Modulo distribution - - This helper uses the unit number, a modulo value and a constant wait time - to produce a calculated wait time distribution. This is useful in large - scale deployments to distribute load during an expensive operation such as - service restarts. - - If you have 1000 nodes that need to restart, 100 at a time, one minute - apart: - - time.sleep(modulo_distribution(modulo=100, wait=60)) - restart() - - If you need restarts to happen serially, set modulo to the exact number of - nodes and set a high constant wait time: - - time.sleep(modulo_distribution(modulo=10, wait=120)) - restart() - - @param modulo: int The modulo number creates the group distribution - @param wait: int The constant time wait value - @param non_zero_wait: boolean Override unit % modulo == 0, - return modulo * wait. Used to avoid collisions with - leader nodes which are often given priority. - @return: int Calculated time to wait for unit operation - """ - unit_number = int(local_unit().split('/')[1]) - calculated_wait_time = (unit_number % modulo) * wait - if non_zero_wait and calculated_wait_time == 0: - return modulo * wait - else: - return calculated_wait_time
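A sketch of staggering restarts across units with modulo_distribution; this only works inside a hook, where local_unit() is available (the service name is illustrative):

    import time
    from charmhelpers.core.host import modulo_distribution, service_restart

    # Unit N sleeps (N % 30) * 60 seconds; units that would sleep zero
    # seconds defer a full round instead (non_zero_wait=True).
    time.sleep(modulo_distribution(modulo=30, wait=60, non_zero_wait=True))
    service_restart('ceph-osd')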
diff --git a/hooks/charmhelpers/core/host_factory/__init__.py b/hooks/charmhelpers/core/host_factory/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hooks/charmhelpers/core/host_factory/centos.py b/hooks/charmhelpers/core/host_factory/centos.py deleted file mode 100644 index 7781a39..0000000 --- a/hooks/charmhelpers/core/host_factory/centos.py +++ /dev/null @@ -1,72 +0,0 @@ -import subprocess -import yum -import os - -from charmhelpers.core.strutils import BasicStringComparator - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Host releases. - - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something release-specific - """ - - def __init__(self, item): - raise NotImplementedError( - "CompareHostReleases() is not implemented for CentOS") - - -def service_available(service_name): - """Determine whether a system service is available.""" - if os.path.isdir('/run/systemd/system'): - cmd = ['systemctl', 'is-enabled', service_name] - else: - cmd = ['service', service_name, 'is-enabled'] - return subprocess.call(cmd) == 0 - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['groupadd'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('-r') - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/os-release in a dict.""" - d = {} - with open('/etc/os-release', 'r') as lsb: - for l in lsb: - s = l.split('=') - if len(s) != 2: - continue - d[s[0].strip()] = s[1].strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function queries YumBase if the pkgcache argument - is None. - """ - if not pkgcache: - y = yum.YumBase() - packages = y.doPackageLists() - pkgcache = {i.name: i.version for i in packages['installed']} - pkg = pkgcache[package] - if pkg > revno: - return 1 - if pkg < revno: - return -1 - return 0 diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py deleted file mode 100644 index a6d375a..0000000 --- a/hooks/charmhelpers/core/host_factory/ubuntu.py +++ /dev/null @@ -1,91 +0,0 @@ -import subprocess - -from charmhelpers.core.strutils import BasicStringComparator - - -UBUNTU_RELEASES = ( - 'lucid', - 'maverick', - 'natty', - 'oneiric', - 'precise', - 'quantal', - 'raring', - 'saucy', - 'trusty', - 'utopic', - 'vivid', - 'wily', - 'xenial', - 'yakkety', - 'zesty', - 'artful', - 'bionic', - 'cosmic', -) - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Ubuntu releases. - - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something release-specific - """ - _list = UBUNTU_RELEASES - - -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d
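A sketch of release-gated behaviour using the Ubuntu comparator; ordering follows UBUNTU_RELEASES rather than plain string comparison (install_systemd_units() is a hypothetical helper):

    from charmhelpers.core.host_factory.ubuntu import (
        CompareHostReleases,
        lsb_release,
    )

    release = lsb_release()['DISTRIB_CODENAME']
    if CompareHostReleases(release) >= 'xenial':
        install_systemd_units()   # hypothetical helper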
- - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports the apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/hooks/charmhelpers/core/hugepage.py b/hooks/charmhelpers/core/hugepage.py deleted file mode 100644 index 54b5b5e..0000000 --- a/hooks/charmhelpers/core/hugepage.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import yaml -from charmhelpers.core import fstab -from charmhelpers.core import sysctl -from charmhelpers.core.host import ( - add_group, - add_user_to_group, - fstab_mount, - mkdir, -) -from charmhelpers.core.strutils import bytes_from_string -from subprocess import check_output - - -def hugepage_support(user, group='hugetlb', nr_hugepages=256, - max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True, set_shmmax=False): - """Enable hugepages on system. - - Args: - user (str) -- Username to allow access to hugepages to - group (str) -- Group name to own hugepages - nr_hugepages (int) -- Number of pages to reserve - max_map_count (int) -- Number of Virtual Memory Areas a process can own - mnt_point (str) -- Directory to mount hugepages on - pagesize (str) -- Size of hugepages - mount (bool) -- Whether to mount hugepages - set_shmmax (bool) -- Whether to raise kernel.shmmax to cover the - reservation - """ - group_info = add_group(group) - gid = group_info.gr_gid - add_user_to_group(user, group) - if max_map_count < 2 * nr_hugepages: - max_map_count = 2 * nr_hugepages - sysctl_settings = { - 'vm.nr_hugepages': nr_hugepages, - 'vm.max_map_count': max_map_count, - 'vm.hugetlb_shm_group': gid, - } - if set_shmmax: - shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) - shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages - if shmmax_minsize > shmmax_current: - sysctl_settings['kernel.shmmax'] = shmmax_minsize - sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') - mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) - lfstab = fstab.Fstab() - fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) - if fstab_entry: - lfstab.remove_entry(fstab_entry) - entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', - 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) - lfstab.add_entry(entry) - if mount: - fstab_mount(mnt_point)
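A sketch of reserving hugepages for a virtualisation workload; this needs root and a hugepage-capable kernel, and the user name and sizing are illustrative:

    from charmhelpers.core.hugepage import hugepage_support

    hugepage_support('libvirt-qemu', group='hugetlb',
                     nr_hugepages=512, mount=True)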
diff --git a/hooks/charmhelpers/core/kernel.py b/hooks/charmhelpers/core/kernel.py deleted file mode 100644 index 2d40452..0000000 --- a/hooks/charmhelpers/core/kernel.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re -import subprocess - -from charmhelpers.osplatform import get_platform -from charmhelpers.core.hookenv import ( - log, - INFO -) - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import - -__author__ = "Jorge Niedbalski " - - -def modprobe(module, persist=True): - """Load a kernel module and configure for auto-load on reboot.""" - cmd = ['modprobe', module] - - log('Loading kernel module %s' % module, level=INFO) - - subprocess.check_call(cmd) - if persist: - persistent_modprobe(module) - - -def rmmod(module, force=False): - """Remove a module from the Linux kernel""" - cmd = ['rmmod'] - if force: - cmd.append('-f') - cmd.append(module) - log('Removing kernel module %s' % module, level=INFO) - return subprocess.check_call(cmd) - - -def lsmod(): - """Show which kernel modules are currently loaded""" - return subprocess.check_output(['lsmod'], - universal_newlines=True) - - -def is_module_loaded(module): - """Checks if a kernel module is already loaded""" - matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) - return len(matches) > 0 diff --git a/hooks/charmhelpers/core/kernel_factory/__init__.py b/hooks/charmhelpers/core/kernel_factory/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hooks/charmhelpers/core/kernel_factory/centos.py b/hooks/charmhelpers/core/kernel_factory/centos.py deleted file mode 100644 index 1c402c1..0000000 --- a/hooks/charmhelpers/core/kernel_factory/centos.py +++ /dev/null @@ -1,17 +0,0 @@ -import subprocess -import os - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - if not os.path.exists('/etc/rc.modules'): - open('/etc/rc.modules', 'a').close() - os.chmod('/etc/rc.modules', 0o111) - with open('/etc/rc.modules', 'r+') as modules: - if module not in modules.read(): - modules.write('modprobe %s\n' % module) - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["dracut", "-f", version]) diff --git a/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/hooks/charmhelpers/core/kernel_factory/ubuntu.py deleted file mode 100644 index 3de372f..0000000 --- a/hooks/charmhelpers/core/kernel_factory/ubuntu.py +++ /dev/null @@ -1,13 +0,0 @@ -import subprocess - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module + "\n") - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
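A sketch of idempotent module loading ('ip_vs' is an illustrative module name):

    from charmhelpers.core.kernel import modprobe, is_module_loaded

    if not is_module_loaded('ip_vs'):
        modprobe('ip_vs', persist=True)   # also persists across reboots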
diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py deleted file mode 100644 index 61fd074..0000000 --- a/hooks/charmhelpers/core/services/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .base import * # NOQA -from .helpers import * # NOQA diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py deleted file mode 100644 index 179ad4f..0000000 --- a/hooks/charmhelpers/core/services/base.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import json -from inspect import getargspec -from collections import Iterable, OrderedDict - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -__all__ = ['ServiceManager', 'ManagerCallback', - 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', - 'service_restart', 'service_stop'] - - -class ServiceManager(object): - def __init__(self, services=None): - """ - Register a list of services, given their definitions. - - Service definitions are dicts in the following formats (all keys except - 'service' are optional):: - - { - "service": <service name>, - "required_data": <list of required data contexts>, - "provided_data": <list of provided data contexts>, - "data_ready": <one or more callbacks>, - "data_lost": <one or more callbacks>, - "start": <one or more callbacks>, - "stop": <one or more callbacks>, - "ports": <list of ports to manage>, - } - - The 'required_data' list should contain dicts of required data (or - dependency managers that act like dicts and know how to collect the data). - Only when all items in the 'required_data' list are populated are the list - of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more - information. - - The 'provided_data' list should contain relation data providers, most likely - a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, - that will indicate a set of data to set on a given relation. - - The 'data_ready' value should be either a single callback, or a list of - callbacks, to be called when all items in 'required_data' pass `is_ready()`. - Each callback will be called with the service name as the only parameter. - After all of the 'data_ready' callbacks are called, the 'start' callbacks - are fired. - - The 'data_lost' value should be either a single callback, or a list of - callbacks, to be called when a 'required_data' item no longer passes - `is_ready()`. Each callback will be called with the service name as the - only parameter. After all of the 'data_lost' callbacks are called, - the 'stop' callbacks are fired.
- - The 'start' value should be either a single callback, or a list of - callbacks, to be called when starting the service, after the 'data_ready' - callbacks are complete. Each callback will be called with the service - name as the only parameter. This defaults to - `[host.service_start, services.open_ports]`. - - The 'stop' value should be either a single callback, or a list of - callbacks, to be called when stopping the service. If the service is - being stopped because it no longer has all of its 'required_data', this - will be called after all of the 'data_lost' callbacks are complete. - Each callback will be called with the service name as the only parameter. - This defaults to `[services.close_ports, host.service_stop]`. - - The 'ports' value should be a list of ports to manage. The default - 'start' handler will open the ports after the service is started, - and the default 'stop' handler will close the ports prior to stopping - the service. - - - Examples: - - The following registers an Upstart service called bingod that depends on - a mongodb relation and which runs a custom `db_migrate` function prior to - restarting the service, and a Runit service called spadesd:: - - manager = services.ServiceManager([ - { - 'service': 'bingod', - 'ports': [80, 443], - 'required_data': [MongoRelation(), config(), {'my': 'data'}], - 'data_ready': [ - services.template(source='bingod.conf'), - services.template(source='bingod.ini', - target='/etc/bingod.ini', - owner='bingo', perms=0o400), - ], - }, - { - 'service': 'spadesd', - 'data_ready': services.template(source='spadesd_run.j2', - target='/etc/sv/spadesd/run', - perms=0o555), - 'start': runit_start, - 'stop': runit_stop, - }, - ]) - manager.manage() - """ - self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') - self._ready = None - self.services = OrderedDict() - for service in services or []: - service_name = service['service'] - self.services[service_name] = service - - def manage(self): - """ - Handle the current hook by doing The Right Thing with the registered services. - """ - hookenv._run_atstart() - try: - hook_name = hookenv.hook_name() - if hook_name == 'stop': - self.stop_services() - else: - self.reconfigure_services() - self.provide_data() - except SystemExit as x: - if x.code is None or x.code == 0: - hookenv._run_atexit() - hookenv._run_atexit() - - def provide_data(self): - """ - Set the relation data for each provider in the ``provided_data`` list. - - A provider must have a `name` attribute, which indicates which relation - to set data on, and a `provide_data()` method, which returns a dict of - data to set. - - The `provide_data()` method can optionally accept two parameters: - - * ``remote_service`` The name of the remote service that the data will - be provided to. The `provide_data()` method will be called once - for each connected service (not unit). This allows the method to - tailor its data to the given service. - * ``service_ready`` Whether or not the service definition had all of - its requirements met, and thus the ``data_ready`` callbacks run. - - Note that the ``provided_data`` methods are now called **after** the - ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks - a chance to generate any data necessary to provide to the remote - services.
- """ - for service_name, service in self.services.items(): - service_ready = self.is_ready(service_name) - for provider in service.get('provided_data', []): - for relid in hookenv.relation_ids(provider.name): - units = hookenv.related_units(relid) - if not units: - continue - remote_service = units[0].split('/')[0] - argspec = getargspec(provider.provide_data) - if len(argspec.args) > 1: - data = provider.provide_data(remote_service, service_ready) - else: - data = provider.provide_data() - if data: - hookenv.relation_set(relid, data) - - def reconfigure_services(self, *service_names): - """ - Update all files for one or more registered services, and, - if ready, optionally restart them. - - If no service names are given, reconfigures all registered services. - """ - for service_name in service_names or self.services.keys(): - if self.is_ready(service_name): - self.fire_event('data_ready', service_name) - self.fire_event('start', service_name, default=[ - service_restart, - manage_ports]) - self.save_ready(service_name) - else: - if self.was_ready(service_name): - self.fire_event('data_lost', service_name) - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - self.save_lost(service_name) - - def stop_services(self, *service_names): - """ - Stop one or more registered services, by name. - - If no service names are given, stops all registered services. - """ - for service_name in service_names or self.services.keys(): - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - - def get_service(self, service_name): - """ - Given the name of a registered service, return its service definition. - """ - service = self.services.get(service_name) - if not service: - raise KeyError('Service not registered: %s' % service_name) - return service - - def fire_event(self, event_name, service_name, default=None): - """ - Fire a data_ready, data_lost, start, or stop event on a given service. - """ - service = self.get_service(service_name) - callbacks = service.get(event_name, default) - if not callbacks: - return - if not isinstance(callbacks, Iterable): - callbacks = [callbacks] - for callback in callbacks: - if isinstance(callback, ManagerCallback): - callback(self, service_name, event_name) - else: - callback(service_name) - - def is_ready(self, service_name): - """ - Determine if a registered service is ready, by checking its 'required_data'. - - A 'required_data' item can be any mapping type, and is considered ready - if `bool(item)` evaluates as True. - """ - service = self.get_service(service_name) - reqs = service.get('required_data', []) - return all(bool(req) for req in reqs) - - def _load_ready_file(self): - if self._ready is not None: - return - if os.path.exists(self._ready_file): - with open(self._ready_file) as fp: - self._ready = set(json.load(fp)) - else: - self._ready = set() - - def _save_ready_file(self): - if self._ready is None: - return - with open(self._ready_file, 'w') as fp: - json.dump(list(self._ready), fp) - - def save_ready(self, service_name): - """ - Save an indicator that the given service is now data_ready. - """ - self._load_ready_file() - self._ready.add(service_name) - self._save_ready_file() - - def save_lost(self, service_name): - """ - Save an indicator that the given service is no longer data_ready. - """ - self._load_ready_file() - self._ready.discard(service_name) - self._save_ready_file() - - def was_ready(self, service_name): - """ - Determine if the given service was previously data_ready. 
- """ - self._load_ready_file() - return service_name in self._ready - - -class ManagerCallback(object): - """ - Special case of a callback that takes the `ServiceManager` instance - in addition to the service name. - - Subclasses should implement `__call__` which should accept three parameters: - - * `manager` The `ServiceManager` instance - * `service_name` The name of the service it's being triggered for - * `event_name` The name of the event that this callback is handling - """ - def __call__(self, manager, service_name, event_name): - raise NotImplementedError() - - -class PortManagerCallback(ManagerCallback): - """ - Callback class that will open or close ports, for use as either - a start or stop action. - """ - def __call__(self, manager, service_name, event_name): - service = manager.get_service(service_name) - # turn this generator into a list, - # as we'll be going over it multiple times - new_ports = list(service.get('ports', [])) - port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) - if os.path.exists(port_file): - with open(port_file) as fp: - old_ports = fp.read().split(',') - for old_port in old_ports: - if bool(old_port) and not self.ports_contains(old_port, new_ports): - hookenv.close_port(old_port) - with open(port_file, 'w') as fp: - fp.write(','.join(str(port) for port in new_ports)) - for port in new_ports: - # A port is either a number or 'ICMP' - protocol = 'TCP' - if str(port).upper() == 'ICMP': - protocol = 'ICMP' - if event_name == 'start': - hookenv.open_port(port, protocol) - elif event_name == 'stop': - hookenv.close_port(port, protocol) - - def ports_contains(self, port, ports): - if not bool(port): - return False - if str(port).upper() != 'ICMP': - port = int(port) - return port in ports - - -def service_stop(service_name): - """ - Wrapper around host.service_stop to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_running(service_name): - host.service_stop(service_name) - - -def service_restart(service_name): - """ - Wrapper around host.service_restart to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_available(service_name): - if host.service_running(service_name): - host.service_restart(service_name) - else: - host.service_start(service_name) - - -# Convenience aliases -open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py deleted file mode 100644 index 3e6e30d..0000000 --- a/hooks/charmhelpers/core/services/helpers.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import yaml - -from charmhelpers.core import hookenv -from charmhelpers.core import host -from charmhelpers.core import templating - -from charmhelpers.core.services.base import ManagerCallback - - -__all__ = ['RelationContext', 'TemplateCallback', - 'render_template', 'template'] - - -class RelationContext(dict): - """ - Base class for a context generator that gets relation data from juju. - - Subclasses must provide the attributes `name`, which is the name of the - interface of interest, `interface`, which is the type of the interface of - interest, and `required_keys`, which is the set of keys required for the - relation to be considered complete. The data for all interfaces matching - the `name` attribute that are complete will be used to populate the - dictionary values (see `get_data`, below). - - The generated context will be namespaced under the relation :attr:`name`, - to prevent potential naming conflicts. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = None - interface = None - - def __init__(self, name=None, additional_required_keys=None): - if not hasattr(self, 'required_keys'): - self.required_keys = [] - - if name is not None: - self.name = name - if additional_required_keys: - self.required_keys.extend(additional_required_keys) - self.get_data() - - def __bool__(self): - """ - Returns True if all of the required_keys are available. - """ - return self.is_ready() - - __nonzero__ = __bool__ - - def __repr__(self): - return super(RelationContext, self).__repr__() - - def is_ready(self): - """ - Returns True if all of the `required_keys` are available from any units. - """ - ready = len(self.get(self.name, [])) > 0 - if not ready: - hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) - return ready - - def _is_ready(self, unit_data): - """ - Helper method that tests a set of relation data and returns True if - all of the `required_keys` are present. - """ - return set(unit_data.keys()).issuperset(set(self.required_keys)) - - def get_data(self): - """ - Retrieve the relation data for each unit involved in a relation and, - if complete, store it in a list under `self[self.name]`. This - is automatically called when the RelationContext is instantiated. - - The units are sorted lexicographically first by the service ID, then by - the unit ID. Thus, if an interface has two other services, 'db:1' - and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', - and 'db:2' having one unit, 'mediawiki/0', all of which have a complete - set of data, the relation data for the units will be stored in the - order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. - - If you only care about a single unit on the relation, you can just - access it as `{{ interface[0]['key'] }}`. However, if you can at all - support multiple units on a relation, you should iterate over the list, - like:: - - {% for unit in interface -%} - {{ unit['key'] }}{% if not loop.last %},{% endif %} - {%- endfor %} - - Note that since all sets of relation data from all related services and - units are in a single list, if you need to know which service or unit a - set of data came from, you'll need to extend this class to preserve - that information.
- """ - if not hookenv.relation_ids(self.name): - return - - ns = self.setdefault(self.name, []) - for rid in sorted(hookenv.relation_ids(self.name)): - for unit in sorted(hookenv.related_units(rid)): - reldata = hookenv.relation_get(rid=rid, unit=unit) - if self._is_ready(reldata): - ns.append(reldata) - - def provide_data(self): - """ - Return data to be relation_set for this interface. - """ - return {} - - -class MysqlRelation(RelationContext): - """ - Relation context for the `mysql` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'db' - interface = 'mysql' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'user', 'password', 'database'] - RelationContext.__init__(self, *args, **kwargs) - - -class HttpRelation(RelationContext): - """ - Relation context for the `http` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'website' - interface = 'http' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'port'] - RelationContext.__init__(self, *args, **kwargs) - - def provide_data(self): - return { - 'host': hookenv.unit_get('private-address'), - 'port': 80, - } - - -class RequiredConfig(dict): - """ - Data context that loads config options with one or more mandatory options. - - Once the required options have been changed from their default values, all - config options will be available, namespaced under `config` to prevent - potential naming conflicts (for example, between a config option and a - relation property). - - :param list *args: List of options that must be changed from their default values. - """ - - def __init__(self, *args): - self.required_options = args - self['config'] = hookenv.config() - with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) - - def __bool__(self): - for option in self.required_options: - if option not in self['config']: - return False - current_value = self['config'][option] - default_value = self.config[option].get('default') - if current_value == default_value: - return False - if current_value in (None, '') and default_value in (None, ''): - return False - return True - - def __nonzero__(self): - return self.__bool__() - - -class StoredContext(dict): - """ - A data context that always returns the data that it was first created with. - - This is useful to do a one-time generation of things like passwords, that - will thereafter use the same value that was originally generated, instead - of generating a new value each time it is run. - """ - def __init__(self, file_name, config_data): - """ - If the file exists, populate `self` with the data from the file. - Otherwise, populate with the given data and persist it to the file. 
- """ - if os.path.exists(file_name): - self.update(self.read_context(file_name)) - else: - self.store_context(file_name, config_data) - self.update(config_data) - - def store_context(self, file_name, config_data): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0o600) - yaml.dump(config_data, file_stream) - - def read_context(self, file_name): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) - if not data: - raise OSError("%s is empty" % file_name) - return data - - -class TemplateCallback(ManagerCallback): - """ - Callback class that will render a Jinja2 template, for use as a ready - action. - - :param str source: The template source file, relative to - `$CHARM_DIR/templates` - - :param str target: The target to write the rendered template to (or None) - :param str owner: The owner of the rendered file - :param str group: The group of the rendered file - :param int perms: The permissions of the rendered file - :param partial on_change_action: functools partial to be executed when - rendered file changes - :param jinja2 loader template_loader: A jinja2 template loader - - :return str: The rendered template - """ - def __init__(self, source, target, - owner='root', group='root', perms=0o444, - on_change_action=None, template_loader=None): - self.source = source - self.target = target - self.owner = owner - self.group = group - self.perms = perms - self.on_change_action = on_change_action - self.template_loader = template_loader - - def __call__(self, manager, service_name, event_name): - pre_checksum = '' - if self.on_change_action and os.path.isfile(self.target): - pre_checksum = host.file_hash(self.target) - service = manager.get_service(service_name) - context = {'ctx': {}} - for ctx in service.get('required_data', []): - context.update(ctx) - context['ctx'].update(ctx) - - result = templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) - if self.on_change_action: - if pre_checksum == host.file_hash(self.target): - hookenv.log( - 'No change detected: {}'.format(self.target), - hookenv.DEBUG) - else: - self.on_change_action() - - return result - - -# Convenience aliases for templates -render_template = template = TemplateCallback diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py deleted file mode 100644 index e8df045..0000000 --- a/hooks/charmhelpers/core/strutils.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import six -import re - - -def bool_from_string(value): - """Interpret string value as boolean. - - Returns True if value translates to True otherwise False. 
- """ - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) - raise ValueError(msg) - - value = value.strip().lower() - - if value in ['y', 'yes', 'true', 't', 'on']: - return True - elif value in ['n', 'no', 'false', 'f', 'off']: - return False - - msg = "Unable to interpret string value '%s' as boolean" % (value) - raise ValueError(msg) - - -def bytes_from_string(value): - """Interpret human readable string value as bytes. - - Returns int - """ - BYTE_POWER = { - 'K': 1, - 'KB': 1, - 'M': 2, - 'MB': 2, - 'G': 3, - 'GB': 3, - 'T': 4, - 'TB': 4, - 'P': 5, - 'PB': 5, - } - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as bytes" % (value) - raise ValueError(msg) - matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if matches: - size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) - else: - # Assume that value passed in is bytes - try: - size = int(value) - except ValueError: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return size - - -class BasicStringComparator(object): - """Provides a class that will compare strings from an iterator type object. - Used to provide > and < comparisons on strings that may not necessarily be - alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the - z-wrap. - """ - - _list = None - - def __init__(self, item): - if self._list is None: - raise Exception("Must define the _list in the class definition!") - try: - self.index = self._list.index(item) - except Exception: - raise KeyError("Item '{}' is not in list '{}'" - .format(item, self._list)) - - def __eq__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index == self._list.index(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __lt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index < self._list.index(other) - - def __ge__(self, other): - return not self.__lt__(other) - - def __gt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index > self._list.index(other) - - def __le__(self, other): - return not self.__gt__(other) - - def __str__(self): - """Always give back the item at the index so it can be used in - comparisons like: - - s_mitaka = CompareOpenStack('mitaka') - s_newton = CompareOpenstack('newton') - - assert s_newton > s_mitaka - - @returns: - """ - return self._list[self.index] diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py deleted file mode 100644 index 1f188d8..0000000 --- a/hooks/charmhelpers/core/sysctl.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import yaml - -from subprocess import check_call - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, -) - -__author__ = 'Jorge Niedbalski R. ' - - -def create(sysctl_dict, sysctl_file): - """Creates a sysctl.conf file from a YAML associative array - - :param sysctl_dict: a dict or YAML-formatted string of sysctl - options eg "{ 'kernel.max_pid': 1337 }" - :type sysctl_dict: str - :param sysctl_file: path to the sysctl file to be saved - :type sysctl_file: str or unicode - :returns: None - """ - if type(sysctl_dict) is not dict: - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return - else: - sysctl_dict_parsed = sysctl_dict - - with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict_parsed.items(): - fd.write("{}={}\n".format(key, value)) - - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), - level=DEBUG) - - check_call(["sysctl", "-p", sysctl_file]) diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py deleted file mode 100644 index 9014015..0000000 --- a/hooks/charmhelpers/core/templating.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', - template_loader=None, config_template=None): - """ - Render a template. - - The `source` path, if not absolute, is relative to the `templates_dir`. - - The `target` path should be absolute. It can also be `None`, in which - case no file will be written. - - The context should be a dict containing the values to be replaced in the - template. - - config_template may be provided to render from a provided template instead - of loading from a file. - - The `owner`, `group`, and `perms` options will be passed to `write_file`. - - If omitted, `templates_dir` defaults to the `templates` folder in the charm. - - The rendered template will be written to the file as well as being returned - as a string. - - Note: Using this requires python-jinja2 or python3-jinja2; if it is not - installed, calling this will attempt to use charmhelpers.fetch.apt_install - to install it. 
- """ - try: - from jinja2 import FileSystemLoader, Environment, exceptions - except ImportError: - try: - from charmhelpers.fetch import apt_install - except ImportError: - hookenv.log('Could not import jinja2, and could not import ' - 'charmhelpers.fetch to install it', - level=hookenv.ERROR) - raise - if sys.version_info.major == 2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) - from jinja2 import FileSystemLoader, Environment, exceptions - - if template_loader: - template_env = Environment(loader=template_loader) - else: - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - template_env = Environment(loader=FileSystemLoader(templates_dir)) - - # load from a string if provided explicitly - if config_template is not None: - template = template_env.from_string(config_template) - else: - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e - content = template.render(context) - if target is not None: - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) - return content diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py deleted file mode 100644 index ab55432..0000000 --- a/hooks/charmhelpers/core/unitdata.py +++ /dev/null @@ -1,525 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Authors: -# Kapil Thangavelu -# -""" -Intro ------ - -A simple way to store state in units. This provides a key value -storage with support for versioned, transactional operation, -and can calculate deltas from previous values to simplify unit logic -when processing changes. - - -Hook Integration ----------------- - -There are several extant frameworks for hook execution, including - - - charmhelpers.core.hookenv.Hooks - - charmhelpers.core.services.ServiceManager - -The storage classes are framework agnostic, one simple integration is -via the HookData contextmanager. It will record the current hook -execution environment (including relation data, config data, etc.), -setup a transaction and allow easy access to the changes from -previously seen values. One consequence of the integration is the -reservation of particular keys ('rels', 'unit', 'env', 'config', -'charm_revisions') for their respective values. 
-
-Here's a fully worked integration example using hookenv.Hooks::
-
-    from charmhelpers.core import hookenv, unitdata
-
-    hook_data = unitdata.HookData()
-    db = unitdata.kv()
-    hooks = hookenv.Hooks()
-
-    @hooks.hook
-    def config_changed():
-        # Print all changes to configuration from previously seen
-        # values.
-        for changed, (prev, cur) in hook_data.conf.items():
-            print('config changed', changed,
-                  'previous value', prev,
-                  'current value', cur)
-
-        # Get some unit specific bookkeeping
-        if not db.get('pkg_key'):
-            key = urllib.urlopen('https://example.com/pkg_key').read()
-            db.set('pkg_key', key)
-
-        # Directly access all charm config as a mapping.
-        conf = db.getrange('config', True)
-
-        # Directly access all relation data as a mapping
-        rels = db.getrange('rels', True)
-
-    if __name__ == '__main__':
-        with hook_data():
-            hooks.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records the hook name and timestamp)::
-
-    >>> from unitdata import kv
-    >>> db = kv()
-    >>> with db.hook_scope('install'):
-    ...    # do work, in transactional scope.
-    ...    db.set('x', 1)
-    >>> db.get('x')
-    1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
-    >>> kv.set('y', True)
-    >>> kv.get('y')
-    True
-
-    # We can set complex values (dicts, lists) as a single key.
-    >>> kv.set('config', {'a': 1, 'b': True})
-
-    # Also supports returning dictionaries as a record which
-    # provides attribute access.
-    >>> config = kv.get('config', record=True)
-    >>> config.b
-    True
-
-
-Groups of keys can be manipulated with update/getrange::
-
-    >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
-    >>> kv.getrange('gui.', strip=True)
-    {'z': 1, 'y': 2}
-
-When updating values, it's very helpful to understand which values
-have actually changed and how they have changed. The storage
-provides a delta method to provide for this::
-
-    >>> data = {'debug': True, 'option': 2}
-    >>> delta = kv.delta(data, 'config.')
-    >>> delta.debug.previous
-    None
-    >>> delta.debug.current
-    True
-    >>> delta
-    {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via the 'update' method::
-
-    >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated with the hook name.
-
-    >>> with db.hook_scope('config-changed'):
-    ...    db.set('x', 42)
-    >>> db.gethistory('x')
-    [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
-     (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu '
-
-
-class Storage(object):
-    """Simple key value database for local unit state within charms.
-
-    Modifications are not persisted unless :meth:`flush` is called.
-
-    To support dicts, lists, integers, floats, and booleans, values
-    are automatically json encoded/decoded.
-
-    Note: to facilitate unit testing, ':memory:' can be passed as the
-    path parameter which causes sqlite3 to only build the db in memory.
-    This should only be used for testing purposes.
- """ - def __init__(self, path=None): - self.db_path = path - if path is None: - if 'UNIT_STATE_DB' in os.environ: - self.db_path = os.environ['UNIT_STATE_DB'] - else: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') - if self.db_path != ':memory:': - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) - self.conn = sqlite3.connect('%s' % self.db_path) - self.cursor = self.conn.cursor() - self.revision = None - self._closed = False - self._init() - - def close(self): - if self._closed: - return - self.flush(False) - self.cursor.close() - self.conn.close() - self._closed = True - - def get(self, key, default=None, record=False): - self.cursor.execute('select data from kv where key=?', [key]) - result = self.cursor.fetchone() - if not result: - return default - if record: - return Record(json.loads(result[0])) - return json.loads(result[0]) - - def getrange(self, key_prefix, strip=False): - """ - Get a range of keys starting with a common prefix as a mapping of - keys to values. - - :param str key_prefix: Common prefix among all keys - :param bool strip: Optionally strip the common prefix from the key - names in the returned dict - :return dict: A (possibly empty) dict of key-value mappings - """ - self.cursor.execute("select key, data from kv where key like ?", - ['%s%%' % key_prefix]) - result = self.cursor.fetchall() - - if not result: - return {} - if not strip: - key_prefix = '' - return dict([ - (k[len(key_prefix):], json.loads(v)) for k, v in result]) - - def update(self, mapping, prefix=""): - """ - Set the values of multiple keys at once. - - :param dict mapping: Mapping of keys to values - :param str prefix: Optional prefix to apply to all keys in `mapping` - before setting - """ - for k, v in mapping.items(): - self.set("%s%s" % (prefix, k), v) - - def unset(self, key): - """ - Remove a key from the database entirely. - """ - self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - [key, self.revision, json.dumps('DELETED')]) - - def unsetrange(self, keys=None, prefix=""): - """ - Remove a range of keys starting with a common prefix, from the database - entirely. - - :param list keys: List of keys to remove. - :param str prefix: Optional prefix to apply to all keys in ``keys`` - before removing. - """ - if keys is not None: - keys = ['%s%s' % (prefix, key) for key in keys] - self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), - list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) - else: - self.cursor.execute('delete from kv where key like ?', - ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) - - def set(self, key, value): - """ - Set a value in the database. 
- - :param str key: Key to set the value for - :param value: Any JSON-serializable value to be set - """ - serialized = json.dumps(value) - - self.cursor.execute('select data from kv where key=?', [key]) - exists = self.cursor.fetchone() - - # Skip mutations to the same value - if exists: - if exists[0] == serialized: - return value - - if not exists: - self.cursor.execute( - 'insert into kv (key, data) values (?, ?)', - (key, serialized)) - else: - self.cursor.execute(''' - update kv - set data = ? - where key = ?''', [serialized, key]) - - # Save - if not self.revision: - return value - - self.cursor.execute( - 'select 1 from kv_revisions where key=? and revision=?', - [key, self.revision]) - exists = self.cursor.fetchone() - - if not exists: - self.cursor.execute( - '''insert into kv_revisions ( - revision, key, data) values (?, ?, ?)''', - (self.revision, key, serialized)) - else: - self.cursor.execute( - ''' - update kv_revisions - set data = ? - where key = ? - and revision = ?''', - [serialized, key, self.revision]) - - return value - - def delta(self, mapping, prefix): - """ - return a delta containing values that have changed. - """ - previous = self.getrange(prefix, strip=True) - if not previous: - pk = set() - else: - pk = set(previous.keys()) - ck = set(mapping.keys()) - delta = DeltaSet() - - # added - for k in ck.difference(pk): - delta[k] = Delta(None, mapping[k]) - - # removed - for k in pk.difference(ck): - delta[k] = Delta(previous[k], None) - - # changed - for k in pk.intersection(ck): - c = mapping[k] - p = previous[k] - if c != p: - delta[k] = Delta(p, c) - - return delta - - @contextlib.contextmanager - def hook_scope(self, name=""): - """Scope all future interactions to the current hook execution - revision.""" - assert not self.revision - self.cursor.execute( - 'insert into hooks (hook, date) values (?, ?)', - (name or sys.argv[0], - datetime.datetime.utcnow().isoformat())) - self.revision = self.cursor.lastrowid - try: - yield self.revision - self.revision = None - except Exception: - self.flush(False) - self.revision = None - raise - else: - self.flush() - - def flush(self, save=True): - if save: - self.conn.commit() - elif self._closed: - return - else: - self.conn.rollback() - - def _init(self): - self.cursor.execute(''' - create table if not exists kv ( - key text, - data text, - primary key (key) - )''') - self.cursor.execute(''' - create table if not exists kv_revisions ( - key text, - revision integer, - data text, - primary key (key, revision) - )''') - self.cursor.execute(''' - create table if not exists hooks ( - version integer primary key autoincrement, - hook text, - date text - )''') - self.conn.commit() - - def gethistory(self, key, deserialize=False): - self.cursor.execute( - ''' - select kv.revision, kv.key, kv.data, h.hook, h.date - from kv_revisions kv, - hooks h - where kv.key=? - and kv.revision = h.version - ''', [key]) - if deserialize is False: - return self.cursor.fetchall() - return map(_parse_history, self.cursor.fetchall()) - - def debug(self, fh=sys.stderr): - self.cursor.execute('select * from kv') - pprint.pprint(self.cursor.fetchall(), stream=fh) - self.cursor.execute('select * from kv_revisions') - pprint.pprint(self.cursor.fetchall(), stream=fh) - - -def _parse_history(d): - return (d[0], d[1], json.loads(d[2]), d[3], - datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) - - -class HookData(object): - """Simple integration for existing hook exec frameworks. 
- - Records all unit information, and stores deltas for processing - by the hook. - - Sample:: - - from charmhelper.core import hookenv, unitdata - - changes = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # View all changes to configuration - for changed, (prev, cur) in changes.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - if __name__ == '__main__': - with changes(): - hook.execute() - - """ - def __init__(self): - self.kv = kv() - self.conf = None - self.rels = None - - @contextlib.contextmanager - def __call__(self): - from charmhelpers.core import hookenv - hook_name = hookenv.hook_name() - - with self.kv.hook_scope(hook_name): - self._record_charm_version(hookenv.charm_dir()) - delta_config, delta_relation = self._record_hook(hookenv) - yield self.kv, delta_config, delta_relation - - def _record_charm_version(self, charm_dir): - # Record revisions.. charm revisions are meaningless - # to charm authors as they don't control the revision. - # so logic dependnent on revision is not particularly - # useful, however it is useful for debugging analysis. - charm_rev = open( - os.path.join(charm_dir, 'revision')).read().strip() - charm_rev = charm_rev or '0' - revs = self.kv.get('charm_revisions', []) - if charm_rev not in revs: - revs.append(charm_rev.strip() or '0') - self.kv.set('charm_revisions', revs) - - def _record_hook(self, hookenv): - data = hookenv.execution_environment() - self.conf = conf_delta = self.kv.delta(data['conf'], 'config') - self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') - self.kv.set('env', dict(data['env'])) - self.kv.set('unit', data['unit']) - self.kv.set('relid', data.get('relid')) - return conf_delta, rels_delta - - -class Record(dict): - - __slots__ = () - - def __getattr__(self, k): - if k in self: - return self[k] - raise AttributeError(k) - - -class DeltaSet(Record): - - __slots__ = () - - -Delta = collections.namedtuple('Delta', ['previous', 'current']) - - -_KV = None - - -def kv(): - global _KV - if _KV is None: - _KV = Storage() - return _KV diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py deleted file mode 100644 index 480a627..0000000 --- a/hooks/charmhelpers/fetch/__init__.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import importlib -from charmhelpers.osplatform import get_platform -from yaml import safe_load -from charmhelpers.core.hookenv import ( - config, - log, -) - -import six -if six.PY3: - from urllib.parse import urlparse, urlunparse -else: - from urlparse import urlparse, urlunparse - - -# The order of this list is very important. Handlers should be listed in from -# least- to most-specific URL matching. 
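A note on the unitdata helpers deleted above: `kv()` returns a process-wide
`Storage` singleton, and nothing touches disk until `flush()` commits (a
`hook_scope` flushes on clean exit and rolls back on error). A minimal sketch,
using the in-memory path the docstring recommends for tests::

    from charmhelpers.core import unitdata

    db = unitdata.Storage(':memory:')   # avoid the module-level singleton in tests
    with db.hook_scope('install'):      # commits on clean exit, rolls back on error
        db.set('seen', ['wordpress/0'])
    assert db.get('seen') == ['wordpress/0']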
-FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', - 'charmhelpers.fetch.giturl.GitUrlFetchHandler', -) - - -class SourceConfigError(Exception): - pass - - -class UnhandledSource(Exception): - pass - - -class AptLockError(Exception): - pass - - -class GPGKeyError(Exception): - """Exception occurs when a GPG key cannot be fetched or used. The message - indicates what the problem is. - """ - pass - - -class BaseFetchHandler(object): - - """Base class for FetchHandler implementations in fetch plugins""" - - def can_handle(self, source): - """Returns True if the source can be handled. Otherwise returns - a string explaining why it cannot""" - return "Wrong source type" - - def install(self, source): - """Try to download and unpack the source. Return the path to the - unpacked files or raise UnhandledSource.""" - raise UnhandledSource("Wrong source type {}".format(source)) - - def parse_url(self, url): - return urlparse(url) - - def base_url(self, url): - """Return url without querystring or fragment""" - parts = list(self.parse_url(url)) - parts[4:] = ['' for i in parts[4:]] - return urlunparse(parts) - - -__platform__ = get_platform() -module = "charmhelpers.fetch.%s" % __platform__ -fetch = importlib.import_module(module) - -filter_installed_packages = fetch.filter_installed_packages -install = fetch.apt_install -upgrade = fetch.apt_upgrade -update = _fetch_update = fetch.apt_update -purge = fetch.apt_purge -add_source = fetch.add_source - -if __platform__ == "ubuntu": - apt_cache = fetch.apt_cache - apt_install = fetch.apt_install - apt_update = fetch.apt_update - apt_upgrade = fetch.apt_upgrade - apt_purge = fetch.apt_purge - apt_mark = fetch.apt_mark - apt_hold = fetch.apt_hold - apt_unhold = fetch.apt_unhold - import_key = fetch.import_key - get_upstream_version = fetch.get_upstream_version -elif __platform__ == "centos": - yum_search = fetch.yum_search - - -def configure_sources(update=False, - sources_var='install_sources', - keys_var='install_keys'): - """Configure multiple sources from charm configuration. - - The lists are encoded as yaml fragments in the configuration. - The fragment needs to be included as a string. Sources and their - corresponding keys are of the types supported by add_source(). - - Example config: - install_sources: | - - "ppa:foo" - - "http://example.com/repo precise main" - install_keys: | - - null - - "a1b2c3d4" - - Note that 'null' (a.k.a. None) should not be quoted. - """ - sources = safe_load((config(sources_var) or '').strip()) or [] - keys = safe_load((config(keys_var) or '').strip()) or None - - if isinstance(sources, six.string_types): - sources = [sources] - - if keys is None: - for source in sources: - add_source(source, None) - else: - if isinstance(keys, six.string_types): - keys = [keys] - - if len(sources) != len(keys): - raise SourceConfigError( - 'Install sources and keys lists are different lengths') - for source, key in zip(sources, keys): - add_source(source, key) - if update: - _fetch_update(fatal=True) - - -def install_remote(source, *args, **kwargs): - """Install a file tree from a remote source. - - The specified source should be a url of the form: - scheme://[host]/path[#[option=value][&...]] - - Schemes supported are based on this modules submodules. - Options supported are submodule-specific. - Additional arguments are passed through to the submodule. 
-
-    For example::
-
-        dest = install_remote('http://example.com/archive.tgz',
-                              checksum='deadbeef',
-                              hash_type='sha1')
-
-    This will download `archive.tgz`, validate it using SHA1 and, if
-    the file is ok, extract it and return the directory in which it
-    was extracted.  If the checksum fails, it will raise
-    :class:`charmhelpers.core.host.ChecksumError`.
-    """
-    # We ONLY check for True here because can_handle may return a string
-    # explaining why it can't handle a given source.
-    handlers = [h for h in plugins() if h.can_handle(source) is True]
-    for handler in handlers:
-        try:
-            return handler.install(source, *args, **kwargs)
-        except UnhandledSource as e:
-            log('Install source attempt unsuccessful: {}'.format(e),
-                level='WARNING')
-    raise UnhandledSource("No handler found for source {}".format(source))
-
-
-def install_from_config(config_var_name):
-    """Install a file from config."""
-    charm_config = config()
-    source = charm_config[config_var_name]
-    return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
-    if not fetch_handlers:
-        fetch_handlers = FETCH_HANDLERS
-    plugin_list = []
-    for handler_name in fetch_handlers:
-        package, classname = handler_name.rsplit('.', 1)
-        try:
-            handler_class = getattr(
-                importlib.import_module(package),
-                classname)
-            plugin_list.append(handler_class())
-        except NotImplementedError:
-            # Skip missing plugins so that they can be omitted from
-            # installation if desired
-            log("FetchHandler {} not found, skipping plugin".format(
-                handler_name))
-    return plugin_list
diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index d25587a..0000000
--- a/hooks/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
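A note on the handler dispatch deleted above: `plugins()` instantiates each
dotted path in `FETCH_HANDLERS`, skipping any whose import raises
`NotImplementedError`, and a custom list can be passed to restrict dispatch. A
hedged sketch (the URL is a placeholder)::

    from charmhelpers.fetch import plugins

    git_only = plugins(['charmhelpers.fetch.giturl.GitUrlFetchHandler'])
    # can_handle returns literally True on a match; a string is the refusal reason.
    assert git_only[0].can_handle('https://github.com/juju/charm-helpers') is True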
- -import os -import hashlib -import re - -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - -import six -if six.PY3: - from urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - ) - from urllib.parse import urlparse, urlunparse, parse_qs - from urllib.error import URLError -else: - from urllib import urlretrieve - from urllib2 import ( - build_opener, install_opener, urlopen, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - URLError - ) - from urlparse import urlparse, urlunparse, parse_qs - - -def splituser(host): - '''urllib.splituser(), but six's support of this seems broken''' - _userprog = re.compile('^(.*)@(.*)$') - match = _userprog.match(host) - if match: - return match.group(1, 2) - return None, host - - -def splitpasswd(user): - '''urllib.splitpasswd(), but six's support of this is missing''' - _passwdprog = re.compile('^([^:]*):(.*)$', re.S) - match = _passwdprog.match(user) - if match: - return match.group(1, 2) - return user, None - - -class ArchiveUrlFetchHandler(BaseFetchHandler): - """ - Handler to download archive files from arbitrary URLs. - - Can fetch from http, https, ftp, and file URLs. - - Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. - - Installs the contents of the archive in $CHARM_DIR/fetched/. - """ - def can_handle(self, source): - url_parts = self.parse_url(source) - if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): - # XXX: Why is this returning a boolean and a string? It's - # doomed to fail since "bool(can_handle('foo://'))" will be True. - return "Wrong source type" - if get_archive_handler(self.base_url(source)): - return True - return False - - def download(self, source, dest): - """ - Download an archive file. - - :param str source: URL pointing to an archive file. - :param str dest: Local path location to download archive file to. - """ - # propagate all exceptions - # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse(source) - if proto in ('http', 'https'): - auth, barehost = splituser(netloc) - if auth is not None: - source = urlunparse((proto, barehost, path, params, query, fragment)) - username, password = splitpasswd(auth) - passman = HTTPPasswordMgrWithDefaultRealm() - # Realm is set to None in add_password to force the username and password - # to be used whatever the realm - passman.add_password(None, source, username, password) - authhandler = HTTPBasicAuthHandler(passman) - opener = build_opener(authhandler) - install_opener(opener) - response = urlopen(source) - try: - with open(dest, 'wb') as dest_file: - dest_file.write(response.read()) - except Exception as e: - if os.path.isfile(dest): - os.unlink(dest) - raise e - - # Mandatory file validation via Sha1 or MD5 hashing. - def download_and_validate(self, url, hashsum, validate="sha1"): - tempfile, headers = urlretrieve(url) - check_hash(tempfile, hashsum, validate) - return tempfile - - def install(self, source, dest=None, checksum=None, hash_type='sha1'): - """ - Download and install an archive file, with optional checksum validation. - - The checksum can also be given on the `source` URL's fragment. - For example:: - - handler.install('http://example.com/file.tgz#sha1=deadbeef') - - :param str source: URL pointing to an archive file. 
- :param str dest: Local destination path to install to. If not given, - installs to `$CHARM_DIR/archives/archive_file_name`. - :param str checksum: If given, validate the archive file after download. - :param str hash_type: Algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. - - """ - url_parts = self.parse_url(source) - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) - dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) - try: - self.download(source, dld_file) - except URLError as e: - raise UnhandledSource(e.reason) - except OSError as e: - raise UnhandledSource(e.strerror) - options = parse_qs(url_parts.fragment) - for key, value in options.items(): - if not six.PY3: - algorithms = hashlib.algorithms - else: - algorithms = hashlib.algorithms_available - if key in algorithms: - if len(value) != 1: - raise TypeError( - "Expected 1 hash value, not %d" % len(value)) - expected = value[0] - check_hash(dld_file, expected, key) - if checksum: - check_hash(dld_file, checksum, hash_type) - return extract(dld_file, dest) diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py deleted file mode 100644 index 07cd029..0000000 --- a/hooks/charmhelpers/fetch/bzrurl.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
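A note on the fragment handling above: `install` parses the URL fragment with
`parse_qs`, and any key naming a `hashlib` algorithm is treated as an expected
digest, so the checksum can travel with the source URL itself. A sketch (URL
and digests are placeholders; `check_hash` would reject a wrong digest)::

    handler = ArchiveUrlFetchHandler()

    # Equivalent ways to request validation; both call check_hash() on the
    # downloaded file before extracting under $CHARM_DIR/fetched/.
    handler.install('http://example.com/app.tgz#sha1=0123456789abcdef0123456789abcdef01234567')
    handler.install('http://example.com/app.tgz',
                    checksum='0123456789abcdef0123456789abcdef01234567',
                    hash_type='sha1')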
- -import os -from subprocess import check_call -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource, - filter_installed_packages, - install, -) -from charmhelpers.core.host import mkdir - - -if filter_installed_packages(['bzr']) != []: - install(['bzr']) - if filter_installed_packages(['bzr']) != []: - raise NotImplementedError('Unable to install bzr') - - -class BzrUrlFetchHandler(BaseFetchHandler): - """Handler for bazaar branches via generic and lp URLs.""" - - def can_handle(self, source): - url_parts = self.parse_url(source) - if url_parts.scheme not in ('bzr+ssh', 'lp', ''): - return False - elif not url_parts.scheme: - return os.path.exists(os.path.join(source, '.bzr')) - else: - return True - - def branch(self, source, dest, revno=None): - if not self.can_handle(source): - raise UnhandledSource("Cannot handle {}".format(source)) - cmd_opts = [] - if revno: - cmd_opts += ['-r', str(revno)] - if os.path.exists(dest): - cmd = ['bzr', 'pull'] - cmd += cmd_opts - cmd += ['--overwrite', '-d', dest, source] - else: - cmd = ['bzr', 'branch'] - cmd += cmd_opts - cmd += [source, dest] - check_call(cmd) - - def install(self, source, dest=None, revno=None): - url_parts = self.parse_url(source) - branch_name = url_parts.path.strip("/").split("/")[-1] - if dest: - dest_dir = os.path.join(dest, branch_name) - else: - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) - - if dest and not os.path.exists(dest): - mkdir(dest, perms=0o755) - - try: - self.branch(source, dest_dir, revno) - except OSError as e: - raise UnhandledSource(e.strerror) - return dest_dir diff --git a/hooks/charmhelpers/fetch/centos.py b/hooks/charmhelpers/fetch/centos.py deleted file mode 100644 index a91dcff..0000000 --- a/hooks/charmhelpers/fetch/centos.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import subprocess -import os -import time -import six -import yum - -from tempfile import NamedTemporaryFile -from charmhelpers.core.hookenv import log - -YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM. -YUM_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. 
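A note on the bzr handler deleted above: `branch` runs `bzr pull --overwrite`
when the destination already exists and `bzr branch` otherwise, so repeat
installs update in place, and `revno` pins a specific revision. A sketch (the
branch URL and revno are placeholders)::

    handler = BzrUrlFetchHandler()
    assert handler.can_handle('lp:charm-helpers') is True   # 'lp' scheme accepted
    dest = handler.install('lp:charm-helpers', revno=42)    # -> $CHARM_DIR/fetched/charm-helpers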
- - -def filter_installed_packages(packages): - """Return a list of packages that require installation.""" - yb = yum.YumBase() - package_list = yb.doPackageLists() - temp_cache = {p.base_package_name: 1 for p in package_list['installed']} - - _pkgs = [p for p in packages if not temp_cache.get(p, False)] - return _pkgs - - -def install(packages, options=None, fatal=False): - """Install one or more packages.""" - cmd = ['yum', '--assumeyes'] - if options is not None: - cmd.extend(options) - cmd.append('install') - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - _run_yum_command(cmd, fatal) - - -def upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages.""" - cmd = ['yum', '--assumeyes'] - if options is not None: - cmd.extend(options) - cmd.append('upgrade') - log("Upgrading with options: {}".format(options)) - _run_yum_command(cmd, fatal) - - -def update(fatal=False): - """Update local yum cache.""" - cmd = ['yum', '--assumeyes', 'update'] - log("Update with fatal: {}".format(fatal)) - _run_yum_command(cmd, fatal) - - -def purge(packages, fatal=False): - """Purge one or more packages.""" - cmd = ['yum', '--assumeyes', 'remove'] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Purging {}".format(packages)) - _run_yum_command(cmd, fatal) - - -def yum_search(packages): - """Search for a package.""" - output = {} - cmd = ['yum', 'search'] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Searching for {}".format(packages)) - result = subprocess.check_output(cmd) - for package in list(packages): - output[package] = package in result - return output - - -def add_source(source, key=None): - """Add a package source to this system. - - @param source: a URL with a rpm package - - @param key: A key to be added to the system's keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. - """ - if source is None: - log('Source is not present. Skipping') - return - - if source.startswith('http'): - directory = '/etc/yum.repos.d/' - for filename in os.listdir(directory): - with open(directory + filename, 'r') as rpm_file: - if source in rpm_file.read(): - break - else: - log("Add source: {!r}".format(source)) - # write in the charms.repo - with open(directory + 'Charms.repo', 'a') as rpm_file: - rpm_file.write('[%s]\n' % source[7:].replace('/', '_')) - rpm_file.write('name=%s\n' % source[7:]) - rpm_file.write('baseurl=%s\n\n' % source) - else: - log("Unknown source: {!r}".format(source)) - - if key: - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile('w+') as key_file: - key_file.write(key) - key_file.flush() - key_file.seek(0) - subprocess.check_call(['rpm', '--import', key_file.name]) - else: - subprocess.check_call(['rpm', '--import', key]) - - -def _run_yum_command(cmd, fatal=False): - """Run an YUM command. - - Checks the output and retry if the fatal flag is set to True. - - :param: cmd: str: The yum command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. 
- """ - env = os.environ.copy() - - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the yum - # lock was not acquired. - - while result is None or result == YUM_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > YUM_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire YUM lock. Will retry in {} seconds." - "".format(YUM_NO_LOCK_RETRY_DELAY)) - time.sleep(YUM_NO_LOCK_RETRY_DELAY) - - else: - subprocess.call(cmd, env=env) diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py deleted file mode 100644 index 4cf21bc..0000000 --- a/hooks/charmhelpers/fetch/giturl.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -from subprocess import check_call, CalledProcessError -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource, - filter_installed_packages, - install, -) - -if filter_installed_packages(['git']) != []: - install(['git']) - if filter_installed_packages(['git']) != []: - raise NotImplementedError('Unable to install git') - - -class GitUrlFetchHandler(BaseFetchHandler): - """Handler for git branches via generic and github URLs.""" - - def can_handle(self, source): - url_parts = self.parse_url(source) - # TODO (mattyw) no support for ssh git@ yet - if url_parts.scheme not in ('http', 'https', 'git', ''): - return False - elif not url_parts.scheme: - return os.path.exists(os.path.join(source, '.git')) - else: - return True - - def clone(self, source, dest, branch="master", depth=None): - if not self.can_handle(source): - raise UnhandledSource("Cannot handle {}".format(source)) - - if os.path.exists(dest): - cmd = ['git', '-C', dest, 'pull', source, branch] - else: - cmd = ['git', 'clone', source, dest, '--branch', branch] - if depth: - cmd.extend(['--depth', depth]) - check_call(cmd) - - def install(self, source, branch="master", dest=None, depth=None): - url_parts = self.parse_url(source) - branch_name = url_parts.path.strip("/").split("/")[-1] - if dest: - dest_dir = os.path.join(dest, branch_name) - else: - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) - try: - self.clone(source, dest_dir, branch, depth) - except CalledProcessError as e: - raise UnhandledSource(e) - except OSError as e: - raise UnhandledSource(e.strerror) - return dest_dir diff --git a/hooks/charmhelpers/fetch/snap.py b/hooks/charmhelpers/fetch/snap.py deleted file mode 100644 index 395836c..0000000 --- a/hooks/charmhelpers/fetch/snap.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2014-2017 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Charm helpers snap for classic charms.
-
-If writing reactive charms, use the snap layer:
-https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
-"""
-import subprocess
-import os
-from time import sleep
-from charmhelpers.core.hookenv import log
-
-__author__ = 'Joseph Borg '
-
-# The return code for "couldn't acquire lock" in Snap
-# (hopefully this will be improved).
-SNAP_NO_LOCK = 1
-SNAP_NO_LOCK_RETRY_DELAY = 10  # Wait X seconds between Snap lock checks.
-SNAP_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
-SNAP_CHANNELS = [
-    'edge',
-    'beta',
-    'candidate',
-    'stable',
-]
-
-
-class CouldNotAcquireLockException(Exception):
-    pass
-
-
-class InvalidSnapChannel(Exception):
-    pass
-
-
-def _snap_exec(commands):
-    """
-    Execute snap commands.
-
-    :param commands: List commands
-    :return: Integer exit code
-    """
-    assert type(commands) == list
-
-    retry_count = 0
-    return_code = None
-
-    while return_code is None or return_code == SNAP_NO_LOCK:
-        try:
-            return_code = subprocess.check_call(['snap'] + commands,
-                                                env=os.environ)
-        except subprocess.CalledProcessError as e:
-            retry_count += 1
-            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
-                raise CouldNotAcquireLockException(
-                    'Could not acquire lock after {} attempts'
-                    .format(SNAP_NO_LOCK_RETRY_COUNT))
-            return_code = e.returncode
-            log('Snap failed to acquire lock, trying again in {} seconds.'
-                .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
-            sleep(SNAP_NO_LOCK_RETRY_DELAY)
-
-    return return_code
-
-
-def snap_install(packages, *flags):
-    """
-    Install a snap package.
-
-    :param packages: String or List String package name
-    :param flags: List String flags to pass to install command
-    :return: Integer return code from snap
-    """
-    if type(packages) is not list:
-        packages = [packages]
-
-    flags = list(flags)
-
-    message = 'Installing snap(s) "%s"' % ', '.join(packages)
-    if flags:
-        message += ' with option(s) "%s"' % ', '.join(flags)
-
-    log(message, level='INFO')
-    return _snap_exec(['install'] + flags + packages)
-
-
-def snap_remove(packages, *flags):
-    """
-    Remove a snap package.
-
-    :param packages: String or List String package name
-    :param flags: List String flags to pass to remove command
-    :return: Integer return code from snap
-    """
-    if type(packages) is not list:
-        packages = [packages]
-
-    flags = list(flags)
-
-    message = 'Removing snap(s) "%s"' % ', '.join(packages)
-    if flags:
-        message += ' with options "%s"' % ', '.join(flags)
-
-    log(message, level='INFO')
-    return _snap_exec(['remove'] + flags + packages)
-
-
-def snap_refresh(packages, *flags):
-    """
-    Refresh / Update snap package.
- - :param packages: String or List String package name - :param flags: List String flags to pass to refresh command - :return: Integer return code from snap - """ - if type(packages) is not list: - packages = [packages] - - flags = list(flags) - - message = 'Refreshing snap(s) "%s"' % ', '.join(packages) - if flags: - message += ' with options "%s"' % ', '.join(flags) - - log(message, level='INFO') - return _snap_exec(['refresh'] + flags + packages) - - -def valid_snap_channel(channel): - """ Validate snap channel exists - - :raises InvalidSnapChannel: When channel does not exist - :return: Boolean - """ - if channel.lower() in SNAP_CHANNELS: - return True - else: - raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel)) diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py deleted file mode 100644 index 19aa6ba..0000000 --- a/hooks/charmhelpers/fetch/ubuntu.py +++ /dev/null @@ -1,592 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from collections import OrderedDict -import os -import platform -import re -import six -import time -import subprocess -from tempfile import NamedTemporaryFile - -from charmhelpers.core.host import ( - lsb_release -) -from charmhelpers.core.hookenv import ( - log, - DEBUG, - WARNING, -) -from charmhelpers.fetch import SourceConfigError, GPGKeyError - -PROPOSED_POCKET = ( - "# Proposed\n" - "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe " - "multiverse restricted\n") -PROPOSED_PORTS_POCKET = ( - "# Proposed\n" - "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe " - "multiverse restricted\n") -# Only supports 64bit and ppc64 at the moment. 
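A note on the snap helpers above: each call normalises its `packages` argument
to a list, appends any pass-through flags, and retries on `SNAP_NO_LOCK` before
giving up. A sketch (the snap name and flags are placeholders)::

    from charmhelpers.fetch.snap import snap_install, valid_snap_channel

    # Runs: snap install --channel=stable --classic vault
    snap_install('vault', '--channel=stable', '--classic')
    assert valid_snap_channel('stable') is True   # raises InvalidSnapChannel otherwise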
-ARCH_TO_PROPOSED_POCKET = { - 'x86_64': PROPOSED_POCKET, - 'ppc64le': PROPOSED_PORTS_POCKET, - 'aarch64': PROPOSED_PORTS_POCKET, - 's390x': PROPOSED_PORTS_POCKET, -} -CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" -CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' -CLOUD_ARCHIVE = """# Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" -CLOUD_ARCHIVE_POCKETS = { - # Folsom - 'folsom': 'precise-updates/folsom', - 'folsom/updates': 'precise-updates/folsom', - 'precise-folsom': 'precise-updates/folsom', - 'precise-folsom/updates': 'precise-updates/folsom', - 'precise-updates/folsom': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'precise-folsom/proposed': 'precise-proposed/folsom', - 'precise-proposed/folsom': 'precise-proposed/folsom', - # Grizzly - 'grizzly': 'precise-updates/grizzly', - 'grizzly/updates': 'precise-updates/grizzly', - 'precise-grizzly': 'precise-updates/grizzly', - 'precise-grizzly/updates': 'precise-updates/grizzly', - 'precise-updates/grizzly': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'precise-grizzly/proposed': 'precise-proposed/grizzly', - 'precise-proposed/grizzly': 'precise-proposed/grizzly', - # Havana - 'havana': 'precise-updates/havana', - 'havana/updates': 'precise-updates/havana', - 'precise-havana': 'precise-updates/havana', - 'precise-havana/updates': 'precise-updates/havana', - 'precise-updates/havana': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'precise-havana/proposed': 'precise-proposed/havana', - 'precise-proposed/havana': 'precise-proposed/havana', - # Icehouse - 'icehouse': 'precise-updates/icehouse', - 'icehouse/updates': 'precise-updates/icehouse', - 'precise-icehouse': 'precise-updates/icehouse', - 'precise-icehouse/updates': 'precise-updates/icehouse', - 'precise-updates/icehouse': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'precise-icehouse/proposed': 'precise-proposed/icehouse', - 'precise-proposed/icehouse': 'precise-proposed/icehouse', - # Juno - 'juno': 'trusty-updates/juno', - 'juno/updates': 'trusty-updates/juno', - 'trusty-juno': 'trusty-updates/juno', - 'trusty-juno/updates': 'trusty-updates/juno', - 'trusty-updates/juno': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'trusty-juno/proposed': 'trusty-proposed/juno', - 'trusty-proposed/juno': 'trusty-proposed/juno', - # Kilo - 'kilo': 'trusty-updates/kilo', - 'kilo/updates': 'trusty-updates/kilo', - 'trusty-kilo': 'trusty-updates/kilo', - 'trusty-kilo/updates': 'trusty-updates/kilo', - 'trusty-updates/kilo': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'trusty-kilo/proposed': 'trusty-proposed/kilo', - 'trusty-proposed/kilo': 'trusty-proposed/kilo', - # Liberty - 'liberty': 'trusty-updates/liberty', - 'liberty/updates': 'trusty-updates/liberty', - 'trusty-liberty': 'trusty-updates/liberty', - 'trusty-liberty/updates': 'trusty-updates/liberty', - 'trusty-updates/liberty': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'trusty-liberty/proposed': 'trusty-proposed/liberty', - 'trusty-proposed/liberty': 'trusty-proposed/liberty', - # Mitaka - 'mitaka': 'trusty-updates/mitaka', - 'mitaka/updates': 'trusty-updates/mitaka', - 'trusty-mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka/updates': 'trusty-updates/mitaka', - 'trusty-updates/mitaka': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 
'trusty-mitaka/proposed': 'trusty-proposed/mitaka', - 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', - # Newton - 'newton': 'xenial-updates/newton', - 'newton/updates': 'xenial-updates/newton', - 'xenial-newton': 'xenial-updates/newton', - 'xenial-newton/updates': 'xenial-updates/newton', - 'xenial-updates/newton': 'xenial-updates/newton', - 'newton/proposed': 'xenial-proposed/newton', - 'xenial-newton/proposed': 'xenial-proposed/newton', - 'xenial-proposed/newton': 'xenial-proposed/newton', - # Ocata - 'ocata': 'xenial-updates/ocata', - 'ocata/updates': 'xenial-updates/ocata', - 'xenial-ocata': 'xenial-updates/ocata', - 'xenial-ocata/updates': 'xenial-updates/ocata', - 'xenial-updates/ocata': 'xenial-updates/ocata', - 'ocata/proposed': 'xenial-proposed/ocata', - 'xenial-ocata/proposed': 'xenial-proposed/ocata', - 'xenial-proposed/ocata': 'xenial-proposed/ocata', - # Pike - 'pike': 'xenial-updates/pike', - 'xenial-pike': 'xenial-updates/pike', - 'xenial-pike/updates': 'xenial-updates/pike', - 'xenial-updates/pike': 'xenial-updates/pike', - 'pike/proposed': 'xenial-proposed/pike', - 'xenial-pike/proposed': 'xenial-proposed/pike', - 'xenial-proposed/pike': 'xenial-proposed/pike', - # Queens - 'queens': 'xenial-updates/queens', - 'xenial-queens': 'xenial-updates/queens', - 'xenial-queens/updates': 'xenial-updates/queens', - 'xenial-updates/queens': 'xenial-updates/queens', - 'queens/proposed': 'xenial-proposed/queens', - 'xenial-queens/proposed': 'xenial-proposed/queens', - 'xenial-proposed/queens': 'xenial-proposed/queens', - # Rocky - 'rocky': 'bionic-updates/rocky', - 'bionic-rocky': 'bionic-updates/rocky', - 'bionic-rocky/updates': 'bionic-updates/rocky', - 'bionic-updates/rocky': 'bionic-updates/rocky', - 'rocky/proposed': 'bionic-proposed/rocky', - 'bionic-rocky/proposed': 'bionic-proposed/rocky', - 'bionic-proposed/rocky': 'bionic-proposed/rocky', -} - - -APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. -CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times. 
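A note on the pocket table above: the many aliases all collapse to one
canonical `<series>-updates/<release>` or `<series>-proposed/<release>` value,
which is then substituted into the `CLOUD_ARCHIVE` template to produce the
sources.list entry. A sketch::

    pocket = CLOUD_ARCHIVE_POCKETS['xenial-queens']   # -> 'xenial-updates/queens'
    entry = CLOUD_ARCHIVE.format(pocket)
    # '# Ubuntu Cloud Archive\ndeb http://ubuntu-cloud.archive.canonical.com/ubuntu xenial-updates/queens main\n'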
-
-
-def filter_installed_packages(packages):
-    """Return a list of packages that require installation."""
-    cache = apt_cache()
-    _pkgs = []
-    for package in packages:
-        try:
-            p = cache[package]
-            if not p.current_ver:
-                _pkgs.append(package)
-        except KeyError:
-            log('Package {} has no installation candidate.'.format(package),
-                level='WARNING')
-            _pkgs.append(package)
-    return _pkgs
-
-
-def apt_cache(in_memory=True, progress=None):
-    """Build and return an apt cache."""
-    from apt import apt_pkg
-    apt_pkg.init()
-    if in_memory:
-        apt_pkg.config.set("Dir::Cache::pkgcache", "")
-        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
-    return apt_pkg.Cache(progress)
-
-
-def apt_install(packages, options=None, fatal=False):
-    """Install one or more packages."""
-    if options is None:
-        options = ['--option=Dpkg::Options::=--force-confold']
-
-    cmd = ['apt-get', '--assume-yes']
-    cmd.extend(options)
-    cmd.append('install')
-    if isinstance(packages, six.string_types):
-        cmd.append(packages)
-    else:
-        cmd.extend(packages)
-    log("Installing {} with options: {}".format(packages,
-                                                options))
-    _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
-    """Upgrade all packages."""
-    if options is None:
-        options = ['--option=Dpkg::Options::=--force-confold']
-
-    cmd = ['apt-get', '--assume-yes']
-    cmd.extend(options)
-    if dist:
-        cmd.append('dist-upgrade')
-    else:
-        cmd.append('upgrade')
-    log("Upgrading with options: {}".format(options))
-    _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
-    """Update local apt cache."""
-    cmd = ['apt-get', 'update']
-    _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
-    """Purge one or more packages."""
-    cmd = ['apt-get', '--assume-yes', 'purge']
-    if isinstance(packages, six.string_types):
-        cmd.append(packages)
-    else:
-        cmd.extend(packages)
-    log("Purging {}".format(packages))
-    _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
-    """Flag one or more packages using apt-mark."""
-    log("Marking {} as {}".format(packages, mark))
-    cmd = ['apt-mark', mark]
-    if isinstance(packages, six.string_types):
-        cmd.append(packages)
-    else:
-        cmd.extend(packages)
-
-    if fatal:
-        subprocess.check_call(cmd, universal_newlines=True)
-    else:
-        subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
-    return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
-    return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def import_key(key):
-    """Import an ASCII Armor key.
-
-    /!\ A Radix64 format keyid is also supported for backwards
-    compatibility, but should never be used; the key retrieval
-    mechanism is insecure and subject to man-in-the-middle attacks
-    voiding all signature checks using that key.
-
-    :param key: The key in ASCII armor format,
-        including BEGIN and END markers.
-    :raises: GPGKeyError if the key could not be imported
-    """
-    key = key.strip()
-    if '-' in key or '\n' in key:
-        # Send everything not obviously a keyid to GPG to import, as
-        # we trust its validation better than our own. e.g. handling
-        # comments before the key.
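-        # (An ASCII-armored key block always contains newlines and '-'
-        # characters from its BEGIN/END markers; a bare Radix64 keyid never
-        # does, which is what the check above relies on.)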
- log("PGP key found (looks like ASCII Armor format)", level=DEBUG) - if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and - '-----END PGP PUBLIC KEY BLOCK-----' in key): - log("Importing ASCII Armor PGP key", level=DEBUG) - with NamedTemporaryFile() as keyfile: - with open(keyfile.name, 'w') as fd: - fd.write(key) - fd.write("\n") - cmd = ['apt-key', 'add', keyfile.name] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error = "Error importing PGP key '{}'".format(key) - log(error) - raise GPGKeyError(error) - else: - raise GPGKeyError("ASCII armor markers missing from GPG key") - else: - # We should only send things obviously not a keyid offsite - # via this unsecured protocol, as it may be a secret or part - # of one. - log("PGP key found (looks like Radix64 format)", level=WARNING) - log("INSECURLY importing PGP key from keyserver; " - "full key not provided.", level=WARNING) - cmd = ['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] - try: - _run_with_retries(cmd) - except subprocess.CalledProcessError: - error = "Error importing PGP key '{}'".format(key) - log(error) - raise GPGKeyError(error) - - -def add_source(source, key=None, fail_invalid=False): - """Add a package source to this system. - - @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples:: - - ppa:charmers/example - deb https://stub:key@private.example.com/ubuntu trusty main - - In addition: - 'proposed:' may be used to enable the standard 'proposed' - pocket for the release. - 'cloud:' may be used to activate official cloud archive pockets, - such as 'cloud:icehouse' - 'distro' may be used as a noop - - Full list of source specifications supported by the function are: - - 'distro': A NOP; i.e. it has no effect. - 'proposed': the proposed deb spec [2] is wrtten to - /etc/apt/sources.list/proposed - 'distro-proposed': adds -proposed to the debs [2] - 'ppa:': add-apt-repository --yes - 'deb ': add-apt-repository --yes deb - 'http://....': add-apt-repository --yes http://... - 'cloud-archive:': add-apt-repository -yes cloud-archive: - 'cloud:[-staging]': specify a Cloud Archive pocket with - optional staging version. If staging is used then the staging PPA [2] - with be used. If staging is NOT used then the cloud archive [3] will be - added, and the 'ubuntu-cloud-keyring' package will be added for the - current distro. - - Otherwise the source is not recognised and this is logged to the juju log. - However, no error is raised, unless sys_error_on_exit is True. - - [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main - where {} is replaced with the derived pocket name. - [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \ - main universe multiverse restricted - where {} is replaced with the lsb_release codename (e.g. xenial) - [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu - to /etc/apt/sources.list.d/cloud-archive-list - - @param key: A key to be added to the system's APT keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. ppa and cloud archive keys - are securely added automtically, so sould not be provided. 
-
-    @param fail_invalid: (boolean) if True, then the function raises a
-        SourceConfigError if there is no matching installation source.
-
-    @raises SourceConfigError() if, for a cloud:<pocket> source, the <pocket>
-        is not a valid pocket in CLOUD_ARCHIVE_POCKETS
-    """
-    _mapping = OrderedDict([
-        (r"^distro$", lambda: None),  # This is a NOP
-        (r"^(?:proposed|distro-proposed)$", _add_proposed),
-        (r"^cloud-archive:(.*)$", _add_apt_repository),
-        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
-        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
-        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
-        (r"^cloud:(.*)$", _add_cloud_pocket),
-        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
-    ])
-    if source is None:
-        source = ''
-    for r, fn in six.iteritems(_mapping):
-        m = re.match(r, source)
-        if m:
-            # call the associated function with the captured groups
-            # raises SourceConfigError on error.
-            fn(*m.groups())
-            if key:
-                try:
-                    import_key(key)
-                except GPGKeyError as e:
-                    raise SourceConfigError(str(e))
-            break
-    else:
-        # nothing matched. log an error and maybe sys.exit
-        err = "Unknown source: {!r}".format(source)
-        log(err)
-        if fail_invalid:
-            raise SourceConfigError(err)
-
-
-def _add_proposed():
-    """Add the PROPOSED_POCKET as /etc/apt/sources.list.d/proposed.list
-
-    Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct stanza
-    for the deb line.
-
-    For Intel architectures PROPOSED_POCKET is used for the release, but for
-    other architectures PROPOSED_PORTS_POCKET is used for the release.
-    """
-    release = lsb_release()['DISTRIB_CODENAME']
-    arch = platform.machine()
-    if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
-        raise SourceConfigError("Arch {} not supported for (distro-)proposed"
-                                .format(arch))
-    with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
-        apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
-
-
-def _add_apt_repository(spec):
-    """Add the spec using add_apt_repository
-
-    :param spec: the parameter to pass to add_apt_repository
-    """
-    _run_with_retries(['add-apt-repository', '--yes', spec])
-
-
-def _add_cloud_pocket(pocket):
-    """Add a cloud pocket as /etc/apt/sources.list.d/cloud-archive.list
-
-    Note that this overwrites the existing file if there is one.
-
-    This function also converts the simple pocket into the actual pocket using
-    the CLOUD_ARCHIVE_POCKETS mapping.
-
-    :param pocket: string representing the pocket to add a deb spec for.
-    :raises: SourceConfigError if the cloud pocket doesn't exist or the
-        requested release doesn't match the current distro version.
-    """
-    apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
-                fatal=True)
-    if pocket not in CLOUD_ARCHIVE_POCKETS:
-        raise SourceConfigError(
-            'Unsupported cloud: source option %s' %
-            pocket)
-    actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
-    with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
-        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
-
-
-def _add_cloud_staging(cloud_archive_release, openstack_release):
-    """Add the cloud staging repository, which lives at
-    ppa:ubuntu-cloud-archive/<openstack_release>-staging
-
-    This function checks that the cloud_archive_release matches the current
-    codename for the distro that the charm is being installed on.
-
-    :param cloud_archive_release: string, codename for the release.
-    :param openstack_release: String, codename for the openstack release.
-    :raises: SourceConfigError if the cloud_archive_release doesn't match the
-        current version of the os.
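-
-    For example, 'cloud:xenial-newton/staging' resolves to the
-    ppa:ubuntu-cloud-archive/newton-staging PPA.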
- """ - _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) - ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release) - cmd = 'add-apt-repository -y {}'.format(ppa) - _run_with_retries(cmd.split(' ')) - - -def _add_cloud_distro_check(cloud_archive_release, openstack_release): - """Add the cloud pocket, but also check the cloud_archive_release against - the current distro, and use the openstack_release as the full lookup. - - This just calls _add_cloud_pocket() with the openstack_release as pocket - to get the correct cloud-archive.list for dpkg to work with. - - :param cloud_archive_release:String, codename for the distro release. - :param openstack_release: String, spec for the release to look up in the - CLOUD_ARCHIVE_POCKETS - :raises: SourceConfigError if this is the wrong distro, or the pocket spec - doesn't exist. - """ - _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) - _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release)) - - -def _verify_is_ubuntu_rel(release, os_release): - """Verify that the release is in the same as the current ubuntu release. - - :param release: String, lowercase for the release. - :param os_release: String, the os_release being asked for - :raises: SourceConfigError if the release is not the same as the ubuntu - release. - """ - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - if release != ubuntu_rel: - raise SourceConfigError( - 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu' - 'version ({})'.format(release, os_release, ubuntu_rel)) - - -def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), - retry_message="", cmd_env=None): - """Run a command and retry until success or max_retries is reached. - - :param: cmd: str: The apt command to run. - :param: max_retries: int: The number of retries to attempt on a fatal - command. Defaults to CMD_RETRY_COUNT. - :param: retry_exitcodes: tuple: Optional additional exit codes to retry. - Defaults to retry on exit code 1. - :param: retry_message: str: Optional log prefix emitted during retries. - :param: cmd_env: dict: Environment variables to add to the command run. - """ - - env = None - kwargs = {} - if cmd_env: - env = os.environ.copy() - env.update(cmd_env) - kwargs['env'] = env - - if not retry_message: - retry_message = "Failed executing '{}'".format(" ".join(cmd)) - retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) - - retry_count = 0 - result = None - - retry_results = (None,) + retry_exitcodes - while result in retry_results: - try: - # result = subprocess.check_call(cmd, env=env) - result = subprocess.check_call(cmd, **kwargs) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > max_retries: - raise - result = e.returncode - log(retry_message) - time.sleep(CMD_RETRY_DELAY) - - -def _run_apt_command(cmd, fatal=False): - """Run an apt command with optional retries. - - :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. - """ - # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. 
-    cmd_env = {
-        'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
-
-    if fatal:
-        _run_with_retries(
-            cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
-            retry_message="Couldn't acquire DPKG lock")
-    else:
-        env = os.environ.copy()
-        env.update(cmd_env)
-        subprocess.call(cmd, env=env)
-
-
-def get_upstream_version(package):
-    """Determine upstream version based on installed package
-
-    @returns None (if not installed) or the upstream version
-    """
-    import apt_pkg
-    cache = apt_cache()
-    try:
-        pkg = cache[package]
-    except Exception:
-        # the package is unknown to the current apt cache.
-        return None
-
-    if not pkg.current_ver:
-        # package is known, but no version is currently installed.
-        return None
-
-    return apt_pkg.upstream_version(pkg.current_ver.ver_str)
diff --git a/hooks/charmhelpers/osplatform.py b/hooks/charmhelpers/osplatform.py
deleted file mode 100644
index d9a4d5c..0000000
--- a/hooks/charmhelpers/osplatform.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import platform
-
-
-def get_platform():
-    """Return the current OS platform.
-
-    For example: if the current OS platform is Ubuntu then the string
-    "ubuntu" will be returned (which is the name of the module).
-    This string is used to decide which platform module should be imported.
-    """
-    # linux_distribution is deprecated and will be removed in Python 3.7
-    # Warnings *not* disabled, as we certainly need to fix this.
-    tuple_platform = platform.linux_distribution()
-    current_platform = tuple_platform[0]
-    if "Ubuntu" in current_platform:
-        return "ubuntu"
-    elif "CentOS" in current_platform:
-        return "centos"
-    elif "debian" in current_platform:
-        # Stock Python does not detect Ubuntu and instead returns debian.
-        # Or at least it does in some build environments like Travis CI.
-        return "ubuntu"
-    else:
-        raise RuntimeError("This module is not supported on {}."
-                           .format(current_platform))
diff --git a/hooks/charmhelpers/payload/__init__.py b/hooks/charmhelpers/payload/__init__.py
deleted file mode 100644
index ee55cb3..0000000
--- a/hooks/charmhelpers/payload/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"Tools for working with files injected into a charm just before deployment."
diff --git a/hooks/charmhelpers/payload/archive.py b/hooks/charmhelpers/payload/archive.py
deleted file mode 100644
index 7fc453f..0000000
--- a/hooks/charmhelpers/payload/archive.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import tarfile
-import zipfile
-from charmhelpers.core import (
-    host,
-    hookenv,
-)
-
-
-class ArchiveError(Exception):
-    pass
-
-
-def get_archive_handler(archive_name):
-    if os.path.isfile(archive_name):
-        if tarfile.is_tarfile(archive_name):
-            return extract_tarfile
-        elif zipfile.is_zipfile(archive_name):
-            return extract_zipfile
-    else:
-        # look at the file name
-        for ext in ('.tar', '.tar.gz', '.tgz', '.tar.bz2', '.tbz2', '.tbz'):
-            if archive_name.endswith(ext):
-                return extract_tarfile
-        for ext in ('.zip', '.jar'):
-            if archive_name.endswith(ext):
-                return extract_zipfile
-
-
-def archive_dest_default(archive_name):
-    archive_file = os.path.basename(archive_name)
-    return os.path.join(hookenv.charm_dir(), "archives", archive_file)
-
-
-def extract(archive_name, destpath=None):
-    handler = get_archive_handler(archive_name)
-    if handler:
-        if not destpath:
-            destpath = archive_dest_default(archive_name)
-        if not os.path.isdir(destpath):
-            host.mkdir(destpath)
-        handler(archive_name, destpath)
-        return destpath
-    else:
-        raise ArchiveError("No handler for archive")
-
-
-def extract_tarfile(archive_name, destpath):
-    "Unpack a tar archive, optionally compressed"
-    archive = tarfile.open(archive_name)
-    archive.extractall(destpath)
-
-
-def extract_zipfile(archive_name, destpath):
-    "Unpack a zip file"
-    archive = zipfile.ZipFile(archive_name)
-    archive.extractall(destpath)
diff --git a/hooks/charmhelpers/payload/execd.py b/hooks/charmhelpers/payload/execd.py
deleted file mode 100644
index 1502aa0..0000000
--- a/hooks/charmhelpers/payload/execd.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-import subprocess
-from charmhelpers.core import hookenv
-
-
-def default_execd_dir():
-    return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
-
-
-def execd_module_paths(execd_dir=None):
-    """Generate a list of full paths to modules within execd_dir."""
-    if not execd_dir:
-        execd_dir = default_execd_dir()
-
-    if not os.path.exists(execd_dir):
-        return
-
-    for subpath in os.listdir(execd_dir):
-        module = os.path.join(execd_dir, subpath)
-        if os.path.isdir(module):
-            yield module
-
-
-def execd_submodule_paths(command, execd_dir=None):
-    """Generate a list of full paths to the specified command within execd_dir.
- """ - for module_path in execd_module_paths(execd_dir): - path = os.path.join(module_path, command) - if os.access(path, os.X_OK) and os.path.isfile(path): - yield path - - -def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT): - """Run command for each module within execd_dir which defines it.""" - for submodule_path in execd_submodule_paths(command, execd_dir): - try: - subprocess.check_output(submodule_path, stderr=stderr, - universal_newlines=True) - except subprocess.CalledProcessError as e: - hookenv.log("Error ({}) running {}. Output: {}".format( - e.returncode, e.cmd, e.output)) - if die_on_error: - sys.exit(e.returncode) - - -def execd_preinstall(execd_dir=None): - """Run charm-pre-install for each module within execd_dir.""" - execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/hooks/config-changed b/hooks/config-changed deleted file mode 120000 index 7bd4378..0000000 --- a/hooks/config-changed +++ /dev/null @@ -1 +0,0 @@ -odl_controller_hooks.py \ No newline at end of file diff --git a/hooks/controller-api-relation-joined b/hooks/controller-api-relation-joined deleted file mode 120000 index 7bd4378..0000000 --- a/hooks/controller-api-relation-joined +++ /dev/null @@ -1 +0,0 @@ -odl_controller_hooks.py \ No newline at end of file diff --git a/hooks/install b/hooks/install deleted file mode 100755 index 83a9d3c..0000000 --- a/hooks/install +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Wrapper to deal with newer Ubuntu versions that don't have py2 installed -# by default. - -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') - -check_and_install() { - pkg="${1}-${2}" - if ! dpkg -s ${pkg} 2>&1 > /dev/null; then - apt-get -y install ${pkg} - fi -} - -PYTHON="python" - -for dep in ${DEPS[@]}; do - check_and_install ${PYTHON} ${dep} -done - -exec ./hooks/install.real diff --git a/hooks/install.real b/hooks/install.real deleted file mode 120000 index 7bd4378..0000000 --- a/hooks/install.real +++ /dev/null @@ -1 +0,0 @@ -odl_controller_hooks.py \ No newline at end of file diff --git a/hooks/odl_controller_hooks.py b/hooks/odl_controller_hooks.py deleted file mode 100755 index 3170f1a..0000000 --- a/hooks/odl_controller_hooks.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import shutil -from subprocess import check_call -import sys - -from charmhelpers.core.hookenv import ( - Hooks, - UnregisteredHookError, - config, - log, - relation_set, - relation_ids, -) - -from charmhelpers.core.host import ( - adduser, - mkdir, - restart_on_change, - service_start, - init_is_systemd, - service, -) - -from charmhelpers.fetch import ( - configure_sources, apt_install, install_remote) - -from odl_controller_utils import write_mvn_config, process_odl_cmds -from odl_controller_utils import PROFILES, assess_status - -PACKAGES = ["default-jre-headless", "python-jinja2"] -KARAF_PACKAGE = "opendaylight-karaf" - -hooks = Hooks() -config = config() - - -@hooks.hook("config-changed") -@restart_on_change({"/home/opendaylight/.m2/settings.xml": ["odl-controller"]}) -def config_changed(): - process_odl_cmds(PROFILES[config["profile"]]) - for r_id in relation_ids("controller-api"): - controller_api_joined(r_id) - write_mvn_config() - - -@hooks.hook("controller-api-relation-joined") -def controller_api_joined(r_id=None): - relation_set(relation_id=r_id, - port=PROFILES[config["profile"]]["port"], - username="admin", password="admin") - - -@hooks.hook('install.real') -def install(): - if config.get("install-sources"): - configure_sources(update=True, sources_var="install-sources", - keys_var="install-keys") - - # install packages - apt_install(PACKAGES, fatal=True) - - install_url = config["install-url"] - if install_url: - # install opendaylight from tarball - - # this extracts the archive too - install_remote(install_url, dest="/opt") - # The extracted dirname. Look at what's on disk instead of mangling, so - # the distribution tar.gz's name doesn't matter. - install_dir_name = [ - f for f in os.listdir("/opt") - if f.startswith("distribution-karaf")][0] - if not os.path.exists("/opt/opendaylight-karaf"): - os.symlink(install_dir_name, "/opt/opendaylight-karaf") - else: - apt_install([KARAF_PACKAGE], fatal=True) - install_dir_name = "opendaylight-karaf" - - if init_is_systemd(): - shutil.copy("files/odl-controller.service", "/lib/systemd/system") - service('enable', 'odl-controller') - else: - shutil.copy("files/odl-controller.conf", "/etc/init") - - adduser("opendaylight", system_user=True) - mkdir("/home/opendaylight", owner="opendaylight", group="opendaylight", - perms=0755) - check_call( - ["chown", "-R", "opendaylight:opendaylight", - os.path.join("/opt", install_dir_name)]) - mkdir("/var/log/opendaylight", owner="opendaylight", group="opendaylight", - perms=0755) - - # install features - write_mvn_config() - service_start("odl-controller") - - -def main(): - try: - hooks.execute(sys.argv) - except UnregisteredHookError as e: - log("Unknown hook {} - skipping.".format(e)) - assess_status() - - -@hooks.hook("ovsdb-manager-relation-joined") -def ovsdb_manager_joined(): - relation_set(port=6640, protocol="tcp") - - -@hooks.hook("upgrade-charm") -def upgrade_charm(): - pass - -if __name__ == "__main__": - main() diff --git a/hooks/odl_controller_utils.py b/hooks/odl_controller_utils.py deleted file mode 100644 index 3a3011a..0000000 --- a/hooks/odl_controller_utils.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import subprocess -from os import environ -import urlparse - -from charmhelpers.core.templating import render -from charmhelpers.core.hookenv import config, status_set -from charmhelpers.core.decorators import retry_on_exception -from charmhelpers.core.host import service_running - - -PROFILES = { - "cisco-vpp": { - "feature:install": ["cosc-cvpn-ovs-rest", - "odl-netconf-connector-all"], - "log:set": { - "TRACE": ["cosc-cvpn-ovs-rest", - "odl-netconf-connector-all"], - }, - "port": 8181 - }, - "openvswitch-odl": { - "feature:install": ["odl-base-all", "odl-aaa-authn", - "odl-restconf", "odl-nsf-all", - "odl-adsal-northbound", - "odl-mdsal-apidocs", - "odl-ovsdb-openstack", - "odl-ovsdb-northbound", - "odl-dlux-core"], - "port": 8080 - }, - "openvswitch-odl-lithium": { - "feature:install": ["odl-ovsdb-openstack"], - "port": 8080 - }, - "openvswitch-odl-beryllium": { - "feature:install": ["odl-ovsdb-openstack", - "odl-restconf", - "odl-aaa-authn", - "odl-dlux-all"], - "port": 8080 - }, - "openvswitch-odl-beryllium-l3": { - "feature:install": ["odl-ovsdb-openstack"], - "port": 8080 - }, - "openvswitch-odl-beryllium-sfc": { - "feature:install": ["odl-ovsdb-openstack", - "odl-sfc-core", - "odl-sfc-sb-rest", - "odl-sfc-ui", - "odl-sfc-netconf", - "odl-sfc-ovs", - "odl-sfcofl2", - "odl-sfc-test-consumer"], - "port": 8080 - }, - "openvswitch-odl-beryllium-vpn": { - "feature:install": ["odl-ovsdb-openstack", - "odl-vpnservice-api", - "odl-vpnservice-impl", - "odl-vpnservice-impl-rest", - "odl-vpnservice-impl-ui", - "odl-vpnservice-core"], - "port": 8080 - }, - "openvswitch-odl-boron": { - "feature:install": ["odl-netvirt-openstack", - "odl-dlux-all"], - "port": 8080 - }, - "openvswitch-odl-boron-sfc": { - "feature:install": ["odl-ovsdb-sfc-rest", - "odl-dlux-all"], - "port": 8080 - }, -} -PROFILES["default"] = PROFILES["openvswitch-odl"] - - -def mvn_ctx(): - ctx = {} - ctx.update(mvn_proxy_ctx("http")) - ctx.update(mvn_proxy_ctx("https")) - return ctx - - -def mvn_proxy_ctx(protocol): - ctx = {} - proxy = config("{}-proxy".format(protocol)) - key = protocol + "_proxy" - if proxy: - url = urlparse.urlparse(proxy) - elif key in environ: - url = urlparse.urlparse(environ[key]) - else: - url = None - - if url: - hostname = url.hostname - if hostname: - ctx[key] = True - ctx[protocol + "_proxy_host"] = hostname - port = url.port - ctx[protocol + "_proxy_port"] = port if port else 80 - username = url.username - if username: - ctx[protocol + "_proxy_username"] = username - ctx[protocol + "_proxy_password"] = url.password - no_proxy = [] - if "no_proxy" in environ: - np = environ["no_proxy"] - if np: - no_proxy = np.split(",") - ctx[protocol + "_noproxy"] = no_proxy - return ctx - - -def write_mvn_config(): - ctx = mvn_ctx() - render("settings.xml", "/home/opendaylight/.m2/settings.xml", ctx, - "opendaylight", "opendaylight", 0400) - - -@retry_on_exception(5, base_delay=10, exc_type=subprocess.CalledProcessError) -def run_odl(cmds, host="localhost", port=8101, retries=20, user="karaf"): - run_cmd = ["/opt/opendaylight-karaf/bin/client", "-r", str(retries), - "-h", host, "-a", str(port), "-u", 
str(user)] - run_cmd.extend(cmds) - output = subprocess.check_output(run_cmd) - return output - - -def installed_features(): - installed = [] - out = run_odl(["feature:list"]) - for line in out.split("\n"): - columns = line.split("|") - if len(columns) > 2: - install_flag = columns[2].replace(" ", "") - if install_flag == "x": - installed.append(columns[0].replace(" ", "")) - return installed - - -def filter_installed(features): - installed = installed_features() - whitelist = [feature for feature in features if feature not in installed] - return whitelist - - -def process_odl_cmds(odl_cmds): - features = filter_installed(odl_cmds.get("feature:install", [])) - if features: - run_odl(["feature:install"] + features) - logging = odl_cmds.get("log:set") - if logging: - for log_level in logging.keys(): - for target in logging[log_level]: - run_odl(["log:set", log_level, target]) - - -def assess_status(): - '''Assess unit status and inform juju using status-set''' - if service_running('odl-controller'): - status_set('active', 'Unit is ready') - else: - status_set('blocked', 'ODL controller not running') diff --git a/hooks/ovsdb-manager-relation-joined b/hooks/ovsdb-manager-relation-joined deleted file mode 120000 index 7bd4378..0000000 --- a/hooks/ovsdb-manager-relation-joined +++ /dev/null @@ -1 +0,0 @@ -odl_controller_hooks.py \ No newline at end of file diff --git a/hooks/start b/hooks/start deleted file mode 120000 index 7bd4378..0000000 --- a/hooks/start +++ /dev/null @@ -1 +0,0 @@ -odl_controller_hooks.py \ No newline at end of file diff --git a/hooks/stop b/hooks/stop deleted file mode 120000 index 7bd4378..0000000 --- a/hooks/stop +++ /dev/null @@ -1 +0,0 @@ -odl_controller_hooks.py \ No newline at end of file diff --git a/hooks/upgrade-charm b/hooks/upgrade-charm deleted file mode 120000 index 7bd4378..0000000 --- a/hooks/upgrade-charm +++ /dev/null @@ -1 +0,0 @@ -odl_controller_hooks.py \ No newline at end of file diff --git a/icon.svg b/icon.svg deleted file mode 100644 index 6d587bd..0000000 --- a/icon.svg +++ /dev/null @@ -1,394 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - - - - - - diff --git a/lib/.keep b/lib/.keep deleted file mode 100644 index f49b91a..0000000 --- a/lib/.keep +++ /dev/null @@ -1,3 +0,0 @@ - This file was created by release-tools to ensure that this empty - directory is preserved in vcs re: lint check definitions in global - tox.ini files. This file can be removed if/when this dir is actually in use. diff --git a/metadata.yaml b/metadata.yaml deleted file mode 100644 index 5674f14..0000000 --- a/metadata.yaml +++ /dev/null @@ -1,20 +0,0 @@ -name: odl-controller -summary: OpenDaylight Controller -maintainer: OpenStack Charmers -description: | - OpenDaylight is a network virtualization solution that provides an overlay - virtual-network to virtual-machines, containers or network namespaces. - . - This charm provides the controller component. -tags: - - openstack -series: - - xenial - - bionic - - cosmic - - trusty -provides: - controller-api: - interface: odl-controller-api - ovsdb-manager: - interface: ovsdb-manager diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 4f5de86..0000000 --- a/requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -# This file is managed centrally. If you find the need to modify this as a -# one-off, please don't. 
Instead, consult #openstack-charms and ask about
-# requirements management in charms via bot-control. Thank you.
-pbr>=1.8.0,<1.9.0
-simplejson>=2.2.0
-netifaces>=0.10.4
-netaddr>=0.7.12,!=0.7.16
-Jinja2>=2.6  # BSD License (3 clause)
-six>=1.9.0
-dnspython>=1.12.0
-psutil>=1.1.1,<2.0.0
diff --git a/templates/settings.xml b/templates/settings.xml
deleted file mode 100644
index 284f586..0000000
--- a/templates/settings.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
-          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-          xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
-                              http://maven.apache.org/xsd/settings-1.0.0.xsd">
-  <proxies>
-    {%- if http_proxy %}
-    <proxy>
-      <id>http_proxy</id>
-      <active>true</active>
-      <protocol>http</protocol>
-      <host>{{ http_proxy_host }}</host>
-      <port>{{ http_proxy_port }}</port>
-      {%- if http_proxy_username %}
-      <username>{{ http_proxy_username }}</username>
-      <password>{{ http_proxy_password }}</password>
-      {%- endif %}
-      <nonProxyHosts>{{ http_noproxy|join("|") }}</nonProxyHosts>
-    </proxy>
-    {%- endif %}
-    {%- if https_proxy %}
-    <proxy>
-      <id>https_proxy</id>
-      <active>true</active>
-      <protocol>https</protocol>
-      <host>{{ https_proxy_host }}</host>
-      <port>{{ https_proxy_port }}</port>
-      {%- if https_proxy_username %}
-      <username>{{ https_proxy_username }}</username>
-      <password>{{ https_proxy_password }}</password>
-      {%- endif %}
-      <nonProxyHosts>{{ https_noproxy|join("|") }}</nonProxyHosts>
-    </proxy>
-    {%- endif %}
-  </proxies>
-</settings>
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644
index f013863..0000000
--- a/test-requirements.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-# This file is managed centrally. If you find the need to modify this as a
-# one-off, please don't. Instead, consult #openstack-charms and ask about
-# requirements management in charms via bot-control. Thank you.
-charm-tools>=2.4.4
-coverage>=3.6
-mock>=1.2
-flake8>=2.2.4,<=2.4.1
-os-testr>=0.4.1
-requests>=2.18.4
-# BEGIN: Amulet OpenStack Charm Helper Requirements
-# Liberty client lower constraints
-amulet>=1.14.3,<2.0;python_version=='2.7'
-bundletester>=0.6.1,<1.0;python_version=='2.7'
-aodhclient>=0.1.0
-gnocchiclient>=3.1.0,<3.2.0
-python-barbicanclient>=4.0.1
-python-ceilometerclient>=1.5.0
-python-cinderclient>=1.4.0
-python-designateclient>=1.5
-python-glanceclient>=1.1.0
-python-heatclient>=0.8.0
-python-keystoneclient>=1.7.1
-python-manilaclient>=1.8.1
-python-neutronclient>=3.1.0
-python-novaclient>=2.30.1
-python-openstackclient>=1.7.0
-python-swiftclient>=2.6.0
-pika>=0.10.0,<1.0
-distro-info
-git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
-# END: Amulet OpenStack Charm Helper Requirements
-pytz  # workaround for 14.04 pip/tox
-pyudev  # for ceph-* charm unit tests (not mocked?)
diff --git a/tests/README.md b/tests/README.md
deleted file mode 100644
index 046be7f..0000000
--- a/tests/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Overview
-
-This directory provides Amulet tests to verify basic deployment functionality
-from the perspective of this charm, its requirements and its features, as
-exercised in a subset of the full OpenStack deployment test bundle topology.
-
-For full details on functional testing of OpenStack charms please refer to
-the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing)
-section of the OpenStack Charm Guide.
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
deleted file mode 100644
index af0dd3b..0000000
--- a/tests/basic_deployment.py
+++ /dev/null
@@ -1,541 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import amulet
-import os
-
-from neutronclient.v2_0 import client as neutronclient
-
-from charmhelpers.contrib.openstack.amulet.deployment import (
-    OpenStackAmuletDeployment
-)
-
-from charmhelpers.contrib.openstack.amulet.utils import (
-    OpenStackAmuletUtils,
-    DEBUG,
-    # ERROR
-)
-
-from novaclient import exceptions
-
-
-class NovaOpenStackAmuletUtils(OpenStackAmuletUtils):
-    """Nova-based helper extending the base helper for creation of flavors"""
-
-    def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto",
-                      ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
-        """Create the specified flavor."""
-        try:
-            nova.flavors.find(name=name)
-        except (exceptions.NotFound, exceptions.NoUniqueMatch):
-            self.log.debug('Creating flavor ({})'.format(name))
-            nova.flavors.create(name, ram, vcpus, disk, flavorid,
-                                ephemeral, swap, rxtx_factor, is_public)
-
-
-# Use DEBUG to turn on debug logging
-u = NovaOpenStackAmuletUtils(DEBUG)
-
-ODL_PROFILES = {
-    'helium': {
-        'location': 'AMULET_ODL_LOCATION',
-        'profile': 'openvswitch-odl'
-    },
-    'beryllium': {
-        'location': 'AMULET_ODL_BE_LOCATION',
-        'profile': 'openvswitch-odl-beryllium'
-    },
-    'boron': {
-        'location': 'AMULET_ODL_BO_LOCATION',
-        'profile': 'openvswitch-odl-boron'
-    },
-}
-
-
-class ODLControllerBasicDeployment(OpenStackAmuletDeployment):
-    """Amulet tests on a basic OVS ODL deployment."""
-
-    def __init__(self, series, openstack=None, source=None, git=False,
-                 stable=False, odl_version='helium'):
-        """Deploy the entire test environment."""
-        super(ODLControllerBasicDeployment, self).__init__(series, openstack,
                                                            source, stable)
-        self.odl_version = odl_version
-        self._add_services()
-        self._add_relations()
-        self._configure_services()
-        self._deploy()
-        exclude_services = ['odl-controller', 'neutron-api-odl']
-        self._auto_wait_for_status(exclude_services=exclude_services)
-        self.d.sentry.wait()
-        self._initialize_tests()
-
-    def _add_services(self):
-        """Add services
-
-        Add the services that we're testing, where odl-controller is local,
-        and the rest of the services are from lp branches that are
-        compatible with the local charm (e.g. stable or next).
- """ - this_service = { - 'name': 'odl-controller', - 'constraints': {'mem': '8G'}, - } - other_services = [ - {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, - {'name': 'rabbitmq-server'}, - {'name': 'keystone'}, - {'name': 'nova-cloud-controller'}, - {'name': 'neutron-gateway'}, - {'name': 'neutron-api-odl'}, - {'name': 'openvswitch-odl'}, - {'name': 'neutron-api'}, - {'name': 'nova-compute'}, - {'name': 'glance'}, - ] - - super(ODLControllerBasicDeployment, self)._add_services( - this_service, other_services) - - def _add_relations(self): - """Add all of the relations for the services.""" - relations = { - 'keystone:shared-db': 'percona-cluster:shared-db', - 'neutron-gateway:amqp': 'rabbitmq-server:amqp', - 'nova-cloud-controller:quantum-network-service': - 'neutron-gateway:quantum-network-service', - 'nova-cloud-controller:shared-db': 'percona-cluster:shared-db', - 'nova-cloud-controller:identity-service': 'keystone:' - 'identity-service', - 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp', - 'neutron-api:shared-db': 'percona-cluster:shared-db', - 'neutron-api:amqp': 'rabbitmq-server:amqp', - 'neutron-api:neutron-api': 'nova-cloud-controller:neutron-api', - 'neutron-api:identity-service': 'keystone:identity-service', - 'neutron-api:neutron-plugin-api-subordinate': - 'neutron-api-odl:neutron-plugin-api-subordinate', - 'neutron-gateway:juju-info': 'openvswitch-odl:container', - 'openvswitch-odl:ovsdb-manager': 'odl-controller:ovsdb-manager', - 'neutron-api-odl:odl-controller': 'odl-controller:controller-api', - 'glance:identity-service': 'keystone:identity-service', - 'glance:shared-db': 'percona-cluster:shared-db', - 'glance:amqp': 'rabbitmq-server:amqp', - 'nova-compute:image-service': 'glance:image-service', - 'nova-compute:amqp': 'rabbitmq-server:amqp', - 'nova-cloud-controller:cloud-compute': 'nova-compute:' - 'cloud-compute', - 'nova-cloud-controller:image-service': 'glance:image-service', - } - super(ODLControllerBasicDeployment, self)._add_relations(relations) - - def _configure_services(self): - """Configure all of the services.""" - neutron_gateway_config = { - 'plugin': 'ovs-odl', - 'instance-mtu': '1400', - } - neutron_api_config = { - 'neutron-security-groups': 'True', - 'manage-neutron-plugin-legacy-mode': 'False', - } - neutron_api_odl_config = { - 'overlay-network-type': 'vxlan gre', - } - - odl_controller_config = {} - if os.environ.get(ODL_PROFILES[self.odl_version]['location']): - odl_controller_config['install-url'] = \ - os.environ.get(ODL_PROFILES[self.odl_version]['location']) - if os.environ.get('AMULET_HTTP_PROXY'): - odl_controller_config['http-proxy'] = \ - os.environ['AMULET_HTTP_PROXY'] - if os.environ.get('AMULET_HTTP_PROXY'): - odl_controller_config['https-proxy'] = \ - os.environ['AMULET_HTTP_PROXY'] - odl_controller_config['profile'] = \ - ODL_PROFILES[self.odl_version]['profile'] - - keystone_config = { - 'admin-password': 'openstack', - 'admin-token': 'ubuntutesting' - } - nova_cc_config = { - 'network-manager': 'Neutron' - } - pxc_config = { - 'dataset-size': '25%', - 'max-connections': 1000, - 'root-password': 'ChangeMe123', - 'sst-password': 'ChangeMe123', - } - configs = { - 'neutron-gateway': neutron_gateway_config, - 'neutron-api': neutron_api_config, - 'neutron-api-odl': neutron_api_odl_config, - 'odl-controller': odl_controller_config, - 'keystone': keystone_config, - 'nova-cloud-controller': nova_cc_config, - 'percona-cluster': pxc_config, - } - super(ODLControllerBasicDeployment, self)._configure_services(configs) - - def 
_initialize_tests(self): - """Perform final initialization before tests get run.""" - # Access the sentries for inspecting service units - self.pxc_sentry = self.d.sentry['percona-cluster'][0] - self.keystone_sentry = self.d.sentry['keystone'][0] - self.rmq_sentry = self.d.sentry['rabbitmq-server'][0] - self.nova_cc_sentry = self.d.sentry['nova-cloud-controller'][0] - self.neutron_gateway_sentry = self.d.sentry['neutron-gateway'][0] - self.neutron_api_sentry = self.d.sentry['neutron-api'][0] - self.odl_controller_sentry = self.d.sentry['odl-controller'][0] - self.neutron_api_odl_sentry = self.d.sentry['neutron-api-odl'][0] - self.openvswitch_odl_sentry = self.d.sentry['openvswitch-odl'][0] - - # Authenticate admin with keystone - self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, - user='admin', - password='openstack', - tenant='admin') - # Authenticate admin with neutron - ep = self.keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - self.neutron = neutronclient.Client(auth_url=ep, - username='admin', - password='openstack', - tenant_name='admin', - region_name='RegionOne') - # Authenticate admin with glance endpoint - self.glance = u.authenticate_glance_admin(self.keystone) - # Create a demo tenant/role/user - self.demo_tenant = 'demoTenant' - self.demo_role = 'demoRole' - self.demo_user = 'demoUser' - if not u.tenant_exists(self.keystone, self.demo_tenant): - tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, - description='demo tenant', - enabled=True) - self.keystone.roles.create(name=self.demo_role) - self.keystone.users.create(name=self.demo_user, - password='password', - tenant_id=tenant.id, - email='demo@demo.com') - - # Authenticate demo user with keystone - self.keystone_demo = \ - u.authenticate_keystone_user(self.keystone, user=self.demo_user, - password='password', - tenant=self.demo_tenant) - - # Authenticate demo user with nova-api - self.nova_demo = u.authenticate_nova_user(self.keystone, - user=self.demo_user, - password='password', - tenant=self.demo_tenant) - - # Authenticate admin with nova endpoint - self.nova = u.authenticate_nova_user(self.keystone, - user='admin', - password='openstack', - tenant='admin') - - def test_100_services(self): - """Verify the expected services are running on the corresponding - service units.""" - neutron_services = ['neutron-dhcp-agent', - 'neutron-lbaas-agent', - 'neutron-metadata-agent', - 'neutron-metering-agent', - 'neutron-l3-agent'] - - odl_c_services = ['odl-controller'] - - commands = { - self.neutron_gateway_sentry: neutron_services, - self.odl_controller_sentry: odl_c_services, - } - - if self._get_openstack_release() >= self.xenial_newton: - commands[self.neutron_gateway_sentry].remove( - 'neutron-lbaas-agent' - ) - commands[self.neutron_gateway_sentry].append( - 'neutron-lbaasv2-agent' - ) - - ret = u.validate_services_by_name(commands) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_102_service_catalog(self): - """Verify that the service catalog endpoint data is valid.""" - u.log.debug('Checking keystone service catalog...') - endpoint_check = { - 'adminURL': u.valid_url, - 'id': u.not_null, - 'region': 'RegionOne', - 'publicURL': u.valid_url, - 'internalURL': u.valid_url - } - expected = { - 'network': [endpoint_check], - 'compute': [endpoint_check], - 'identity': [endpoint_check] - } - actual = self.keystone.service_catalog.get_endpoints() - - ret = u.validate_svc_catalog_endpoint_data(expected, actual) - if ret: - 
amulet.raise_status(amulet.FAIL, msg=ret)
-
-    def test_104_network_endpoint(self):
-        """Verify the neutron network endpoint data."""
-        u.log.debug('Checking neutron network api endpoint data...')
-        endpoints = self.keystone.endpoints.list()
-        admin_port = internal_port = public_port = '9696'
-        expected = {
-            'id': u.not_null,
-            'region': 'RegionOne',
-            'adminurl': u.valid_url,
-            'internalurl': u.valid_url,
-            'publicurl': u.valid_url,
-            'service_id': u.not_null
-        }
-        ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
-                                       public_port, expected)
-
-        if ret:
-            amulet.raise_status(amulet.FAIL,
-                                msg='network endpoint: {}'.format(ret))
-
-    def test_110_users(self):
-        """Verify expected users."""
-        u.log.debug('Checking keystone users...')
-        expected = [
-            {'name': 'admin',
-             'enabled': True,
-             'tenantId': u.not_null,
-             'id': u.not_null,
-             'email': 'juju@localhost'},
-            {'name': 'neutron',
-             'enabled': True,
-             'tenantId': u.not_null,
-             'id': u.not_null,
-             'email': 'juju@localhost'}
-        ]
-
-        if self._get_openstack_release() >= self.xenial_ocata:
-            # Ocata or later
-            expected.append({
-                'name': 'placement_nova',
-                'enabled': True,
-                'tenantId': u.not_null,
-                'id': u.not_null,
-                'email': 'juju@localhost'
-            })
-        elif self._get_openstack_release() >= self.trusty_kilo:
-            # Kilo or later
-            expected.append({
-                'name': 'nova',
-                'enabled': True,
-                'tenantId': u.not_null,
-                'id': u.not_null,
-                'email': 'juju@localhost'
-            })
-        else:
-            # Juno and earlier
-            expected.append({
-                'name': 's3_ec2_nova',
-                'enabled': True,
-                'tenantId': u.not_null,
-                'id': u.not_null,
-                'email': 'juju@localhost'
-            })
-
-        actual = self.keystone.users.list()
-        ret = u.validate_user_data(expected, actual)
-        if ret:
-            amulet.raise_status(amulet.FAIL, msg=ret)
-
-    def test_200_odl_controller_controller_api_relation(self):
-        """Verify the odl-controller to neutron-api-odl relation data"""
-        u.log.debug('Checking odl-controller to neutron-api-odl relation data')
-        unit = self.odl_controller_sentry
-        relation = ['controller-api', 'neutron-api-odl:odl-controller']
-        expected = {
-            'private-address': u.valid_ip,
-            'username': 'admin',
-            'password': 'admin',
-            'port': '8080',
-        }
-
-        ret = u.validate_relation_data(unit, relation, expected)
-        if ret:
-            message = u.relation_error('odl-controller controller-api', ret)
-            amulet.raise_status(amulet.FAIL, msg=message)
-
-    def test_201_neutron_api_odl_odl_controller_relation(self):
-        """Verify the neutron-api-odl to odl-controller relation data"""
-        u.log.debug('Checking neutron-api-odl to odl-controller relation data')
-        unit = self.neutron_api_odl_sentry
-        relation = ['odl-controller', 'odl-controller:controller-api']
-        expected = {
-            'private-address': u.valid_ip,
-        }
-
-        ret = u.validate_relation_data(unit, relation, expected)
-        if ret:
-            message = u.relation_error('neutron-api-odl odl-controller', ret)
-            amulet.raise_status(amulet.FAIL, msg=message)
-
-    def test_202_odl_controller_ovsdb_manager_relation(self):
-        """Verify the odl-controller to openvswitch-odl relation data"""
-        u.log.debug('Checking odl-controller to openvswitch-odl relation data')
-        unit = self.odl_controller_sentry
-        relation = ['ovsdb-manager', 'openvswitch-odl:ovsdb-manager']
-        expected = {
-            'private-address': u.valid_ip,
-            'protocol': 'tcp',
-            'port': '6640',
-        }
-
-        ret = u.validate_relation_data(unit, relation, expected)
-        if ret:
-            message = u.relation_error('odl-controller openvswitch-odl', ret)
-            amulet.raise_status(amulet.FAIL, msg=message)
-
-    def test_203_openvswitch_odl_ovsdb_manager_relation(self):
- """Verify the openvswitch-odl to odl-controller relation data""" - u.log.debug('Checking openvswitch-odl to odl-controller relation data') - unit = self.openvswitch_odl_sentry - relation = ['ovsdb-manager', 'odl-controller:ovsdb-manager'] - expected = { - 'private-address': u.valid_ip, - } - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('openvswitch-odl to odl-controller', - ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_400_create_network(self): - """Create a network, verify that it exists, and then delete it.""" - u.log.debug('Creating neutron network...') - self.neutron.format = 'json' - net_name = 'ext_net' - - # Verify that the network doesn't exist - networks = self.neutron.list_networks(name=net_name) - net_count = len(networks['networks']) - if net_count != 0: - msg = "Expected zero networks, found {}".format(net_count) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create a network and verify that it exists - network = {'name': net_name} - self.neutron.create_network({'network': network}) - - networks = self.neutron.list_networks(name=net_name) - u.log.debug('Networks: {}'.format(networks)) - net_len = len(networks['networks']) - if net_len != 1: - msg = "Expected 1 network, found {}".format(net_len) - amulet.raise_status(amulet.FAIL, msg=msg) - - u.log.debug('Confirming new neutron network...') - network = networks['networks'][0] - if network['name'] != net_name: - amulet.raise_status(amulet.FAIL, msg="network ext_net not found") - - # Cleanup - u.log.debug('Deleting neutron network...') - self.neutron.delete_network(network['id']) - - def test_400_gateway_bridges(self): - """Ensure that all bridges are present and configured with the - ODL controller as their NorthBound controller URL.""" - odl_ip = self.odl_controller_sentry.relation( - 'ovsdb-manager', - 'openvswitch-odl:ovsdb-manager' - )['private-address'] - # NOTE: 6633 is legacy 6653 is IANA assigned - if self.odl_version == 'helium': - controller_url = "tcp:{}:6633".format(odl_ip) - check_bridges = ['br-int', 'br-ex', 'br-data'] - else: - controller_url = "tcp:{}:6653".format(odl_ip) - # NOTE: later ODL releases only manage br-int - check_bridges = ['br-int'] - cmd = 'ovs-vsctl list-br' - output, _ = self.neutron_gateway_sentry.run(cmd) - bridges = output.split() - u.log.debug('Checking bridge configuration...') - for bridge in check_bridges: - if bridge not in bridges: - amulet.raise_status( - amulet.FAIL, - msg="Missing bridge {} from gateway unit".format(bridge) - ) - u.log.debug('Validating ...') - cmd = 'ovs-vsctl get-controller {}'.format(bridge) - br_controllers, _ = self.neutron_gateway_sentry.run(cmd) - br_controllers = list(set(br_controllers.strip().split('\n'))) - if len(br_controllers) != 1 or br_controllers[0] != controller_url: - status, _ = self.neutron_gateway_sentry.run('ovs-vsctl show') - amulet.raise_status( - amulet.FAIL, - msg="Controller configuration on bridge" - " {} incorrect: !{}! 
not in !{}!\n"
-                        "{}".format(bridge,
-                                    br_controllers,
-                                    controller_url,
-                                    status)
-                )
-
-    def test_400_image_instance_create(self):
-        """Create an image/instance, verify they exist, and delete them."""
-
-        u.log.debug('Checking nova instance creation...')
-
-        image = u.create_cirros_image(self.glance, "cirros-image")
-        if not image:
-            amulet.raise_status(amulet.FAIL, msg="Image create failed")
-
-        # NOTE(jamespage): ensure the required flavor exists (required for
-        # >= newton)
-        u.create_flavor(nova=self.nova,
-                        name='m1.tiny', ram=512, vcpus=1, disk=1)
-
-        instance = u.create_instance(self.nova_demo, "cirros-image", "cirros",
-                                     "m1.tiny")
-        if not instance:
-            amulet.raise_status(amulet.FAIL, msg="Instance create failed")
-
-        found = False
-        for instance in self.nova_demo.servers.list():
-            if instance.name == 'cirros':
-                found = True
-                if instance.status != 'ACTIVE':
-                    msg = "cirros instance is not active"
-                    amulet.raise_status(amulet.FAIL, msg=msg)
-
-        if not found:
-            message = "nova cirros instance does not exist"
-            amulet.raise_status(amulet.FAIL, msg=message)
-
-        u.delete_resource(self.glance.images, image.id,
-                          msg="glance image")
-
-        u.delete_resource(self.nova_demo.servers, instance.id,
-                          msg="nova instance")
diff --git a/tests/gate-basic-trusty-icehouse b/tests/gate-basic-trusty-icehouse
deleted file mode 100755
index 5e2257d..0000000
--- a/tests/gate-basic-trusty-icehouse
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic odl controller deployment on trusty-icehouse."""
-
-from basic_deployment import ODLControllerBasicDeployment
-
-if __name__ == '__main__':
-    deployment = ODLControllerBasicDeployment(series='trusty')
-    deployment.run_tests()
diff --git a/tests/gate-basic-trusty-mitaka b/tests/gate-basic-trusty-mitaka
deleted file mode 100755
index 7b1bf5f..0000000
--- a/tests/gate-basic-trusty-mitaka
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -"""Amulet tests on a basic odl controller deployment on trusty-mitaka.""" - -from basic_deployment import ODLControllerBasicDeployment - -if __name__ == '__main__': - deployment = ODLControllerBasicDeployment(series='trusty', - openstack='cloud:trusty-mitaka', - source='cloud:trusty-updates/mitaka') - deployment.run_tests() diff --git a/tests/gate-basic-xenial-mitaka b/tests/gate-basic-xenial-mitaka deleted file mode 100755 index 8d6cda6..0000000 --- a/tests/gate-basic-xenial-mitaka +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic odl controller deployment on xenial-mitaka.""" - -from basic_deployment import ODLControllerBasicDeployment - -if __name__ == '__main__': - deployment = ODLControllerBasicDeployment(series='xenial', - odl_version='beryllium') - deployment.run_tests() diff --git a/tests/gate-basic-xenial-ocata b/tests/gate-basic-xenial-ocata deleted file mode 100755 index 0195dd1..0000000 --- a/tests/gate-basic-xenial-ocata +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic odl controller deployment on xenial-ocata.""" - -from basic_deployment import ODLControllerBasicDeployment - -if __name__ == '__main__': - deployment = ODLControllerBasicDeployment(series='xenial', - openstack='cloud:xenial-ocata', - source='cloud:xenial-updates/ocata', - odl_version='beryllium') - deployment.run_tests() diff --git a/tests/gate-basic-xenial-pike b/tests/gate-basic-xenial-pike deleted file mode 100644 index 3531d6d..0000000 --- a/tests/gate-basic-xenial-pike +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic odl controller deployment on xenial-pike.""" - -from basic_deployment import ODLControllerBasicDeployment - -if __name__ == '__main__': - deployment = ODLControllerBasicDeployment(series='xenial', - openstack='cloud:xenial-pike', - source='cloud:xenial-updates/pike', - odl_version='beryllium') - deployment.run_tests() diff --git a/tests/tests.yaml b/tests/tests.yaml deleted file mode 100644 index a03e7ba..0000000 --- a/tests/tests.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Bootstrap the model if necessary. -bootstrap: True -# Re-use bootstrap node. -reset: True -# Use tox/requirements to drive the venv instead of bundletester's venv feature. -virtualenv: False -# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. -makefile: [] -# Do not specify juju PPA sources. Juju is presumed to be pre-installed -# and configured in all test runner environments. -#sources: -# Do not specify or rely on system packages. -#packages: -# Do not specify python packages here. Use test-requirements.txt -# and tox instead. ie. The venv is constructed before bundletester -# is invoked. -#python-packages: -reset_timeout: 600 diff --git a/tox.ini b/tox.ini deleted file mode 100644 index a72d281..0000000 --- a/tox.ini +++ /dev/null @@ -1,85 +0,0 @@ -# Classic charm: ./tox.ini -# This file is managed centrally by release-tools and should not be modified -# within individual charm repos. -[tox] -envlist = pep8,py27 -skipsdist = True - -[testenv] -setenv = VIRTUAL_ENV={envdir} - PYTHONHASHSEED=0 - CHARM_DIR={envdir} - AMULET_SETUP_TIMEOUT=5400 -install_command = - pip install {opts} {packages} -commands = ostestr {posargs} -whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_API_* - -[testenv:py27] -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -[testenv:py35] -basepython = python3.5 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - -[testenv:pep8] -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} hooks unit_tests tests actions lib - charm-proof - -[testenv:venv] -commands = {posargs} - -[testenv:func27-noop] -# DRY RUN - For Debug -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy - -[testenv:func27] -# Charm Functional Test -# Run all gate tests which are +x (expected to always pass) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy - -[testenv:func27-smoke] -# Charm Functional Test -# Run a specific test as an Amulet smoke test (expected to always pass) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy - -[testenv:func27-dfs] -# Charm Functional Test -# Run all deploy-from-source tests which are +x (may not always pass!) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy - -[testenv:func27-dev] -# Charm Functional Test -# Run all development test targets which are +x (may not always pass!) 
-basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy - -[flake8] -ignore = E402,E226 -exclude = */charmhelpers diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py deleted file mode 100644 index 8c9a6a7..0000000 --- a/unit_tests/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -sys.path.append('hooks/') diff --git a/unit_tests/odl_outputs.py b/unit_tests/odl_outputs.py deleted file mode 100644 index b02505d..0000000 --- a/unit_tests/odl_outputs.py +++ /dev/null @@ -1,285 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# flake8: noqa -ODL_023_FEATURE_LIST = """ -client: JAVA_HOME not set; results may vary -Name | Version | Installed | Repository | Description ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -odl-aaa-all | 0.1.3-Helium-SR3 | | odl-aaa-0.1.3-Helium-SR3 | OpenDaylight :: AAA :: Authentication :: All Featu -odl-aaa-authn | 0.1.3-Helium-SR3 | x | odl-aaa-0.1.3-Helium-SR3 | OpenDaylight :: AAA :: Authentication -odl-aaa-authn-sssd | 0.1.3-Helium-SR3 | | odl-aaa-0.1.3-Helium-SR3 | OpenDaylight :: AAA :: SSSD Federation -odl-aaa-authn-plugin | 0.1.3-Helium-SR3 | | odl-aaa-0.1.3-Helium-SR3 | OpenDaylight :: AAA :: ODL NETCONF Plugin -framework-security | 3.0.1 | | standard-3.0.1 | OSGi Security for Karaf -standard | 3.0.1 | x | standard-3.0.1 | Karaf standard feature -aries-annotation | 3.0.1 | | standard-3.0.1 | Aries Annotations -wrapper | 3.0.1 | | standard-3.0.1 | Provide OS integration -service-wrapper | 3.0.1 | | standard-3.0.1 | Provide OS integration (alias to wrapper feature) -obr | 3.0.1 | | standard-3.0.1 | Provide OSGi Bundle Repository (OBR) support -config | 3.0.1 | x | standard-3.0.1 | Provide OSGi ConfigAdmin support -region | 3.0.1 | | standard-3.0.1 | Provide Region Support -package | 3.0.1 | x | standard-3.0.1 | Package commands and mbeans -http | 3.0.1 | x | standard-3.0.1 | Implementation of the OSGI HTTP Service -http-whiteboard | 3.0.1 | | standard-3.0.1 | Provide HTTP Whiteboard pattern support -war | 3.0.1 | x | standard-3.0.1 | Turn Karaf as a full WebContainer -jetty | 8.1.9.v20130131 | | standard-3.0.1 | -kar | 3.0.1 | x | standard-3.0.1 | Provide KAR (KARaf archive) support -webconsole | 3.0.1 | | standard-3.0.1 | Base support of the Karaf WebConsole -ssh | 3.0.1 | x | standard-3.0.1 | Provide a SSHd server on Karaf -management | 3.0.1 | x | standard-3.0.1 | Provide a JMX MBeanServer and a set of MBeans in K -scheduler | 3.0.1 | | standard-3.0.1 | Provide a scheduler service in Karaf to fire event -eventadmin | 3.0.1 | | standard-3.0.1 | OSGi Event Admin service specification for event-b -jasypt-encryption | 3.0.1 | | standard-3.0.1 | Advanced encryption support for Karaf security -scr | 3.0.1 | | standard-3.0.1 | Declarative Service support -blueprint-web | 3.0.1 | | standard-3.0.1 | Provides an OSGI-aware Servlet ContextListener for -odl-l2switch-all | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: All -odl-l2switch-switch | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: Switch -odl-l2switch-switch-rest | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: Switch -odl-l2switch-switch-ui | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: Switch -odl-l2switch-hosttracker | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: HostTracker -odl-l2switch-addresstracker | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: AddressTracker -odl-l2switch-arphandler | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: ArpHandler -odl-l2switch-loopremover | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: LoopRemover -odl-l2switch-packethandler | 0.1.3-Helium-SR3 | | l2switch-0.1.3-Helium-SR3 | OpenDaylight :: L2Switch :: PacketHandler -odl-packetcable-all | 1.1.3-Helium-SR3 | | odl-packetcable-1.1.3-Helium-SR3 | OpenDaylight :: packetcable :: 
All -odl-packetcable-consumer | 1.1.3-Helium-SR3 | | odl-packetcable-1.1.3-Helium-SR3 | OpenDaylight :: packetcable :: Consumer -odl-packetcable-model | 1.1.3-Helium-SR3 | | odl-packetcable-1.1.3-Helium-SR3 | OpenDaylight :: packetcable :: Model -odl-packetcable-provider | 1.1.3-Helium-SR3 | | odl-packetcable-1.1.3-Helium-SR3 | OpenDaylight :: packetcable :: Provider -odl-packetcable-driver | 1.1.3-Helium-SR3 | | odl-packetcable-1.1.3-Helium-SR3 | OpenDaylight :: packetcable :: Driver -odl-netconf-connector-all | 1.1.3-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Netconf Connector :: All -odl-netconf-connector | 1.1.3-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Netconf Connector :: Netconf Conne -odl-netconf-connector-ssh | 1.1.3-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Netconf Connector :: Netconf Conne -odl-netconf-ssh | 0.2.8-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Netconf Connector :: SSH -odl-netconf-tcp | 0.2.8-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Netconf Connector :: TCP -odl-snmp4sdn-all | 0.1.6-Helium-SR3 | | odl-snmp4sdn-0.1.6-Helium-SR3 | OpenDaylight :: SNMP4SDN :: All -odl-snmp4sdn-snmp4sdn | 0.1.6-Helium-SR3 | | odl-snmp4sdn-0.1.6-Helium-SR3 | OpenDaylight :: SNMP4SDN :: Plugin -odl-plugin2oc | 0.1.3-Helium-SR3 | | odl-plugin2oc-0.1.3-Helium-SR3 | OpenDaylight :: plugin2oc :: Plugin -odl-config-all | 0.2.8-Helium-SR3 | | odl-config-0.2.8-Helium-SR3 | OpenDaylight :: Config :: All -odl-mdsal-common | 1.1.3-Helium-SR3 | x | odl-config-0.2.8-Helium-SR3 | OpenDaylight :: Config :: All -odl-config-api | 0.2.8-Helium-SR3 | x | odl-config-0.2.8-Helium-SR3 | OpenDaylight :: Config :: API -odl-config-netty-config-api | 0.2.8-Helium-SR3 | x | odl-config-0.2.8-Helium-SR3 | OpenDaylight :: Config :: Netty Config API -odl-config-core | 0.2.8-Helium-SR3 | x | odl-config-0.2.8-Helium-SR3 | OpenDaylight :: Config :: Core -odl-config-manager | 0.2.8-Helium-SR3 | x | odl-config-0.2.8-Helium-SR3 | OpenDaylight :: Config :: Manager -odl-adsal-all | 0.8.4-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight AD-SAL All Features -odl-adsal-core | 0.8.4-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Core -odl-adsal-networkconfiguration | 0.0.6-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Network Configuration -odl-adsal-connection | 0.1.5-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Connection -odl-adsal-clustering | 0.5.4-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Clustering -odl-adsal-configuration | 0.4.6-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Configuration -odl-adsal-thirdparty | 0.8.4-Helium-SR3 | x | adsal-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Third-Party Depenencies -odl-config-netty | 0.2.8-Helium-SR3 | x | odl-config-persister-0.2.8-Helium-SR3 | OpenDaylight :: Config-Netty -odl-base-all | 1.4.5-Helium-SR3 | x | odl-base-1.4.5-Helium-SR3 | OpenDaylight Controller -odl-base-dummy-console | 1.1.3-Helium-SR3 | x | odl-base-1.4.5-Helium-SR3 | Temporary Dummy Console -odl-base-felix-dm | 3.1.0 | x | odl-base-1.4.5-Helium-SR3 | Felix Dependency Manager -odl-base-aries-spi-fly | 1.0.0 | x | odl-base-1.4.5-Helium-SR3 | Aries SPI Fly -odl-base-netty | 4.0.23.Final | x | odl-base-1.4.5-Helium-SR3 | -odl-base-jersey | 1.17 | x | odl-base-1.4.5-Helium-SR3 | Jersey -odl-base-jersey2-osgi | 4.0 | | odl-base-1.4.5-Helium-SR3 | OSGi friendly Jersey 
-odl-base-jackson | 2.3.2 | x | odl-base-1.4.5-Helium-SR3 | Jackson JAX-RS -odl-base-slf4j | 1.7.2 | x | odl-base-1.4.5-Helium-SR3 | SLF4J Logging -odl-base-apache-commons | 1.4.5-Helium-SR3 | x | odl-base-1.4.5-Helium-SR3 | Apache Commons Libraries -odl-base-eclipselink-persistence | 2.0.4.v201112161009 | x | odl-base-1.4.5-Helium-SR3 | EclipseLink Persistence API -odl-base-gemini-web | 2.2.0.RELEASE | x | odl-base-1.4.5-Helium-SR3 | Gemini Web -odl-base-tomcat | 7.0.53 | x | odl-base-1.4.5-Helium-SR3 | OpenDaylight Tomcat -odl-base-spring | 3.1.3.RELEASE | x | odl-base-1.4.5-Helium-SR3 | Opendaylight Spring Support -odl-base-spring-web | 3.1.3.RELEASE | x | odl-base-1.4.5-Helium-SR3 | OpenDaylight Spring Web -odl-base-spring-security | 3.1.3.RELEASE | x | odl-base-1.4.5-Helium-SR3 | OpenDaylight Spring Security -odl-tcpmd5-all | 1.0.3-Helium-SR3 | | odl-tcpmd5-1.0.3-Helium-SR3 | -odl-tcpmd5-base | 1.0.3-Helium-SR3 | | odl-tcpmd5-1.0.3-Helium-SR3 | -odl-tcpmd5-netty | 1.0.3-Helium-SR3 | | odl-tcpmd5-1.0.3-Helium-SR3 | -odl-tcpmd5-nio | 1.0.3-Helium-SR3 | | odl-tcpmd5-1.0.3-Helium-SR3 | -odl-sfc-all | 0.0.4-Helium-SR3 | | odl-sfc-0.0.4-Helium-SR3 | OpenDaylight :: sfc :: All -odl-sfc-provider | 0.0.4-Helium-SR3 | | odl-sfc-0.0.4-Helium-SR3 | OpenDaylight :: sfc :: Provider -odl-sfc-model | 0.0.4-Helium-SR3 | | odl-sfc-0.0.4-Helium-SR3 | OpenDaylight :: sfc :: Model -odl-sfc-test-consumer | 0.0.4-Helium-SR3 | | odl-sfc-0.0.4-Helium-SR3 | OpenDaylight :: sfc :: Test :: Consumer -odl-sfc-ui | 0.0.4-Helium-SR3 | | odl-sfc-0.0.4-Helium-SR3 | OpenDaylight :: sfc :: UI -odl-mdsal-all | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | OpenDaylight :: MDSAL :: All -odl-mdsal-broker | 1.1.3-Helium-SR3 | x | odl-mdsal-1.1.3-Helium-SR3 | OpenDaylight :: MDSAL :: Broker -odl-toaster | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | OpenDaylight :: Toaster -odl-mdsal-xsql | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | -odl-mdsal-clustering-commons | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | -odl-mdsal-distributed-datastore | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | -odl-mdsal-remoterpc-connector | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | -odl-mdsal-clustering | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | -odl-clustering-test-app | 1.1.3-Helium-SR3 | | odl-mdsal-1.1.3-Helium-SR3 | -odl-flow-model | 1.1.3-Helium-SR3 | x | odl-flow-1.1.3-Helium-SR3 | OpenDaylight :: Flow :: Model -odl-flow-services | 1.1.3-Helium-SR3 | x | odl-flow-1.1.3-Helium-SR3 | OpenDaylight :: Flow :: Services -odl-openflow-nxm-extensions | 0.0.6-Helium-SR3 | x | ovsdb-0.0.6-Helium-SR3 | OpenDaylight :: Openflow :: Nicira Extensions -odl-integration-compatible-with-all | 0.2.3-Helium-SR3 | | odl-integration-0.2.3-Helium-SR3 | -odl-integration-all | 0.2.3-Helium-SR3 | | odl-integration-0.2.3-Helium-SR3 | -odl-openflowjava-all | 0.0.0 | | odl-openflowjava-0.5.3-Helium-SR3 | OpenDaylight :: Openflow Java :: All -odl-openflowjava-protocol | 0.5.3-Helium-SR3 | x | odl-openflowjava-0.5.3-Helium-SR3 | OpenDaylight :: Openflow Java :: Protocol -odl-nsf-all | 0.4.5-Helium-SR3 | x | nsf-0.4.5-Helium-SR3 | OpenDaylight :: NSF :: All Network Service Functio -odl-nsf-managers | 0.4.5-Helium-SR3 | x | nsf-0.4.5-Helium-SR3 | OpenDaylight :: AD-SAL :: Network Service Function -odl-adsal-northbound | 0.4.5-Helium-SR3 | x | nsf-0.4.5-Helium-SR3 | OpenDaylight :: AD-SAL :: Northbound APIs -odl-bgpcep-all | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-bgpcep-dependencies | 0.3.4-Helium-SR3 | | 
odl-bgpcep-0.3.4-Helium-SR3 | -odl-bgpcep-util | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-bgpcep-concepts | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-bgpcep-linkstate | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-bgpcep-pcep-impl | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-pcep-segment-routing | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-bgpcep-parser | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-bgpcep-rib | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-bgpcep-tunnel | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-bgpcep-programming | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-pcep-api | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-pcep-spi | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-bgpcep-topology | 0.3.4-Helium-SR3 | | odl-bgpcep-0.3.4-Helium-SR3 | -odl-config-persister-all | 0.2.8-Helium-SR3 | | odl-config-persister-0.2.8-Helium-SR3 | OpenDaylight :: Config Persister:: All -odl-config-persister | 0.2.8-Helium-SR3 | x | odl-config-persister-0.2.8-Helium-SR3 | OpenDaylight :: Config Persister -odl-config-startup | 0.2.8-Helium-SR3 | x | odl-config-persister-0.2.8-Helium-SR3 | OpenDaylight :: Config Persister:: Config Startup -odl-protocol-framework | 0.5.3-Helium-SR3 | x | odl-protocol-framework-0.5.3-Helium-SR3 | OpenDaylight :: Protocol Framework -odl-restconf-all | 1.1.3-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Restconf :: All -odl-restconf | 1.1.3-Helium-SR3 | x | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Restconf -odl-restconf-noauth | 1.1.3-Helium-SR3 | x | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: Restconf -odl-mdsal-apidocs | 1.1.3-Helium-SR3 | x | odl-controller-1.1.3-Helium-SR3 | OpenDaylight :: MDSAL :: APIDOCS -odl-toaster-rest | 1.1.3-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | -odl-toaster-ui | 1.1.3-Helium-SR3 | | odl-controller-1.1.3-Helium-SR3 | -odl-yangtools-all | 0.6.5-Helium-SR3 | | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight Yangtools All -odl-yangtools-models | 0.6.5-Helium-SR3 | x | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight :: Yangtools :: Models -odl-yangtools-data-binding | 0.6.5-Helium-SR3 | x | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight :: Yangtools :: Data Binding -odl-yangtools-binding | 0.6.5-Helium-SR3 | x | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight :: Yangtools :: Binding -odl-yangtools-common | 0.6.5-Helium-SR3 | x | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight :: Yangtools :: Common -odl-yangtools-binding-generator | 0.6.5-Helium-SR3 | x | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight :: Yangtools :: Binding Generator -odl-yangtools-restconf | 0.6.5-Helium-SR3 | | odl-yangtools-0.6.5-Helium-SR3 | OpenDaylight :: Yangtools :: Restconf -odl-adsal-compatibility-all | 1.4.5-Helium-SR3 | | odl-adsal-compatibility-0.8.4-Helium-SR3 | OpenDaylight :: controller :: All -odl-adsal-compatibility | 0.8.4-Helium-SR3 | x | odl-adsal-compatibility-0.8.4-Helium-SR3 | OpenDaylight :: AD-SAL :: Compatibility -odl-ovsdb-all | 1.0.3-Helium-SR3 | | ovsdb-1.0.3-Helium-SR3 | OpenDaylight :: OVSDB :: all -odl-ovsdb-library | 1.0.3-Helium-SR3 | x | ovsdb-1.0.3-Helium-SR3 | OVSDB :: Library -odl-ovsdb-schema-openvswitch | 1.0.3-Helium-SR3 | x | ovsdb-1.0.3-Helium-SR3 | OVSDB :: Schema :: Open_vSwitch -odl-ovsdb-schema-hardwarevtep | 1.0.3-Helium-SR3 | x | ovsdb-1.0.3-Helium-SR3 | OVSDB :: Schema :: hardware_vtep -odl-ovsdb-plugin | 1.0.3-Helium-SR3 | x | ovsdb-1.0.3-Helium-SR3 | 
OpenDaylight :: OVSDB :: Plugin -odl-ovsdb-northbound | 0.6.3-Helium-SR3 | x | ovsdb-1.0.3-Helium-SR3 | OpenDaylight :: OVSDB :: Northbound -odl-ovsdb-openstack | 1.0.3-Helium-SR3 | x | ovsdb-1.0.3-Helium-SR3 | OpenDaylight :: OVSDB :: OpenStack Network Virtual -odl-ovsdb-ovssfc | 0.0.4-Helium-SR3 | | ovsdb-0.0.4-Helium-SR3 | OpenDaylight :: OVSDB :: OVS Service Function Chai -odl-sfclisp | 0.0.4-Helium-SR3 | | odl-sfclisp-0.0.4-Helium-SR3 | OpenDaylight :: sfclisp :: all -odl-sfcofl2 | 0.0.4-Helium-SR3 | | odl-sfcofl2-0.0.4-Helium-SR3 | OpenDaylight :: sfcofl2 -odl-dlux-all | 0.1.3-Helium-SR3 | | odl-dlux-0.1.3-Helium-SR3 | -odl-dlux-core | 0.1.3-Helium-SR3 | x | odl-dlux-0.1.3-Helium-SR3 | -pax-cdi | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Provide CDI support -pax-cdi-1.1 | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Provide CDI 1.1 support -pax-cdi-weld | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Weld CDI support -pax-cdi-1.1-weld | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Weld CDI 1.1 support -pax-cdi-openwebbeans | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | OpenWebBeans CDI support -pax-cdi-web | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Web CDI support -pax-cdi-1.1-web | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Web CDI 1.1 support -pax-cdi-web-weld | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Weld Web CDI support -pax-cdi-1.1-web-weld | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | Weld Web CDI 1.1 support -pax-cdi-web-openwebbeans | 0.7.0 | | org.ops4j.pax.cdi-0.7.0 | OpenWebBeans Web CDI support -pax-cdi-deltaspike-core | >0.5 | | org.ops4j.pax.cdi-0.7.0 | Apache Deltaspike core support -pax-cdi-deltaspike-jpa | 0.5 | | org.ops4j.pax.cdi-0.7.0 | Apche Deltaspike jpa support -odl-vtn-manager-all | 0.2.3-Helium-SR3 | | vtn-manager-0.2.3-Helium-SR3 | OpenDaylight VTN Manager All -odl-vtn-manager-java-api | 0.2.3-Helium-SR3 | | vtn-manager-0.2.3-Helium-SR3 | OpenDaylight :: VTN Manager :: Java API -odl-vtn-manager-northbound | 0.2.3-Helium-SR3 | | vtn-manager-0.2.3-Helium-SR3 | OpenDaylight :: VTN Manager :: Northbound -odl-vtn-manager-neutron | 0.2.3-Helium-SR3 | | vtn-manager-0.2.3-Helium-SR3 | OpenDaylight :: VTN Manager :: Neutron Interface -odl-aaa-authz-all | 0.1.3-Helium-SR3 | | odl-aaa-0.1.3-Helium-SR3 | OpenDaylight :: AAA :: Authorization :: All Featur -odl-aaa-authz | 0.1.3-Helium-SR3 | | odl-aaa-0.1.3-Helium-SR3 | OpenDaylight :: AAA :: Authorization -spring-dm | 1.2.1 | | spring-3.0.1 | Spring DM support -spring-dm-web | 1.2.1 | | spring-3.0.1 | Spring DM Web support -spring | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x support -spring-aspects | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x AOP support -spring-instrument | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x Instrument support -spring-jdbc | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x JDBC support -spring-jms | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x JMS support -spring-struts | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x Struts support -spring-test | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x Test support -spring-orm | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x ORM support -spring-oxm | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x OXM support -spring-tx | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x Transaction (TX) support -spring-web | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x Web support -spring-web-portlet | 3.1.4.RELEASE | | spring-3.0.1 | Spring 3.1.x Web Portlet support -spring | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x support -spring-aspects | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x AOP support -spring-instrument | 
3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x Instrument support -spring-jdbc | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x JDBC support -spring-jms | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x JMS support -spring-struts | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x Struts support -spring-test | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x Test support -spring-orm | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x ORM support -spring-oxm | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x OXM support -spring-tx | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x Transaction (TX) support -spring-web | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x Web support -spring-web-portlet | 3.2.4.RELEASE | | spring-3.0.1 | Spring 3.2.x Web Portlet support -spring | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x support -spring-aspects | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x AOP support -spring-instrument | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x Instrument support -spring-jdbc | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x JDBC support -spring-jms | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x JMS support -spring-test | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x Test support -spring-orm | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x ORM support -spring-oxm | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x OXM support -spring-tx | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x Transaction (TX) support -spring-web | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x Web support -spring-web-portlet | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x Web Portlet support -spring-websocket | 4.0.2.RELEASE_1 | | spring-3.0.1 | Spring 4.0.x WebSocket support -spring-security | 3.1.4.RELEASE | | spring-3.0.1 | Spring Security 3.1.x support -gemini-blueprint | 1.0.0.RELEASE | | spring-3.0.1 | Gemini Blueprint Extender -odl-sdninterfaceapp-all | 1.4.5-Helium-SR3 | | odl-sdninterfaceapp-1.4.5-Helium-SR3 | OpenDaylight :: sdninterfaceapp -odl-lispflowmapping-all | 1.1.14-Helium-SR3 | | odl-lispflowmapping-1.1.14-Helium-SR3 | OpenDaylight :: LISP Flow Mapping :: All -odl-lispflowmapping-mappingservice | 1.1.14-Helium-SR3 | | odl-lispflowmapping-1.1.14-Helium-SR3 | OpenDaylight :: LISP Flow Mapping :: Mapping Servi -odl-lispflowmapping-southbound | 1.1.14-Helium-SR3 | | odl-lispflowmapping-1.1.14-Helium-SR3 | OpenDaylight :: LISP Flow Mapping :: Southbound Pl -odl-lispflowmapping-northbound | 1.1.14-Helium-SR3 | | odl-lispflowmapping-1.1.14-Helium-SR3 | OpenDaylight :: LISP Flow Mapping :: Northbound -odl-lispflowmapping-netconf | 1.1.14-Helium-SR3 | | odl-lispflowmapping-1.1.14-Helium-SR3 | OpenDaylight :: LISP Flow Mapping :: NETCONF -odl-lispflowmapping-neutron | 1.1.14-Helium-SR3 | | odl-lispflowmapping-1.1.14-Helium-SR3 | OpenDaylight :: LISP Flow Mapping :: Neutron Integ -odl-akka-all | 1.4.5-Helium-SR3 | | odl-controller-1.4.5-Helium-SR3 | OpenDaylight :: Akka :: All -odl-akka-scala | 2.10 | | odl-controller-1.4.5-Helium-SR3 | Scala Runtime for OpenDaylight -odl-akka-system | 2.3.4 | | odl-controller-1.4.5-Helium-SR3 | Akka Actor Framework System Bundles -odl-akka-clustering | 2.3.4 | | odl-controller-1.4.5-Helium-SR3 | Akka Clustering -odl-akka-leveldb | 0.7 | | odl-controller-1.4.5-Helium-SR3 | LevelDB -odl-akka-persistence | 2.3.4 | | odl-controller-1.4.5-Helium-SR3 | Akka Persistence -transaction | 1.0.1 | x | enterprise-3.0.1 | OSGi Transaction Manager -jpa | 1.0.1 | | enterprise-3.0.1 | OSGi Persistence Container -openjpa | 2.2.2 | | enterprise-3.0.1 | Apache OpenJPA 2.2.x 
persistence engine support -openjpa | 2.3.0 | | enterprise-3.0.1 | Apache OpenJPA 2.3.x persistence engine support -hibernate | 3.3.2.GA | | enterprise-3.0.1 | Hibernate 3.x JPA persistence engine support -hibernate | 4.2.7.Final | | enterprise-3.0.1 | Hibernate 4.2.x JPA persistence engine support -hibernate-envers | 4.2.7.Final | | enterprise-3.0.1 | Hibernate Envers 4.2.x -hibernate | 4.3.1.Final | | enterprise-3.0.1 | Hibernate 4.3.x JPA persistence engine support -hibernate-envers | 4.3.1.Final | | enterprise-3.0.1 | Hibernate Envers 4.3.x -hibernate-validator | 5.0.3.Final | | enterprise-3.0.1 | Hibernate Validator support -jndi | 3.0.1 | | enterprise-3.0.1 | OSGi Service Registry JNDI access -jdbc | 3.0.1 | | enterprise-3.0.1 | JDBC service and commands -jms | 3.0.1 | | enterprise-3.0.1 | JMS service and commands -openwebbeans | 1.2.1 | | enterprise-3.0.1 | Apache OpenWebBeans CDI container support -weld | 2.1.1.Final | | enterprise-3.0.1 | JBoss Weld CDI container support -application-without-isolation | 1.0.0 | | enterprise-3.0.1 | Provide EBA archive support -odl-ttp-all | 0.0.4-Helium-SR3 | | odl-ttp-0.0.4-Helium-SR3 | OpenDaylight :: ttp :: All -odl-ttp-model | 0.0.4-Helium-SR3 | | odl-ttp-0.0.4-Helium-SR3 | OpenDaylight :: ttp :: Model -odl-netconf-all | 0.2.8-Helium-SR3 | | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: All -odl-netconf-api | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: API -odl-netconf-mapping-api | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: Mapping API -odl-netconf-util | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | -odl-netconf-impl | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: Impl -odl-config-netconf-connector | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: Connector -odl-netconf-netty-util | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: Netty Util -odl-netconf-client | 0.2.8-Helium-SR3 | | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: Client -odl-netconf-monitoring | 0.2.8-Helium-SR3 | x | odl-netconf-0.2.8-Helium-SR3 | OpenDaylight :: Netconf :: Monitoring -odl-snbi-all | 1.0.3-Helium-SR3 | | odl-snbi-1.0.3-Helium-SR3 | OpenDaylight :: snbi :: All -odl-snbi-southplugin | 1.0.3-Helium-SR3 | | odl-snbi-1.0.3-Helium-SR3 | OpenDaylight :: SNBI :: SouthPlugin -odl-snbi-shellplugin | 1.0.3-Helium-SR3 | | odl-snbi-1.0.3-Helium-SR3 | OpenDaylight :: SNBI :: ShellPlugin -pax-jetty | 8.1.14.v20131031 | x | org.ops4j.pax.web-3.1.0 | Provide Jetty engine support -pax-tomcat | 7.0.27.1 | | org.ops4j.pax.web-3.1.0 | Provide Tomcat engine support -pax-http | 3.1.0 | x | org.ops4j.pax.web-3.1.0 | Implementation of the OSGI HTTP Service -pax-http-whiteboard | 3.1.0 | x | org.ops4j.pax.web-3.1.0 | Provide HTTP Whiteboard pattern support -pax-war | 3.1.0 | x | org.ops4j.pax.web-3.1.0 | Provide support of a full WebContainer -odl-openflowplugin-all | 0.0.6-Helium-SR3 | | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: All -odl-openflowplugin-southbound | 0.0.6-Helium-SR3 | x | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: SouthBound -odl-openflowplugin-flow-services | 0.0.6-Helium-SR3 | x | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: Flow Services -odl-openflowplugin-flow-services-rest | 0.0.6-Helium-SR3 | | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: Flow Services : 
-odl-openflowplugin-flow-services-ui | 0.0.6-Helium-SR3 | | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: Flow Services : -odl-openflowplugin-drop-test | 0.0.6-Helium-SR3 | | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: Drop Test -odl-openflowplugin-apps | 0.0.6-Helium-SR3 | | openflowplugin-0.0.6-Helium-SR3 | OpenDaylight :: Openflow Plugin :: Applications -odl-groupbasedpolicy-ofoverlay | 0.1.3-Helium-SR3 | | odl-groupbasedpolicy-0.1.3-Helium-SR3 | OpenDaylight :: groupbasedpolicy :: OpenFlow Overl -""" diff --git a/unit_tests/test_odl_controller_hooks.py b/unit_tests/test_odl_controller_hooks.py deleted file mode 100644 index 086cdaa..0000000 --- a/unit_tests/test_odl_controller_hooks.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from mock import patch, call -from test_utils import CharmTestCase - -with patch('charmhelpers.core.hookenv.config') as config: - config.return_value = False - import odl_controller_hooks as hooks - - -TO_PATCH = [ - 'adduser', - 'apt_install', - 'check_call', - 'config', - 'install_remote', - 'log', - 'mkdir', - 'process_odl_cmds', - 'relation_set', - 'relation_ids', - 'restart_on_change', - 'service_start', - 'shutil', - 'write_mvn_config', - 'init_is_systemd', - 'service', -] - - -class ODLControllerHooksTests(CharmTestCase): - - def setUp(self): - super(ODLControllerHooksTests, self).setUp(hooks, TO_PATCH) - - self.config.__getitem__.side_effect = self.test_config.get - self.config.get.side_effect = self.test_config.get - self.install_url = 'http://10.10.10.10/distribution-karaf.tgz' - self.test_config.set('install-url', self.install_url) - self.test_config.set('profile', 'default') - self.init_is_systemd.return_value = False - - def _call_hook(self, hookname): - hooks.hooks.execute([ - 'hooks/{}'.format(hookname)]) - - @patch('os.symlink') - @patch('os.path.exists') - @patch('os.listdir') - def test_install_hook(self, mock_listdir, mock_path_exists, mock_symlink): - mock_listdir.return_value = ['random-file', 'distribution-karaf.tgz'] - mock_path_exists.return_value = False - self._call_hook('install') - self.apt_install.assert_called_with([ - "default-jre-headless", "python-jinja2"], - fatal=True - ) - mock_symlink.assert_called_with('distribution-karaf.tgz', - '/opt/opendaylight-karaf') - self.adduser.assert_called_with("opendaylight", system_user=True) - self.mkdir.assert_has_calls([ - call('/home/opendaylight', owner="opendaylight", - group="opendaylight", perms=0755), - call('/var/log/opendaylight', owner="opendaylight", - group="opendaylight", perms=0755) - ]) - self.check_call.assert_called_with([ - "chown", "-R", "opendaylight:opendaylight", - "/opt/distribution-karaf.tgz" - ]) - self.write_mvn_config.assert_called_with() - self.service_start.assert_called_with('odl-controller') - self.shutil.copy.assert_called_with('files/odl-controller.conf', - '/etc/init') - - @patch('os.symlink') - @patch('os.path.exists') - @patch('os.listdir') - 
def test_install_hook_systemd(self, mock_listdir, - mock_path_exists, mock_symlink): - self.init_is_systemd.return_value = True - mock_listdir.return_value = ['random-file', 'distribution-karaf.tgz'] - mock_path_exists.return_value = False - self._call_hook('install') - self.apt_install.assert_called_with([ - "default-jre-headless", "python-jinja2"], - fatal=True - ) - mock_symlink.assert_called_with('distribution-karaf.tgz', - '/opt/opendaylight-karaf') - self.adduser.assert_called_with("opendaylight", system_user=True) - self.mkdir.assert_has_calls([ - call('/home/opendaylight', owner="opendaylight", - group="opendaylight", perms=0755), - call('/var/log/opendaylight', owner="opendaylight", - group="opendaylight", perms=0755) - ]) - self.check_call.assert_called_with([ - "chown", "-R", "opendaylight:opendaylight", - "/opt/distribution-karaf.tgz" - ]) - self.write_mvn_config.assert_called_with() - self.service_start.assert_called_with('odl-controller') - self.shutil.copy.assert_called_with('files/odl-controller.service', - '/lib/systemd/system') - self.service.assert_called_with('enable', 'odl-controller') - - def test_ovsdb_manager_joined_hook(self): - self._call_hook('ovsdb-manager-relation-joined') - self.relation_set.assert_called_with(port=6640, protocol="tcp") - - def test_controller_api_relation_joined_hook(self): - self._call_hook('controller-api-relation-joined') - self.relation_set.assert_called_with(relation_id=None, port=8080, - username="admin", - password="admin") - - @patch.object(hooks, 'controller_api_joined') - def test_config_changed_hook(self, mock_controller_api_joined): - self.relation_ids.return_value = ['controller-api:2'] - self._call_hook('config-changed') - self.write_mvn_config.assert_called_with() - mock_controller_api_joined.assert_called_with('controller-api:2') - self.process_odl_cmds.assert_called_with({ - 'feature:install': [ - 'odl-base-all', 'odl-aaa-authn', 'odl-restconf', 'odl-nsf-all', - 'odl-adsal-northbound', 'odl-mdsal-apidocs', - 'odl-ovsdb-openstack', 'odl-ovsdb-northbound', - 'odl-dlux-core' - ], - 'port': 8080 - }) diff --git a/unit_tests/test_odl_controller_utils.py b/unit_tests/test_odl_controller_utils.py deleted file mode 100644 index 0218a5a..0000000 --- a/unit_tests/test_odl_controller_utils.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from mock import patch, call, ANY -from test_utils import CharmTestCase - -import odl_controller_utils as utils -import odl_outputs - -TO_PATCH = [ - 'subprocess', - 'render', - 'config', - 'retry_on_exception', -] - - -class ODLControllerUtilsTests(CharmTestCase): - - def setUp(self): - super(ODLControllerUtilsTests, self).setUp(utils, TO_PATCH) - self.config.side_effect = self.test_config.get - self.test_config.set('http-proxy', 'http://proxy.int:3128') - - def test_mvn_proxy_ctx(self): - expect = { - 'http_noproxy': [], - 'http_proxy': True, - 'http_proxy_host': 'proxy.int', - 'http_proxy_port': 3128 - } - self.assertEqual(utils.mvn_proxy_ctx('http'), expect) - - def test_mvn_ctx(self): - self.test_config.set('http-proxy', 'http://proxy.int:3128') - expect = { - 'http_noproxy': [], - 'http_proxy': True, - 'http_proxy_host': 'proxy.int', - 'http_proxy_port': 3128 - } - self.assertEqual(utils.mvn_ctx(), expect) - - def test_mvn_ctx_unset(self): - self.test_config.set('http-proxy', '') - self.assertEqual(utils.mvn_ctx(), {}) - - def test_write_mvn_config(self): - self.test_config.set('http-proxy', '') - self.test_config.set('https-proxy', '') - utils.write_mvn_config() - self.render.assert_called_with( - "settings.xml", "/home/opendaylight/.m2/settings.xml", {}, - "opendaylight", "opendaylight", 0400 - ) - - def test_run_odl(self): - utils.run_odl(["feature:list"]) - self.subprocess.check_output.assert_called_with( - ["/opt/opendaylight-karaf/bin/client", "-r", '20', "-h", - 'localhost', "-a", '8101', "-u", "karaf", 'feature:list'] - ) - - def test_installed_features(self): - self.subprocess.check_output.return_value = \ - odl_outputs.ODL_023_FEATURE_LIST - installed = utils.installed_features() - for feature in utils.PROFILES["openvswitch-odl"]["feature:install"]: - self.assertTrue(feature in installed) - self.assertFalse('odl-l2switch-hosttracker' in installed) - - def test_filter_installed(self): - self.subprocess.check_output.return_value = \ - odl_outputs.ODL_023_FEATURE_LIST - self.assertEqual( - utils.filter_installed(['odl-l2switch-hosttracker']), - ['odl-l2switch-hosttracker'] - ) - self.assertEqual(utils.filter_installed(['odl-config-api']), []) - - @patch.object(utils, 'run_odl') - @patch.object(utils, 'filter_installed') - def test_process_odl_cmds(self, mock_filter_installed, mock_run_odl): - test_profile = { - "feature:install": ["odl-l2switch-all"], - "log:set": { - "TRACE": ["cosc-cvpn-ovs-rest"], - }, - "port": 1181 - } - mock_filter_installed.return_value = ["odl-l2switch-all"] - utils.process_odl_cmds(test_profile) - mock_run_odl.assert_has_calls([ - call(["feature:install", "odl-l2switch-all"]), - call(['log:set', 'TRACE', 'cosc-cvpn-ovs-rest']) - ]) - - @patch.object(utils, 'service_running') - @patch.object(utils, 'status_set') - def test_assess_status(self, status_set, service_running): - service_running.return_value = False - utils.assess_status() - service_running.assert_called_with('odl-controller') - status_set.assert_called_with('blocked', ANY) - - service_running.return_value = True - utils.assess_status() - service_running.assert_called_with('odl-controller') - status_set.assert_called_with('active', ANY) diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py deleted file mode 100644 index 3fd6953..0000000 --- a/unit_tests/test_utils.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import unittest -import os -import yaml - -from contextlib import contextmanager -from mock import patch, MagicMock - -patch('charmhelpers.contrib.openstack.utils.set_os_workload_status').start() -patch('charmhelpers.core.hookenv.status_set').start() - - -def load_config(): - ''' - Walk backwards from __file__ looking for config.yaml, load and return the - 'options' section - ''' - config = None - f = __file__ - while config is None: - d = os.path.dirname(f) - if os.path.isfile(os.path.join(d, 'config.yaml')): - config = os.path.join(d, 'config.yaml') - break - f = d - - if not config: - logging.error('Could not find config.yaml in any parent directory ' - 'of %s. ' % __file__) - raise Exception - - return yaml.safe_load(open(config).read())['options'] - - def get_default_config(): - ''' - Load default charm config from config.yaml and return as a dict. - If no default is set in config.yaml, its value is None. - ''' - default_config = {} - config = load_config() - for k, v in config.iteritems(): - if 'default' in v: - default_config[k] = v['default'] - else: - default_config[k] = None - return default_config - - class CharmTestCase(unittest.TestCase): - - def setUp(self, obj, patches): - super(CharmTestCase, self).setUp() - self.patches = patches - self.obj = obj - self.test_config = TestConfig() - self.test_relation = TestRelation() - self.patch_all() - - def patch(self, method): - _m = patch.object(self.obj, method) - mock = _m.start() - self.addCleanup(_m.stop) - return mock - - def patch_all(self): - for method in self.patches: - setattr(self, method, self.patch(method)) - - class TestConfig(object): - - def __init__(self): - self.config = get_default_config() - - def get(self, attr=None): - if not attr: - return self.get_all() - try: - return self.config[attr] - except KeyError: - return None - - def get_all(self): - return self.config - - def set(self, attr, value): - if attr not in self.config: - raise KeyError - self.config[attr] = value - - class TestRelation(object): - - def __init__(self, relation_data={}): - self.relation_data = relation_data - - def set(self, relation_data): - self.relation_data = relation_data - - def get(self, attribute=None, unit=None, rid=None): - if attribute is None: - return self.relation_data - elif attribute in self.relation_data: - return self.relation_data[attribute] - return None - - @contextmanager - def patch_open(): - '''Patch open() to allow mocking both open() itself and the file that is - yielded. - - Yields the mock for "open" and "file", respectively.''' - mock_open = MagicMock(spec=open) - mock_file = MagicMock(spec=file) - - @contextmanager - def stub_open(*args, **kwargs): - mock_open(*args, **kwargs) - yield mock_file - - with patch('__builtin__.open', stub_open): - yield mock_open, mock_file
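The deleted test_utils.py ends with the patch_open() helper. For anyone carrying these unit tests out of tree, a minimal usage sketch follows (Python 2, matching the py27 tox targets removed above); read_karaf_features() and the features.cfg path are hypothetical stand-ins for code under test, not part of the charm:

# Minimal usage sketch for the patch_open() helper deleted above.
# Assumes test_utils.py is importable; read_karaf_features() and the
# features.cfg path are hypothetical examples, not charm code.
from test_utils import patch_open


def read_karaf_features(path='/opt/opendaylight-karaf/etc/'
                             'org.apache.karaf.features.cfg'):
    # Code under test: reads a file via the builtin open().
    with open(path) as f:
        return f.read()


def test_read_karaf_features():
    # patch_open() yields mocks for open() itself and for the file object
    # it returns, so no real file is touched.
    with patch_open() as (mock_open, mock_file):
        mock_file.read.return_value = 'featuresBoot=config,standard'
        assert read_karaf_features() == 'featuresBoot=config,standard'
        mock_open.assert_called_with(
            '/opt/opendaylight-karaf/etc/org.apache.karaf.features.cfg')

Patching __builtin__.open this way drives both the open() call and the file object it yields without touching the filesystem, which is what made filesystem-heavy hook code testable under the harness above.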