diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py
index 4efe799..b9c7900 100644
--- a/hooks/charmhelpers/contrib/network/ip.py
+++ b/hooks/charmhelpers/contrib/network/ip.py
@@ -191,6 +191,15 @@ get_iface_for_address = partial(_get_for_address, key='iface')
get_netmask_for_address = partial(_get_for_address, key='netmask')
+def resolve_network_cidr(ip_address):
+ '''
+    Resolves the full CIDR of an ip_address based on the
+    configured network interfaces
+ '''
+ netmask = get_netmask_for_address(ip_address)
+ return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
+
+
def format_ipv6_addr(address):
"""If address is IPv6, wrap it in '[]' otherwise return None.
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
index d2ede32..d21c9c7 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
# Charms which can not use openstack-origin, ie. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
- 'cinder-backup']
+ 'cinder-backup', 'nexentaedge-data',
+ 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+ 'cinder-nexentaedge', 'nexentaedge-mgmt']
if self.openstack:
for svc in services:
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index 388b60e..ef3bdcc 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -27,7 +27,11 @@ import cinderclient.v1.client as cinder_client
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client
-import novaclient.v1_1.client as nova_client
+from keystoneclient.auth.identity import v3 as keystone_id_v3
+from keystoneclient import session as keystone_session
+from keystoneclient.v3 import client as keystone_client_v3
+
+import novaclient.client as nova_client
import pika
import swiftclient
@@ -38,6 +42,8 @@ from charmhelpers.contrib.amulet.utils import (
DEBUG = logging.DEBUG
ERROR = logging.ERROR
+NOVA_CLIENT_VERSION = "2"
+
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
@@ -139,7 +145,7 @@ class OpenStackAmuletUtils(AmuletUtils):
return "role {} does not exist".format(e['name'])
return ret
- def validate_user_data(self, expected, actual):
+ def validate_user_data(self, expected, actual, api_version=None):
"""Validate user data.
Validate a list of actual user data vs a list of expected user
@@ -150,10 +156,15 @@ class OpenStackAmuletUtils(AmuletUtils):
for e in expected:
found = False
for act in actual:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'tenantId': act.tenantId,
- 'id': act.id}
- if e['name'] == a['name']:
+ if e['name'] == act.name:
+ a = {'enabled': act.enabled, 'name': act.name,
+ 'email': act.email, 'id': act.id}
+ if api_version == 3:
+ a['default_project_id'] = getattr(act,
+ 'default_project_id',
+ 'none')
+ else:
+ a['tenantId'] = act.tenantId
found = True
ret = self._validate_dict_data(e, a)
if ret:
@@ -188,15 +199,30 @@ class OpenStackAmuletUtils(AmuletUtils):
return cinder_client.Client(username, password, tenant, ept)
def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant):
+ tenant=None, api_version=None,
+ keystone_ip=None):
"""Authenticates admin user with the keystone admin endpoint."""
self.log.debug('Authenticating keystone admin...')
unit = keystone_sentry
- service_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
+ if not keystone_ip:
+ keystone_ip = unit.relation('shared-db',
+ 'mysql:shared-db')['private-address']
+ base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
+ if not api_version or api_version == 2:
+ ep = base_ep + "/v2.0"
+ return keystone_client.Client(username=user, password=password,
+ tenant_name=tenant, auth_url=ep)
+ else:
+ ep = base_ep + "/v3"
+ auth = keystone_id_v3.Password(
+ user_domain_name='admin_domain',
+ username=user,
+ password=password,
+ domain_name='admin_domain',
+ auth_url=ep,
+ )
+ sess = keystone_session.Session(auth=auth)
+ return keystone_client_v3.Client(session=sess)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
@@ -225,7 +251,8 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
- return nova_client.Client(username=user, api_key=password,
+ return nova_client.Client(NOVA_CLIENT_VERSION,
+ username=user, api_key=password,
project_id=tenant, auth_url=ep)
def authenticate_swift_user(self, keystone, user, password, tenant):
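As a usage sketch (user, password and IP are assumptions, and `u` stands for an OpenStackAmuletUtils instance), the new api_version and keystone_ip parameters let a test authenticate against either identity API from the same helper:

    # v2.0 path: behaves as before, tenant-based auth
    ks_v2 = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                          password='openstack',
                                          tenant='admin')

    # v3 path: session-based auth against the admin_domain
    ks_v3 = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                          password='openstack',
                                          api_version=3,
                                          keystone_ip='10.5.0.20')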
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index a8c6ab0..c07b33d 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -20,7 +20,7 @@ import os
import re
import time
from base64 import b64decode
-from subprocess import check_call
+from subprocess import check_call, CalledProcessError
import six
import yaml
@@ -45,6 +45,7 @@ from charmhelpers.core.hookenv import (
INFO,
WARNING,
ERROR,
+ status_set,
)
from charmhelpers.core.sysctl import create as sysctl_create
@@ -1479,3 +1480,104 @@ class NetworkServiceContext(OSContextGenerator):
if self.context_complete(ctxt):
return ctxt
return {}
+
+
+class InternalEndpointContext(OSContextGenerator):
+ """Internal endpoint context.
+
+ This context provides the endpoint type used for communication between
+ services e.g. between Nova and Cinder internally. Openstack uses Public
+ endpoints by default so this allows admins to optionally use internal
+ endpoints.
+ """
+ def __call__(self):
+ return {'use_internal_endpoints': config('use-internal-endpoints')}
+
+
+class AppArmorContext(OSContextGenerator):
+ """Base class for apparmor contexts."""
+
+ def __init__(self):
+ self._ctxt = None
+ self.aa_profile = None
+ self.aa_utils_packages = ['apparmor-utils']
+
+ @property
+ def ctxt(self):
+ if self._ctxt is not None:
+ return self._ctxt
+ self._ctxt = self._determine_ctxt()
+ return self._ctxt
+
+ def _determine_ctxt(self):
+ """
+        Validate that the aa-profile-mode setting is disable, enforce, or complain.
+
+ :return ctxt: Dictionary of the apparmor profile or None
+ """
+ if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
+ ctxt = {'aa-profile-mode': config('aa-profile-mode')}
+ else:
+ ctxt = None
+ return ctxt
+
+ def __call__(self):
+ return self.ctxt
+
+ def install_aa_utils(self):
+ """
+ Install packages required for apparmor configuration.
+ """
+ log("Installing apparmor utils.")
+ ensure_packages(self.aa_utils_packages)
+
+ def manually_disable_aa_profile(self):
+ """
+ Manually disable an apparmor profile.
+
+        If aa-profile-mode is set to disable (default), this is required
+        because the template has been written but apparmor is not yet aware
+        of the profile, so 'aa-disable <profile>' fails. Without this the
+        profile would kick into enforce mode on the next service restart.
+
+ """
+ profile_path = '/etc/apparmor.d'
+ disable_path = '/etc/apparmor.d/disable'
+ if not os.path.lexists(os.path.join(disable_path, self.aa_profile)):
+ os.symlink(os.path.join(profile_path, self.aa_profile),
+ os.path.join(disable_path, self.aa_profile))
+
+ def setup_aa_profile(self):
+ """
+ Setup an apparmor profile.
+ The ctxt dictionary will contain the apparmor profile mode and
+ the apparmor profile name.
+ Makes calls out to aa-disable, aa-complain, or aa-enforce to setup
+ the apparmor profile.
+ """
+ self()
+ if not self.ctxt:
+ log("Not enabling apparmor Profile")
+ return
+ self.install_aa_utils()
+ cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])]
+ cmd.append(self.ctxt['aa-profile'])
+ log("Setting up the apparmor profile for {} in {} mode."
+ "".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode']))
+ try:
+ check_call(cmd)
+ except CalledProcessError as e:
+            # If aa-profile-mode is set to disable (default), manual
+            # disabling is required because the template has been written
+            # but apparmor is not yet aware of the profile, so
+            # 'aa-disable <profile>' fails. If aa-disable learns to read
+            # profile files first this can be removed.
+ if self.ctxt['aa-profile-mode'] == 'disable':
+ log("Manually disabling the apparmor profile for {}."
+ "".format(self.ctxt['aa-profile']))
+ self.manually_disable_aa_profile()
+ return
+ status_set('blocked', "Apparmor profile {} failed to be set to {}."
+ "".format(self.ctxt['aa-profile'],
+ self.ctxt['aa-profile-mode']))
+ raise e
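AppArmorContext is only a base class; a charm is expected to subclass it, name the profile, and make sure 'aa-profile' ends up in the context that setup_aa_profile() reads. A hypothetical subclass (the profile name is an assumption) could look like:

    class CinderAppArmorContext(AppArmorContext):
        """Hypothetical per-charm context naming the profile to manage."""

        def __init__(self):
            super(CinderAppArmorContext, self).__init__()
            self.aa_profile = 'usr.bin.cinder-volume'  # assumed profile name

        def _determine_ctxt(self):
            ctxt = super(CinderAppArmorContext, self)._determine_ctxt()
            if ctxt:
                # setup_aa_profile() expects 'aa-profile' in the context
                ctxt['aa-profile'] = self.aa_profile
            return ctxt

    # After the profile template has been rendered, a hook would call:
    # CinderAppArmorContext().setup_aa_profile()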
diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py
index 3dca6dc..532a1dc 100644
--- a/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/hooks/charmhelpers/contrib/openstack/ip.py
@@ -14,16 +14,19 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see .
+
from charmhelpers.core.hookenv import (
config,
unit_get,
service_name,
+ network_get_primary_address,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
+ resolve_network_cidr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
@@ -33,16 +36,19 @@ ADMIN = 'admin'
ADDRESS_MAP = {
PUBLIC: {
+ 'binding': 'public',
'config': 'os-public-network',
'fallback': 'public-address',
'override': 'os-public-hostname',
},
INTERNAL: {
+ 'binding': 'internal',
'config': 'os-internal-network',
'fallback': 'private-address',
'override': 'os-internal-hostname',
},
ADMIN: {
+ 'binding': 'admin',
'config': 'os-admin-network',
'fallback': 'private-address',
'override': 'os-admin-hostname',
@@ -110,7 +116,7 @@ def resolve_address(endpoint_type=PUBLIC):
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return unit address ensuring address is on configured net
- split if one is configured.
+ split if one is configured, or a Juju 2.0 extra-binding has been used.
:param endpoint_type: Network endpoing type
"""
@@ -125,23 +131,45 @@ def resolve_address(endpoint_type=PUBLIC):
net_type = ADDRESS_MAP[endpoint_type]['config']
net_addr = config(net_type)
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+ binding = ADDRESS_MAP[endpoint_type]['binding']
clustered = is_clustered()
- if clustered:
- if not net_addr:
- # If no net-splits defined, we expect a single vip
- resolved_address = vips[0]
- else:
+
+ if clustered and vips:
+ if net_addr:
for vip in vips:
if is_address_in_network(net_addr, vip):
resolved_address = vip
break
+ else:
+ # NOTE: endeavour to check vips against network space
+ # bindings
+ try:
+ bound_cidr = resolve_network_cidr(
+ network_get_primary_address(binding)
+ )
+ for vip in vips:
+ if is_address_in_network(bound_cidr, vip):
+ resolved_address = vip
+ break
+ except NotImplementedError:
+                # No net-splits configured and no support for extra
+                # bindings/network spaces, so we expect a single vip
+ resolved_address = vips[0]
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
else:
fallback_addr = unit_get(net_fallback)
- resolved_address = get_address_in_network(net_addr, fallback_addr)
+ if net_addr:
+ resolved_address = get_address_in_network(net_addr, fallback_addr)
+ else:
+ # NOTE: only try to use extra bindings if legacy network
+ # configuration is not in use
+ try:
+ resolved_address = network_get_primary_address(binding)
+ except NotImplementedError:
+ resolved_address = fallback_addr
if resolved_address is None:
raise ValueError("Unable to resolve a suitable IP address based on "
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
index 0b6da25..5dcebe7 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
+++ b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
@@ -1,20 +1,12 @@
{% if auth_host -%}
-{% if api_version == '3' -%}
[keystone_authtoken]
-auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
+auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
+auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
+auth_plugin = password
+project_domain_id = default
+user_domain_id = default
project_name = {{ admin_tenant_name }}
username = {{ admin_user }}
password = {{ admin_password }}
-project_domain_name = default
-user_domain_name = default
-auth_plugin = password
-{% else -%}
-[keystone_authtoken]
-identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
-admin_tenant_name = {{ admin_tenant_name }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
signing_dir = {{ signing_dir }}
{% endif -%}
-{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy
new file mode 100644
index 0000000..9356b2b
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy
@@ -0,0 +1,10 @@
+{% if auth_host -%}
+[keystone_authtoken]
+# Juno specific config (Bug #1557223)
+auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
+identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
+admin_tenant_name = {{ admin_tenant_name }}
+admin_user = {{ admin_user }}
+admin_password = {{ admin_password }}
+signing_dir = {{ signing_dir }}
+{% endif -%}
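With assumed example values for the template variables, the legacy section renders roughly as:

    [keystone_authtoken]
    # Juno specific config (Bug #1557223)
    auth_uri = http://10.5.0.20:5000/
    identity_uri = http://10.5.0.20:35357
    admin_tenant_name = services
    admin_user = cinder
    admin_password = secret
    signing_dir = /var/cache/cinder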
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 80dd2e0..61d5879 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -24,6 +24,7 @@ import os
import sys
import re
import itertools
+import functools
import six
import tempfile
@@ -69,7 +70,15 @@ from charmhelpers.contrib.python.packages import (
pip_install,
)
-from charmhelpers.core.host import lsb_release, mounts, umount, service_running
+from charmhelpers.core.host import (
+ lsb_release,
+ mounts,
+ umount,
+ service_running,
+ service_pause,
+ service_resume,
+ restart_on_change_helper,
+)
from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
@@ -128,7 +137,7 @@ SWIFT_CODENAMES = OrderedDict([
('liberty',
['2.3.0', '2.4.0', '2.5.0']),
('mitaka',
- ['2.5.0']),
+ ['2.5.0', '2.6.0', '2.7.0']),
])
# >= Liberty version->codename mapping
@@ -147,6 +156,7 @@ PACKAGE_CODENAMES = {
]),
'keystone': OrderedDict([
('8.0', 'liberty'),
+ ('8.1', 'liberty'),
('9.0', 'mitaka'),
]),
'horizon-common': OrderedDict([
@@ -763,7 +773,8 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
os.mkdir(parent_dir)
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
- repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth)
+ repo_dir = install_remote(
+ repo, dest=parent_dir, branch=branch, depth=depth)
venv = os.path.join(parent_dir, 'venv')
@@ -862,66 +873,155 @@ def os_workload_status(configs, required_interfaces, charm_func=None):
return wrap
-def set_os_workload_status(configs, required_interfaces, charm_func=None, services=None, ports=None):
- """
- Set workload status based on complete contexts.
- status-set missing or incomplete contexts
- and juju-log details of missing required data.
- charm_func is a charm specific function to run checking
- for charm specific requirements such as a VIP setting.
+def set_os_workload_status(configs, required_interfaces, charm_func=None,
+ services=None, ports=None):
+ """Set the state of the workload status for the charm.
- This function also checks for whether the services defined are ACTUALLY
- running and that the ports they advertise are open and being listened to.
+ This calls _determine_os_workload_status() to get the new state, message
+ and sets the status using status_set()
- @param services - OPTIONAL: a [{'service': , 'ports': []]
- The ports are optional.
- If services is a [] then ports are ignored.
- @param ports - OPTIONAL: an [] representing ports that shoudl be
- open.
- @returns None
+ @param configs: a templating.OSConfigRenderer() object
+ @param required_interfaces: {generic: [specific, specific2, ...]}
+ @param charm_func: a callable function that returns state, message. The
+ signature is charm_func(configs) -> (state, message)
+ @param services: list of strings OR dictionary specifying services/ports
+ @param ports: OPTIONAL list of port numbers.
+ @returns state, message: the new workload status, user message
"""
- incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
- state = 'active'
- missing_relations = []
- incomplete_relations = []
+ state, message = _determine_os_workload_status(
+ configs, required_interfaces, charm_func, services, ports)
+ status_set(state, message)
+
+
+def _determine_os_workload_status(
+ configs, required_interfaces, charm_func=None,
+ services=None, ports=None):
+ """Determine the state of the workload status for the charm.
+
+ This function returns the new workload status for the charm based
+ on the state of the interfaces, the paused state and whether the
+ services are actually running and any specified ports are open.
+
+ This checks:
+
+    1. if the unit should be paused, that it is actually paused. If so the
+       state is 'maintenance' + message, else 'blocked'.
+    2. that the interfaces/relations are complete. If they are not then
+       it sets the state to either 'blocked' or 'waiting' and an appropriate
+       message.
+    3. If all the relation data is set, then it checks that the actual
+       services really are running. If not it sets the state to 'blocked'.
+
+ If everything is okay then the state returns 'active'.
+
+ @param configs: a templating.OSConfigRenderer() object
+ @param required_interfaces: {generic: [specific, specific2, ...]}
+ @param charm_func: a callable function that returns state, message. The
+ signature is charm_func(configs) -> (state, message)
+ @param services: list of strings OR dictionary specifying services/ports
+ @param ports: OPTIONAL list of port numbers.
+ @returns state, message: the new workload status, user message
+ """
+ state, message = _ows_check_if_paused(services, ports)
+
+ if state is None:
+ state, message = _ows_check_generic_interfaces(
+ configs, required_interfaces)
+
+ if state != 'maintenance' and charm_func:
+ # _ows_check_charm_func() may modify the state, message
+ state, message = _ows_check_charm_func(
+ state, message, lambda: charm_func(configs))
+
+ if state is None:
+ state, message = _ows_check_services_running(services, ports)
+
+ if state is None:
+ state = 'active'
+ message = "Unit is ready"
+ juju_log(message, 'INFO')
+
+ return state, message
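A typical caller drives this pipeline from the charm's status-assessment path; the interface, service and port values below are illustrative:

    # Hypothetical charm-side wiring: configs is the charm's
    # OSConfigRenderer; required_interfaces maps generic interface names
    # to the specific relations that can satisfy them.
    required_interfaces = {
        'database': ['shared-db'],
        'messaging': ['amqp'],
        'identity': ['identity-service'],
    }

    def assess_status(configs):
        # Ends with status_set('active'/'waiting'/'blocked'/'maintenance', msg)
        set_os_workload_status(configs, required_interfaces,
                               services=['cinder-volume'], ports=[8776])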
+
+
+def _ows_check_if_paused(services=None, ports=None):
+ """Check if the unit is supposed to be paused, and if so check that the
+ services/ports (if passed) are actually stopped/not being listened to.
+
+    If the unit isn't supposed to be paused, just return None, None.
+
+ @param services: OPTIONAL services spec or list of service names.
+ @param ports: OPTIONAL list of port numbers.
+ @returns state, message or None, None
+ """
+ if is_unit_paused_set():
+ state, message = check_actually_paused(services=services,
+ ports=ports)
+ if state is None:
+ # we're paused okay, so set maintenance and return
+ state = "maintenance"
+ message = "Paused. Use 'resume' action to resume normal service."
+ return state, message
+ return None, None
+
+
+def _ows_check_generic_interfaces(configs, required_interfaces):
+ """Check the complete contexts to determine the workload status.
+
+    - Checks for missing or incomplete contexts
+    - Juju-logs details of missing required data
+    - Determines the correct workload status
+    - Creates an appropriate message for status_set(...)
+
+    If there are no problems, the function returns None, None.
+
+ @param configs: a templating.OSConfigRenderer() object
+ @params required_interfaces: {generic_interface: [specific_interface], }
+ @returns state, message or None, None
+ """
+ incomplete_rel_data = incomplete_relation_data(configs,
+ required_interfaces)
+ state = None
message = None
- charm_state = None
- charm_message = None
+ missing_relations = set()
+ incomplete_relations = set()
- for generic_interface in incomplete_rel_data.keys():
+ for generic_interface, relations_states in incomplete_rel_data.items():
related_interface = None
missing_data = {}
# Related or not?
- for interface in incomplete_rel_data[generic_interface]:
- if incomplete_rel_data[generic_interface][interface].get('related'):
+ for interface, relation_state in relations_states.items():
+ if relation_state.get('related'):
related_interface = interface
- missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
- # No relation ID for the generic_interface
+ missing_data = relation_state.get('missing_data')
+ break
+ # No relation ID for the generic_interface?
if not related_interface:
juju_log("{} relation is missing and must be related for "
"functionality. ".format(generic_interface), 'WARN')
state = 'blocked'
- if generic_interface not in missing_relations:
- missing_relations.append(generic_interface)
+ missing_relations.add(generic_interface)
else:
- # Relation ID exists but no related unit
+            # Relation ID exists but no related unit
if not missing_data:
- # Edge case relation ID exists but departing
- if ('departed' in hook_name() or 'broken' in hook_name()) \
- and related_interface in hook_name():
+            # Edge case - relation ID exists but departing
+ _hook_name = hook_name()
+ if (('departed' in _hook_name or 'broken' in _hook_name) and
+ related_interface in _hook_name):
state = 'blocked'
- if generic_interface not in missing_relations:
- missing_relations.append(generic_interface)
+ missing_relations.add(generic_interface)
juju_log("{} relation's interface, {}, "
"relationship is departed or broken "
"and is required for functionality."
- "".format(generic_interface, related_interface), "WARN")
+ "".format(generic_interface, related_interface),
+ "WARN")
# Normal case relation ID exists but no related unit
# (joining)
else:
- juju_log("{} relations's interface, {}, is related but has "
- "no units in the relation."
- "".format(generic_interface, related_interface), "INFO")
+ juju_log("{} relations's interface, {}, is related but has"
+ " no units in the relation."
+ "".format(generic_interface, related_interface),
+ "INFO")
# Related unit exists and data missing on the relation
else:
juju_log("{} relation's interface, {}, is related awaiting "
@@ -930,9 +1030,8 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None, servic
", ".join(missing_data)), "INFO")
if state != 'blocked':
state = 'waiting'
- if generic_interface not in incomplete_relations \
- and generic_interface not in missing_relations:
- incomplete_relations.append(generic_interface)
+ if generic_interface not in missing_relations:
+ incomplete_relations.add(generic_interface)
if missing_relations:
message = "Missing relations: {}".format(", ".join(missing_relations))
@@ -945,9 +1044,22 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None, servic
"".format(", ".join(incomplete_relations))
state = 'waiting'
- # Run charm specific checks
- if charm_func:
- charm_state, charm_message = charm_func(configs)
+ return state, message
+
+
+def _ows_check_charm_func(state, message, charm_func_with_configs):
+ """Run a custom check function for the charm to see if it wants to
+ change the state. This is only run if not in 'maintenance' and
+ tests to see if the new state is more important that the previous
+ one determined by the interfaces/relations check.
+
+ @param state: the previously determined state so far.
+ @param message: the user orientated message so far.
+    @param charm_func_with_configs: a callable that returns state, message
+ @returns state, message strings.
+ """
+ if charm_func_with_configs:
+ charm_state, charm_message = charm_func_with_configs()
if charm_state != 'active' and charm_state != 'unknown':
state = workload_state_compare(state, charm_state)
if message:
@@ -956,72 +1068,151 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None, servic
message = "{}, {}".format(message, charm_message)
else:
message = charm_message
+ return state, message
- # If the charm thinks the unit is active, check that the actual services
- # really are active.
- if services is not None and state == 'active':
- # if we're passed the dict() then just grab the values as a list.
- if isinstance(services, dict):
- services = services.values()
- # either extract the list of services from the dictionary, or if
- # it is a simple string, use that. i.e. works with mixed lists.
- _s = []
- for s in services:
- if isinstance(s, dict) and 'service' in s:
- _s.append(s['service'])
- if isinstance(s, str):
- _s.append(s)
- services_running = [service_running(s) for s in _s]
- if not all(services_running):
- not_running = [s for s, running in zip(_s, services_running)
- if not running]
- message = ("Services not running that should be: {}"
- .format(", ".join(not_running)))
+
+def _ows_check_services_running(services, ports):
+ """Check that the services that should be running are actually running
+ and that any ports specified are being listened to.
+
+ @param services: list of strings OR dictionary specifying services/ports
+ @param ports: list of ports
+ @returns state, message: strings or None, None
+ """
+ messages = []
+ state = None
+ if services is not None:
+ services = _extract_services_list_helper(services)
+ services_running, running = _check_running_services(services)
+ if not all(running):
+ messages.append(
+ "Services not running that should be: {}"
+ .format(", ".join(_filter_tuples(services_running, False))))
state = 'blocked'
# also verify that the ports that should be open are open
# NB, that ServiceManager objects only OPTIONALLY have ports
- port_map = OrderedDict([(s['service'], s['ports'])
- for s in services if 'ports' in s])
- if state == 'active' and port_map:
- all_ports = list(itertools.chain(*port_map.values()))
- ports_open = [port_has_listener('0.0.0.0', p)
- for p in all_ports]
- if not all(ports_open):
- not_opened = [p for p, opened in zip(all_ports, ports_open)
- if not opened]
- map_not_open = OrderedDict()
- for service, ports in port_map.items():
- closed_ports = set(ports).intersection(not_opened)
- if closed_ports:
- map_not_open[service] = closed_ports
- # find which service has missing ports. They are in service
- # order which makes it a bit easier.
- message = (
- "Services with ports not open that should be: {}"
- .format(
- ", ".join([
- "{}: [{}]".format(
- service,
- ", ".join([str(v) for v in ports]))
- for service, ports in map_not_open.items()])))
- state = 'blocked'
-
- if ports is not None and state == 'active':
- # and we can also check ports which we don't know the service for
- ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
+ map_not_open, ports_open = (
+ _check_listening_on_services_ports(services))
if not all(ports_open):
- message = (
+ # find which service has missing ports. They are in service
+ # order which makes it a bit easier.
+ message_parts = {service: ", ".join([str(v) for v in open_ports])
+ for service, open_ports in map_not_open.items()}
+ message = ", ".join(
+ ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
+ messages.append(
+ "Services with ports not open that should be: {}"
+ .format(message))
+ state = 'blocked'
+
+ if ports is not None:
+ # and we can also check ports which we don't know the service for
+ ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
+ if not all(ports_open_bools):
+ messages.append(
"Ports which should be open, but are not: {}"
- .format(", ".join([str(p) for p, v in zip(ports, ports_open)
+ .format(", ".join([str(p) for p, v in ports_open
if not v])))
state = 'blocked'
- # Set to active if all requirements have been met
- if state == 'active':
- message = "Unit is ready"
- juju_log(message, "INFO")
+ if state is not None:
+ message = "; ".join(messages)
+ return state, message
- status_set(state, message)
+ return None, None
+
+
+def _extract_services_list_helper(services):
+ """Extract a OrderedDict of {service: [ports]} of the supplied services
+ for use by the other functions.
+
+ The services object can either be:
+ - None : no services were passed (an empty dict is returned)
+ - a list of strings
+ - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+ - An array of [{'service': service_name, ...}, ...]
+
+ @param services: see above
+ @returns OrderedDict(service: [ports], ...)
+ """
+ if services is None:
+ return {}
+ if isinstance(services, dict):
+ services = services.values()
+ # either extract the list of services from the dictionary, or if
+ # it is a simple string, use that. i.e. works with mixed lists.
+ _s = OrderedDict()
+ for s in services:
+ if isinstance(s, dict) and 'service' in s:
+ _s[s['service']] = s.get('ports', [])
+ if isinstance(s, str):
+ _s[s] = []
+ return _s
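A small sketch of the normalisation this helper performs on the mixed service specs it accepts (service names and ports are assumptions):

    # Mixed list of dict and string specs
    specs = [{'service': 'haproxy', 'ports': [80, 443]}, 'apache2']
    print(_extract_services_list_helper(specs))
    # -> OrderedDict([('haproxy', [80, 443]), ('apache2', [])])

    # A plain list of names yields empty port lists
    print(_extract_services_list_helper(['haproxy', 'apache2']))
    # -> OrderedDict([('haproxy', []), ('apache2', [])])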
+
+
+def _check_running_services(services):
+ """Check that the services dict provided is actually running and provide
+ a list of (service, boolean) tuples for each service.
+
+ Returns both a zipped list of (service, boolean) and a list of booleans
+ in the same order as the services.
+
+ @param services: OrderedDict of strings: [ports], one for each service to
+ check.
+ @returns [(service, boolean), ...], : results for checks
+ [boolean] : just the result of the service checks
+ """
+ services_running = [service_running(s) for s in services]
+ return list(zip(services, services_running)), services_running
+
+
+def _check_listening_on_services_ports(services, test=False):
+ """Check that the unit is actually listening (has the port open) on the
+ ports that the service specifies are open. If test is True then the
+ function returns the services with ports that are open rather than
+ closed.
+
+ Returns an OrderedDict of service: ports and a list of booleans
+
+ @param services: OrderedDict(service: [port, ...], ...)
+    @param test: default=False; if False report closed ports, otherwise open ones.
+ @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
+ """
+ test = not(not(test)) # ensure test is True or False
+ all_ports = list(itertools.chain(*services.values()))
+ ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
+ map_ports = OrderedDict()
+ matched_ports = [p for p, opened in zip(all_ports, ports_states)
+ if opened == test] # essentially opened xor test
+ for service, ports in services.items():
+ set_ports = set(ports).intersection(matched_ports)
+ if set_ports:
+ map_ports[service] = set_ports
+ return map_ports, ports_states
+
+
+def _check_listening_on_ports_list(ports):
+ """Check that the ports list given are being listened to
+
+ Returns a list of ports being listened to and a list of the
+ booleans.
+
+    @param ports: LIST of port numbers.
+ @returns [(port_num, boolean), ...], [boolean]
+ """
+ ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
+    return list(zip(ports, ports_open)), ports_open
+
+
+def _filter_tuples(services_states, state):
+ """Return a simple list from a list of tuples according to the condition
+
+ @param services_states: LIST of (string, boolean): service and running
+ state.
+ @param state: Boolean to match the tuple against.
+ @returns [LIST of strings] that matched the tuple RHS.
+ """
+ return [s for s, b in services_states if b == state]
def workload_state_compare(current_workload_state, workload_state):
@@ -1046,8 +1237,7 @@ def workload_state_compare(current_workload_state, workload_state):
def incomplete_relation_data(configs, required_interfaces):
- """
- Check complete contexts against required_interfaces
+ """Check complete contexts against required_interfaces
Return dictionary of incomplete relation data.
configs is an OSConfigRenderer object with configs registered
@@ -1072,19 +1262,13 @@ def incomplete_relation_data(configs, required_interfaces):
'shared-db': {'related': True}}}
"""
complete_ctxts = configs.complete_contexts()
- incomplete_relations = []
- for svc_type in required_interfaces.keys():
- # Avoid duplicates
- found_ctxt = False
- for interface in required_interfaces[svc_type]:
- if interface in complete_ctxts:
- found_ctxt = True
- if not found_ctxt:
- incomplete_relations.append(svc_type)
- incomplete_context_data = {}
- for i in incomplete_relations:
- incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
- return incomplete_context_data
+ incomplete_relations = [
+ svc_type
+ for svc_type, interfaces in required_interfaces.items()
+ if not set(interfaces).intersection(complete_ctxts)]
+ return {
+ i: configs.get_incomplete_context_data(required_interfaces[i])
+ for i in incomplete_relations}
def do_action_openstack_upgrade(package, upgrade_callback, configs):
@@ -1145,3 +1329,247 @@ def remote_restart(rel_name, remote_service=None):
relation_set(relation_id=rid,
relation_settings=trigger,
)
+
+
+def check_actually_paused(services=None, ports=None):
+ """Check that services listed in the services object and and ports
+ are actually closed (not listened to), to verify that the unit is
+ properly paused.
+
+ @param services: See _extract_services_list_helper
+    @returns state, message: None, None if the unit is paused correctly,
+        otherwise 'blocked' and a description of the problem for status_set
+ """
+ state = None
+ message = None
+ messages = []
+ if services is not None:
+ services = _extract_services_list_helper(services)
+ services_running, services_states = _check_running_services(services)
+ if any(services_states):
+ # there shouldn't be any running so this is a problem
+ messages.append("these services running: {}"
+ .format(", ".join(
+ _filter_tuples(services_running, True))))
+ state = "blocked"
+ ports_open, ports_open_bools = (
+ _check_listening_on_services_ports(services, True))
+ if any(ports_open_bools):
+ message_parts = {service: ", ".join([str(v) for v in open_ports])
+ for service, open_ports in ports_open.items()}
+ message = ", ".join(
+ ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
+ messages.append(
+ "these service:ports are open: {}".format(message))
+ state = 'blocked'
+ if ports is not None:
+ ports_open, bools = _check_listening_on_ports_list(ports)
+ if any(bools):
+ messages.append(
+ "these ports which should be closed, but are open: {}"
+ .format(", ".join([str(p) for p, v in ports_open if v])))
+ state = 'blocked'
+ if messages:
+ message = ("Services should be paused but {}"
+ .format(", ".join(messages)))
+ return state, message
+
+
+def set_unit_paused():
+ """Set the unit to a paused state in the local kv() store.
+ This does NOT actually pause the unit
+ """
+ with unitdata.HookData()() as t:
+ kv = t[0]
+ kv.set('unit-paused', True)
+
+
+def clear_unit_paused():
+ """Clear the unit from a paused state in the local kv() store
+ This does NOT actually restart any services - it only clears the
+ local state.
+ """
+ with unitdata.HookData()() as t:
+ kv = t[0]
+ kv.set('unit-paused', False)
+
+
+def is_unit_paused_set():
+ """Return the state of the kv().get('unit-paused').
+ This does NOT verify that the unit really is paused.
+
+    To help with units that don't have HookData() (e.g. in tests),
+    return False if this raises an exception.
+ """
+ try:
+ with unitdata.HookData()() as t:
+ kv = t[0]
+ # transform something truth-y into a Boolean.
+ return not(not(kv.get('unit-paused')))
+ except:
+ return False
+
+
+def pause_unit(assess_status_func, services=None, ports=None,
+ charm_func=None):
+ """Pause a unit by stopping the services and setting 'unit-paused'
+ in the local kv() store.
+
+ Also checks that the services have stopped and ports are no longer
+ being listened to.
+
+ An optional charm_func() can be called that can either raise an
+    Exception or return a non-None message to indicate that the unit
+ didn't pause cleanly.
+
+ The signature for charm_func is:
+ charm_func() -> message: string
+
+ charm_func() is executed after any services are stopped, if supplied.
+
+ The services object can either be:
+ - None : no services were passed (an empty dict is returned)
+ - a list of strings
+ - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+ - An array of [{'service': service_name, ...}, ...]
+
+ @param assess_status_func: (f() -> message: string | None) or None
+ @param services: OPTIONAL see above
+    @param ports: OPTIONAL list of ports
+ @param charm_func: function to run for custom charm pausing.
+ @returns None
+ @raises Exception(message) on an error for action_fail().
+ """
+ services = _extract_services_list_helper(services)
+ messages = []
+ if services:
+ for service in services.keys():
+ stopped = service_pause(service)
+ if not stopped:
+ messages.append("{} didn't stop cleanly.".format(service))
+ if charm_func:
+ try:
+ message = charm_func()
+ if message:
+ messages.append(message)
+ except Exception as e:
+            messages.append(str(e))
+ set_unit_paused()
+ if assess_status_func:
+ message = assess_status_func()
+ if message:
+ messages.append(message)
+ if messages:
+ raise Exception("Couldn't pause: {}".format("; ".join(messages)))
+
+
+def resume_unit(assess_status_func, services=None, ports=None,
+ charm_func=None):
+ """Resume a unit by starting the services and clearning 'unit-paused'
+ in the local kv() store.
+
+ Also checks that the services have started and ports are being listened to.
+
+ An optional charm_func() can be called that can either raise an
+    Exception or return a non-None message to indicate that the unit
+ didn't resume cleanly.
+
+ The signature for charm_func is:
+ charm_func() -> message: string
+
+ charm_func() is executed after any services are started, if supplied.
+
+ The services object can either be:
+ - None : no services were passed (an empty dict is returned)
+ - a list of strings
+ - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+ - An array of [{'service': service_name, ...}, ...]
+
+ @param assess_status_func: (f() -> message: string | None) or None
+ @param services: OPTIONAL see above
+    @param ports: OPTIONAL list of ports
+ @param charm_func: function to run for custom charm resuming.
+ @returns None
+ @raises Exception(message) on an error for action_fail().
+ """
+ services = _extract_services_list_helper(services)
+ messages = []
+ if services:
+ for service in services.keys():
+ started = service_resume(service)
+ if not started:
+ messages.append("{} didn't start cleanly.".format(service))
+ if charm_func:
+ try:
+ message = charm_func()
+ if message:
+ messages.append(message)
+ except Exception as e:
+            messages.append(str(e))
+ clear_unit_paused()
+ if assess_status_func:
+ message = assess_status_func()
+ if message:
+ messages.append(message)
+ if messages:
+ raise Exception("Couldn't resume: {}".format("; ".join(messages)))
+
+
+def make_assess_status_func(*args, **kwargs):
+ """Creates an assess_status_func() suitable for handing to pause_unit()
+ and resume_unit().
+
+ This uses the _determine_os_workload_status(...) function to determine
+ what the workload_status should be for the unit. If the unit is
+ not in maintenance or active states, then the message is returned to
+ the caller. This is so an action that doesn't result in either a
+ complete pause or complete resume can signal failure with an action_fail()
+ """
+ def _assess_status_func():
+ state, message = _determine_os_workload_status(*args, **kwargs)
+ status_set(state, message)
+ if state not in ['maintenance', 'active']:
+ return message
+ return None
+
+ return _assess_status_func
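As a usage sketch, a charm's pause action (service, interface and port names are illustrative) would combine these helpers like so:

    # Hypothetical actions/pause implementation
    def pause(configs):
        f = make_assess_status_func(
            configs,
            required_interfaces={'identity': ['identity-service']},
            services=['cinder-volume'], ports=[8776])
        # Stops the services, records 'unit-paused' in the kv() store, then
        # runs f() to verify the unit really reports a paused state.
        pause_unit(f, services=['cinder-volume'], ports=[8776])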
+
+
+def pausable_restart_on_change(restart_map, stopstart=False,
+ restart_functions=None):
+ """A restart_on_change decorator that checks to see if the unit is
+ paused. If it is paused then the decorated function doesn't fire.
+
+ This is provided as a helper, as the @restart_on_change(...) decorator
+ is in core.host, yet the openstack specific helpers are in this file
+ (contrib.openstack.utils). Thus, this needs to be an optional feature
+ for openstack charms (or charms that wish to use the openstack
+ pause/resume type features).
+
+ It is used as follows:
+
+ from contrib.openstack.utils import (
+ pausable_restart_on_change as restart_on_change)
+
+ @restart_on_change(restart_map, stopstart=)
+ def some_hook(...):
+ pass
+
+ see core.utils.restart_on_change() for more details.
+
+ @param f: the function to decorate
+ @param restart_map: the restart map {conf_file: [services]}
+ @param stopstart: DEFAULT false; whether to stop, start or just restart
+ @returns decorator to use a restart_on_change with pausability
+ """
+ def wrap(f):
+ @functools.wraps(f)
+ def wrapped_f(*args, **kwargs):
+ if is_unit_paused_set():
+ return f(*args, **kwargs)
+ # otherwise, normal restart_on_change functionality
+ return restart_on_change_helper(
+ (lambda: f(*args, **kwargs)), restart_map, stopstart,
+ restart_functions)
+ return wrapped_f
+ return wrap
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index fb1bee3..1b4b1de 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -24,6 +24,8 @@
# Adam Gandelman
#
import bisect
+import errno
+import hashlib
import six
import os
@@ -163,7 +165,7 @@ class Pool(object):
:return: None
"""
# read-only is easy, writeback is much harder
- mode = get_cache_mode(cache_pool)
+ mode = get_cache_mode(self.service, cache_pool)
if mode == 'readonly':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@@ -171,7 +173,7 @@ class Pool(object):
elif mode == 'writeback':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
# Flush the cache and wait for it to return
- check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
+ check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@@ -259,6 +261,134 @@ class ErasurePool(Pool):
Returns json formatted output"""
+def get_mon_map(service):
+ """
+ Returns the current monitor map.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :return: json string. :raise: ValueError if the monmap fails to parse.
+ Also raises CalledProcessError if our ceph command fails
+ """
+ try:
+ mon_status = check_output(
+ ['ceph', '--id', service,
+ 'mon_status', '--format=json'])
+ try:
+ return json.loads(mon_status)
+ except ValueError as v:
+ log("Unable to parse mon_status json: {}. Error: {}".format(
+                mon_status, str(v)))
+ raise
+ except CalledProcessError as e:
+ log("mon_status command failed with message: {}".format(
+            str(e)))
+ raise
+
+
+def hash_monitor_names(service):
+ """
+ Uses the get_mon_map() function to get information about the monitor
+ cluster.
+ Hash the name of each monitor. Return a sorted list of monitor hashes
+ in an ascending order.
+ :param service: six.string_types. The Ceph user name to run the command under
+    :rtype: list. Sorted list of sha224 hashes of the monitor names, or
+        None if the monmap lists no monitors.
+ """
+ try:
+ hash_list = []
+ monitor_list = get_mon_map(service=service)
+ if monitor_list['monmap']['mons']:
+ for mon in monitor_list['monmap']['mons']:
+ hash_list.append(
+ hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
+ return sorted(hash_list)
+ else:
+ return None
+ except (ValueError, CalledProcessError):
+ raise
+
+
+def monitor_key_delete(service, key):
+ """
+    Delete a key and value pair from the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to delete.
+ """
+ try:
+ check_output(
+ ['ceph', '--id', service,
+ 'config-key', 'del', str(key)])
+ except CalledProcessError as e:
+ log("Monitor config-key put failed with message: {}".format(
+ e.output))
+ raise
+
+
+def monitor_key_set(service, key, value):
+ """
+ Sets a key value pair on the monitor cluster.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param key: six.string_types. The key to set.
+ :param value: The value to set. This will be converted to a string
+ before setting
+ """
+ try:
+ check_output(
+ ['ceph', '--id', service,
+ 'config-key', 'put', str(key), str(value)])
+ except CalledProcessError as e:
+ log("Monitor config-key put failed with message: {}".format(
+ e.output))
+ raise
+
+
+def monitor_key_get(service, key):
+ """
+ Gets the value of an existing key in the monitor cluster.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param key: six.string_types. The key to search for.
+ :return: Returns the value of that key or None if not found.
+ """
+ try:
+ output = check_output(
+ ['ceph', '--id', service,
+ 'config-key', 'get', str(key)])
+ return output
+ except CalledProcessError as e:
+ log("Monitor config-key get failed with message: {}".format(
+ e.output))
+ return None
+
+
+def monitor_key_exists(service, key):
+ """
+ Searches for the existence of a key in the monitor cluster.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param key: six.string_types. The key to search for
+ :return: Returns True if the key exists, False if not and raises an
+ exception if an unknown error occurs. :raise: CalledProcessError if
+ an unknown error occurs
+ """
+ try:
+ check_call(
+ ['ceph', '--id', service,
+ 'config-key', 'exists', str(key)])
+        # If check_call succeeds the key exists; ceph signals a missing
+        # key with an ENOENT return code, handled below
+ return True
+ except CalledProcessError as e:
+ if e.returncode == errno.ENOENT:
+ return False
+ else:
+ log("Unknown error from ceph config-get exists: {} {}".format(
+ e.returncode, e.output))
+ raise
+
+
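A round-trip sketch of the new config-key helpers; the 'admin' user and the key name are assumptions:

    # Store, probe, fetch and delete a value in the monitors' key store
    monitor_key_set('admin', 'cinder-ceph-upgrade', 'in-progress')
    if monitor_key_exists('admin', 'cinder-ceph-upgrade'):
        state = monitor_key_get('admin', 'cinder-ceph-upgrade')
    monitor_key_delete('admin', 'cinder-ceph-upgrade')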
def get_erasure_profile(service, name):
"""
:param service: six.string_types. The Ceph user name to run the command under
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 2dd70bc..0132129 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -912,6 +912,24 @@ def payload_status_set(klass, pid, status):
subprocess.check_call(cmd)
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def resource_get(name):
+ """used to fetch the resource path of the given name.
+
+ must match a name of defined resource in metadata.yaml
+
+ returns either a path or False if resource not available
+ """
+ if not name:
+ return False
+
+ cmd = ['resource-get', name]
+ try:
+ return subprocess.check_output(cmd).decode('UTF-8')
+ except subprocess.CalledProcessError:
+ return False
+
+
@cached
def juju_version():
"""Full version string (eg. '1.23.3.1-trusty-amd64')"""
@@ -976,3 +994,16 @@ def _run_atexit():
for callback, args, kwargs in reversed(_atexit):
callback(*args, **kwargs)
del _atexit[:]
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+ '''
+ Retrieve the primary network address for a named binding
+
+    :param binding: string. The name of a relation or extra-binding
+ :return: string. The primary IP address for the named binding
+ :raise: NotImplementedError if run on Juju < 2.0
+ '''
+ cmd = ['network-get', '--primary-address', binding]
+ return subprocess.check_output(cmd).strip()
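Callers are expected to treat the NotImplementedError as 'running on Juju < 2.0' and fall back to older address sources, e.g. (the binding name is an assumption):

    try:
        addr = network_get_primary_address('internal')
    except NotImplementedError:
        # Juju < 2.0: no network-get, fall back to the unit address
        addr = unit_get('private-address')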
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index a772090..bfea6a1 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -30,6 +30,8 @@ import random
import string
import subprocess
import hashlib
+import functools
+import itertools
from contextlib import contextmanager
from collections import OrderedDict
@@ -126,6 +128,13 @@ def service(action, service_name):
return subprocess.call(cmd) == 0
+def systemv_services_running():
+    """Return the names of System V services that 'service --status-all'
+    reports as running (rows marked '[ + ]')."""
+ output = subprocess.check_output(
+ ['service', '--status-all'],
+ stderr=subprocess.STDOUT).decode('UTF-8')
+ return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row]
+
+
def service_running(service_name):
"""Determine whether a system service is running"""
if init_is_systemd():
@@ -138,11 +147,15 @@ def service_running(service_name):
except subprocess.CalledProcessError:
return False
else:
+ # This works for upstart scripts where the 'service' command
+ # returns a consistent string to represent running 'start/running'
if ("start/running" in output or "is running" in output or
"up and running" in output):
return True
- else:
- return False
+ # Check System V scripts init script return codes
+ if service_name in systemv_services_running():
+ return True
+ return False
def service_available(service_name):
@@ -410,7 +423,7 @@ class ChecksumError(ValueError):
pass
-def restart_on_change(restart_map, stopstart=False):
+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
"""Restart services based on configuration files changing
This function is used a decorator, for example::
@@ -428,27 +441,58 @@ def restart_on_change(restart_map, stopstart=False):
restarted if any file matching the pattern got changed, created
or removed. Standard wildcards are supported, see documentation
for the 'glob' module for more information.
+
+ @param restart_map: {path_file_name: [service_name, ...]
+ @param stopstart: DEFAULT false; whether to stop, start OR restart
+ @param restart_functions: nonstandard functions to use to restart services
+ {svc: func, ...}
+ @returns result from decorated function
"""
def wrap(f):
+ @functools.wraps(f)
def wrapped_f(*args, **kwargs):
- checksums = {path: path_hash(path) for path in restart_map}
- f(*args, **kwargs)
- restarts = []
- for path in restart_map:
- if path_hash(path) != checksums[path]:
- restarts += restart_map[path]
- services_list = list(OrderedDict.fromkeys(restarts))
- if not stopstart:
- for service_name in services_list:
- service('restart', service_name)
- else:
- for action in ['stop', 'start']:
- for service_name in services_list:
- service(action, service_name)
+ return restart_on_change_helper(
+ (lambda: f(*args, **kwargs)), restart_map, stopstart,
+ restart_functions)
return wrapped_f
return wrap
+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
+ restart_functions=None):
+ """Helper function to perform the restart_on_change function.
+
+ This is provided for decorators to restart services if files described
+ in the restart_map have changed after an invocation of lambda_f().
+
+ @param lambda_f: function to call.
+ @param restart_map: {file: [service, ...]}
+ @param stopstart: whether to stop, start or restart a service
+ @param restart_functions: nonstandard functions to use to restart services
+ {svc: func, ...}
+ @returns result of lambda_f()
+ """
+ if restart_functions is None:
+ restart_functions = {}
+ checksums = {path: path_hash(path) for path in restart_map}
+ r = lambda_f()
+ # create a list of lists of the services to restart
+ restarts = [restart_map[path]
+ for path in restart_map
+ if path_hash(path) != checksums[path]]
+ # create a flat list of ordered services without duplicates from lists
+ services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
+ if services_list:
+ actions = ('stop', 'start') if stopstart else ('restart',)
+ for service_name in services_list:
+ if service_name in restart_functions:
+ restart_functions[service_name](service_name)
+ else:
+ for action in actions:
+ service(action, service_name)
+ return r
+
+
def lsb_release():
"""Return /etc/lsb-release in a dict"""
d = {}
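A sketch of the new restart_functions hook: for a named service, a custom callable replaces the default service('restart', ...) invocation. The haproxy example below is an assumption:

    def reload_haproxy(service_name):
        # nonstandard restarter: reload instead of a full restart
        service('reload', service_name)

    @restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']},
                       restart_functions={'haproxy': reload_haproxy})
    def write_config():
        pass  # render /etc/haproxy/haproxy.cfg here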
diff --git a/requirements.txt b/requirements.txt
index 426002d..6a3271b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,7 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
+pbr>=1.8.0,<1.9.0
PyYAML>=3.1.0
simplejson>=2.2.0
netifaces>=0.10.4
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
index ec386fb..6ece7d2 100644
--- a/tests/basic_deployment.py
+++ b/tests/basic_deployment.py
@@ -210,6 +210,9 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
services[self.ceph1_sentry] = ceph_services
services[self.ceph2_sentry] = ceph_services
+ if self._get_openstack_release() >= self.trusty_liberty:
+ services[self.keystone_sentry] = ['apache2']
+
ret = u.validate_services_by_name(services)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
@@ -518,6 +521,8 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
auth_uri = 'http://' + rel_ks_ci['auth_host'] + \
':' + rel_ks_ci['service_port'] + '/'
+ auth_url = ('http://%s:%s/' %
+ (rel_ks_ci['auth_host'], rel_ks_ci['auth_port']))
expected = {
'DEFAULT': {
@@ -534,7 +539,8 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
'admin_user': rel_ks_ci['service_username'],
'admin_password': rel_ks_ci['service_password'],
'admin_tenant_name': rel_ks_ci['service_tenant'],
- 'auth_uri': auth_uri
+ 'auth_uri': auth_uri,
+ 'signing_dir': '/var/cache/cinder'
},
'cinder-ceph': {
'volume_backend_name': 'cinder-ceph',
@@ -550,6 +556,23 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
'rabbit_password': rel_mq_ci['password'],
'rabbit_host': rel_mq_ci['hostname'],
}
+ if self._get_openstack_release() >= self.trusty_liberty:
+ expected['keystone_authtoken'] = {
+ 'auth_uri': auth_uri.rstrip('/'),
+ 'auth_url': auth_url.rstrip('/'),
+ 'auth_plugin': 'password',
+ 'project_domain_id': 'default',
+ 'user_domain_id': 'default',
+ 'project_name': 'services',
+ 'username': rel_ks_ci['service_username'],
+ 'password': rel_ks_ci['service_password'],
+ 'signing_dir': '/var/cache/cinder'
+ }
+
+ if self._get_openstack_release() == self.trusty_kilo:
+ expected['keystone_authtoken']['auth_uri'] = auth_uri
+ expected['keystone_authtoken']['identity_uri'] = \
+ auth_url.strip('/')
if self._get_openstack_release() >= self.trusty_kilo:
# Kilo or later
diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py
index 2591a9b..7e5c25a 100644
--- a/tests/charmhelpers/contrib/amulet/utils.py
+++ b/tests/charmhelpers/contrib/amulet/utils.py
@@ -601,7 +601,7 @@ class AmuletUtils(object):
return ('Process name count mismatch. expected, actual: {}, '
'{}'.format(len(expected), len(actual)))
- for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
+ for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
zip(e_proc_names.items(), a_proc_names.items()):
if e_proc_name != a_proc_name:
return ('Process name mismatch. expected, actual: {}, '
@@ -610,25 +610,31 @@ class AmuletUtils(object):
a_pids_length = len(a_pids)
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
- e_pids_length, a_pids_length,
+ e_pids, a_pids_length,
a_pids))
- # If expected is not bool, ensure PID quantities match
- if not isinstance(e_pids_length, bool) and \
- a_pids_length != e_pids_length:
+            # If expected is a list, ensure at least one PID quantity matches
+ if isinstance(e_pids, list) and \
+ a_pids_length not in e_pids:
+ return fail_msg
+ # If expected is not bool and not list,
+ # ensure PID quantities match
+ elif not isinstance(e_pids, bool) and \
+ not isinstance(e_pids, list) and \
+ a_pids_length != e_pids:
return fail_msg
# If expected is bool True, ensure 1 or more PIDs exist
- elif isinstance(e_pids_length, bool) and \
- e_pids_length is True and a_pids_length < 1:
+ elif isinstance(e_pids, bool) and \
+ e_pids is True and a_pids_length < 1:
return fail_msg
# If expected is bool False, ensure 0 PIDs exist
- elif isinstance(e_pids_length, bool) and \
- e_pids_length is False and a_pids_length != 0:
+ elif isinstance(e_pids, bool) and \
+ e_pids is False and a_pids_length != 0:
return fail_msg
else:
self.log.debug('PID check OK: {} {} {}: '
'{}'.format(e_sentry_name, e_proc_name,
- e_pids_length, a_pids))
+ e_pids, a_pids))
return None
def validate_list_of_identical_dicts(self, list_of_dicts):
@@ -782,15 +788,20 @@ class AmuletUtils(object):
# amulet juju action helpers:
def run_action(self, unit_sentry, action,
- _check_output=subprocess.check_output):
+ _check_output=subprocess.check_output,
+ params=None):
"""Run the named action on a given unit sentry.
+        params is a dict of parameters to pass to the action
_check_output parameter is used for dependency injection.
@return action_id.
"""
unit_id = unit_sentry.info["unit_name"]
command = ["juju", "action", "do", "--format=json", unit_id, action]
+ if params is not None:
+            for key, value in params.items():
+ command.append("{}={}".format(key, value))
self.log.info("Running command: %s\n" % " ".join(command))
output = _check_output(command, universal_newlines=True)
data = json.loads(output)
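With the new params argument, a test can pass action parameters straight through to 'juju action do'; the action and parameter below are assumptions, and `u` is an AmuletUtils instance:

    # Runs: juju action do --format=json <unit> pause osd=all
    action_id = u.run_action(sentry_unit, 'pause', params={'osd': 'all'})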
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index d2ede32..d21c9c7 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
# Charms which can not use openstack-origin, ie. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
- 'cinder-backup']
+ 'cinder-backup', 'nexentaedge-data',
+ 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+ 'cinder-nexentaedge', 'nexentaedge-mgmt']
if self.openstack:
for svc in services:
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
index 388b60e..ef3bdcc 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -27,7 +27,11 @@ import cinderclient.v1.client as cinder_client
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client
-import novaclient.v1_1.client as nova_client
+from keystoneclient.auth.identity import v3 as keystone_id_v3
+from keystoneclient import session as keystone_session
+from keystoneclient.v3 import client as keystone_client_v3
+
+import novaclient.client as nova_client
import pika
import swiftclient
@@ -38,6 +42,8 @@ from charmhelpers.contrib.amulet.utils import (
DEBUG = logging.DEBUG
ERROR = logging.ERROR
+NOVA_CLIENT_VERSION = "2"
+
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
@@ -139,7 +145,7 @@ class OpenStackAmuletUtils(AmuletUtils):
return "role {} does not exist".format(e['name'])
return ret
- def validate_user_data(self, expected, actual):
+ def validate_user_data(self, expected, actual, api_version=None):
"""Validate user data.
Validate a list of actual user data vs a list of expected user
@@ -150,10 +156,15 @@ class OpenStackAmuletUtils(AmuletUtils):
for e in expected:
found = False
for act in actual:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'tenantId': act.tenantId,
- 'id': act.id}
- if e['name'] == a['name']:
+ if e['name'] == act.name:
+ a = {'enabled': act.enabled, 'name': act.name,
+ 'email': act.email, 'id': act.id}
+ if api_version == 3:
+ a['default_project_id'] = getattr(act,
+ 'default_project_id',
+ 'none')
+ else:
+ a['tenantId'] = act.tenantId
found = True
ret = self._validate_dict_data(e, a)
if ret:
@@ -188,15 +199,30 @@ class OpenStackAmuletUtils(AmuletUtils):
return cinder_client.Client(username, password, tenant, ept)
def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant):
+ tenant=None, api_version=None,
+ keystone_ip=None):
"""Authenticates admin user with the keystone admin endpoint."""
self.log.debug('Authenticating keystone admin...')
unit = keystone_sentry
- service_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
+ if not keystone_ip:
+ keystone_ip = unit.relation('shared-db',
+ 'mysql:shared-db')['private-address']
+ base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
+ if not api_version or api_version == 2:
+ ep = base_ep + "/v2.0"
+ return keystone_client.Client(username=user, password=password,
+ tenant_name=tenant, auth_url=ep)
+ else:
+ ep = base_ep + "/v3"
+ auth = keystone_id_v3.Password(
+ user_domain_name='admin_domain',
+ username=user,
+ password=password,
+ domain_name='admin_domain',
+ auth_url=ep,
+ )
+ sess = keystone_session.Session(auth=auth)
+ return keystone_client_v3.Client(session=sess)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
@@ -225,7 +251,8 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
- return nova_client.Client(username=user, api_key=password,
+ return nova_client.Client(NOVA_CLIENT_VERSION,
+ username=user, api_key=password,
project_id=tenant, auth_url=ep)
def authenticate_swift_user(self, keystone, user, password, tenant):