Update tox, requirements, add initial func tests framework

parent 70c1c02ba4
commit 718676787a
.gitignore (vendored) | 21

@@ -1,7 +1,16 @@
-*pyc
-.bzr
-.venv
-.testrepository/
-.tox/
 .coverage
 bin
+tags
+.tox
+.testrepository
+*.sw[nop]
+*.pyc
+.unit-state.db
+tests/*.img
+trusty
+.idea
+.stestr
+.local
+.pydevproject
+func-results.json
+__pycache__
.pydevproject | 8 (deleted)

@@ -1,8 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<?eclipse-pydev version="1.0"?><pydev_project>
-<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
-<path>/glance-simplestreams-sync</path>
-</pydev_pathproperty>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
-</pydev_project>
Makefile | 21

@@ -1,23 +1,24 @@
 #!/usr/bin/make
 PYTHON := /usr/bin/env python
-CHARM_DIR := $(PWD)
-HOOKS_DIR := $(PWD)/hooks
-TEST_PREFIX := PYTHONPATH=$(HOOKS_DIR)
-
-clean:
-	find . -name '*.pyc' -delete
 
 lint:
 	@tox -e pep8
 
-unit_tests:
+test:
+	@echo Starting unit tests...
 	@tox -e py27
 
+functional_test:
+	@echo Starting functional tests...
+	@tox -e func27
+
 bin/charm_helpers_sync.py:
 	@mkdir -p bin
-	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
-	> bin/charm_helpers_sync.py
+	@curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py
 
 sync: bin/charm_helpers_sync.py
-	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
+	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
+	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
+
+all: test lint
actions/.gitkeep | 0 (new file)
charm-helpers-tests.yaml | 7 (new file)

@@ -0,0 +1,7 @@
repo: https://github.com/juju/charm-helpers
destination: tests/charmhelpers
include:
    - core
    - contrib.amulet
    - contrib.openstack.amulet
    - osplatform
lib/.gitkeep | 0 (new file)
requirements.txt | 1

@@ -10,4 +10,3 @@ Jinja2>=2.6  # BSD License (3 clause)
 six>=1.9.0
 dnspython>=1.12.0
 psutil>=1.1.1,<2.0.0
-python-neutronclient>=2.6.0
test-requirements.txt

@@ -11,13 +11,17 @@ requests==2.6.0
 # Liberty client lower constraints
 amulet>=1.14.3,<2.0
 bundletester>=0.6.1,<1.0
-python-ceilometerclient>=1.5.0,<2.0
-python-cinderclient>=1.4.0,<2.0
-python-glanceclient>=1.1.0,<2.0
-python-heatclient>=0.8.0,<1.0
-python-novaclient>=2.30.1,<3.0
-python-openstackclient>=1.7.0,<2.0
-python-swiftclient>=2.6.0,<3.0
+python-ceilometerclient>=1.5.0
+python-cinderclient>=1.4.0
+python-glanceclient>=1.1.0
+python-heatclient>=0.8.0
+python-keystoneclient>=1.7.1
+python-neutronclient>=3.1.0
+python-novaclient>=2.30.1
+python-openstackclient>=1.7.0
+python-swiftclient>=2.6.0
 pika>=0.10.0,<1.0
 distro-info
 # END: Amulet OpenStack Charm Helper Requirements
+# NOTE: workaround for 14.04 pip/tox
+pytz
tests/README.md | 9 (new file)

@@ -0,0 +1,9 @@
# Overview

This directory provides Amulet tests to verify basic deployment functionality
from the perspective of this charm, its requirements and its features, as
exercised in a subset of the full OpenStack deployment test bundle topology.

For full details on functional testing of OpenStack charms please refer to
the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing)
section of the OpenStack Charm Guide.
tests/basic_deployment.py | 681 (new file)

@@ -0,0 +1,681 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Basic glance amulet functional tests.
"""

import amulet
import time

from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment
)

from charmhelpers.contrib.openstack.amulet.utils import (
    OpenStackAmuletUtils,
    DEBUG,
    # ERROR
)

# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)


class GlanceBasicDeployment(OpenStackAmuletDeployment):
    """Amulet tests on a basic file-backed glance deployment.  Verify
    relations, service status, endpoint service catalog, create and
    delete new image."""

    SERVICES = ('apache2', 'haproxy', 'glance-api', 'glance-registry')

    def __init__(self, series=None, openstack=None, source=None,
                 stable=False):
        """Deploy the entire test environment."""
        super(GlanceBasicDeployment, self).__init__(series, openstack,
                                                    source, stable)
        self._add_services()
        self._add_relations()
        self._configure_services()
        self._deploy()

        u.log.info('Waiting on extended status checks...')
        exclude_services = []
        self._auto_wait_for_status(exclude_services=exclude_services)

        self.d.sentry.wait()
        self._initialize_tests()

    def _assert_services(self, should_run):
        u.get_unit_process_ids(
            {self.glance_sentry: self.SERVICES},
            expect_success=should_run)

    def _add_services(self):
        """Add services

           Add the services that we're testing, where glance is local,
           and the rest of the service are from lp branches that are
           compatible with the local charm (e.g. stable or next).
           """
        this_service = {'name': 'glance-simplestreams-sync'}
        other_services = [
            {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},
            {'name': 'glance'},
            {'name': 'rabbitmq-server'},
            {'name': 'keystone'},
        ]
        super(GlanceBasicDeployment, self)._add_services(this_service,
                                                         other_services)

    def _add_relations(self):
        """Add relations for the services."""
        relations = {'glance:identity-service': 'keystone:identity-service',
                     'glance:shared-db': 'percona-cluster:shared-db',
                     'keystone:shared-db': 'percona-cluster:shared-db',
                     'glance:amqp': 'rabbitmq-server:amqp'}
        super(GlanceBasicDeployment, self)._add_relations(relations)

    def _configure_services(self):
        """Configure all of the services."""
        glance_config = {}
        keystone_config = {
            'admin-password': 'openstack',
            'admin-token': 'ubuntutesting',
        }
        pxc_config = {
            'dataset-size': '25%',
            'max-connections': 1000,
            'root-password': 'ChangeMe123',
            'sst-password': 'ChangeMe123',
        }
        configs = {
            'glance': glance_config,
            'keystone': keystone_config,
            'percona-cluster': pxc_config,
        }
        super(GlanceBasicDeployment, self)._configure_services(configs)

    def _initialize_tests(self):
        """Perform final initialization before tests get run."""
        # Access the sentries for inspecting service units
        self.pxc_sentry = self.d.sentry['percona-cluster'][0]
        self.glance_sentry = self.d.sentry['glance'][0]
        self.keystone_sentry = self.d.sentry['keystone'][0]
        self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
        u.log.debug('openstack release val: {}'.format(
            self._get_openstack_release()))
        u.log.debug('openstack release str: {}'.format(
            self._get_openstack_release_string()))

        # Authenticate admin with keystone
        self.keystone_session, self.keystone = u.get_default_keystone_session(
            self.keystone_sentry,
            openstack_release=self._get_openstack_release())

        # Authenticate admin with glance endpoint
        self.glance = u.authenticate_glance_admin(self.keystone)

    def test_100_services(self):
        """Verify that the expected services are running on the
           corresponding service units."""
        services = {
            self.keystone_sentry: ['keystone'],
            self.glance_sentry: ['glance-api', 'glance-registry'],
            self.rabbitmq_sentry: ['rabbitmq-server']
        }
        if self._get_openstack_release() >= self.trusty_liberty:
            services[self.keystone_sentry] = ['apache2']
        ret = u.validate_services_by_name(services)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    def test_102_service_catalog(self):
        """Verify that the service catalog endpoint data is valid."""
        u.log.debug('Checking keystone service catalog...')
        endpoint_check = {
            'adminURL': u.valid_url,
            'id': u.not_null,
            'region': 'RegionOne',
            'publicURL': u.valid_url,
            'internalURL': u.valid_url
        }
        expected = {
            'image': [endpoint_check],
            'identity': [endpoint_check]
        }
        actual = self.keystone.service_catalog.get_endpoints()

        ret = u.validate_svc_catalog_endpoint_data(
            expected,
            actual,
            openstack_release=self._get_openstack_release())
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    def test_104_glance_endpoint(self):
        """Verify the glance endpoint data."""
        u.log.debug('Checking glance api endpoint data...')
        endpoints = self.keystone.endpoints.list()
        admin_port = internal_port = public_port = '9292'
        expected = {
            'id': u.not_null,
            'region': 'RegionOne',
            'adminurl': u.valid_url,
            'internalurl': u.valid_url,
            'publicurl': u.valid_url,
            'service_id': u.not_null
        }
        ret = u.validate_endpoint_data(
            endpoints,
            admin_port,
            internal_port,
            public_port,
            expected,
            openstack_release=self._get_openstack_release())
        if ret:
            amulet.raise_status(amulet.FAIL,
                                msg='glance endpoint: {}'.format(ret))

    def test_106_keystone_endpoint(self):
        """Verify the keystone endpoint data."""
        u.log.debug('Checking keystone api endpoint data...')
        endpoints = self.keystone.endpoints.list()
        admin_port = '35357'
        internal_port = public_port = '5000'
        expected = {
            'id': u.not_null,
            'region': 'RegionOne',
            'adminurl': u.valid_url,
            'internalurl': u.valid_url,
            'publicurl': u.valid_url,
            'service_id': u.not_null
        }
        ret = u.validate_endpoint_data(
            endpoints,
            admin_port,
            internal_port,
            public_port,
            expected,
            openstack_release=self._get_openstack_release())
        if ret:
            amulet.raise_status(amulet.FAIL,
                                msg='keystone endpoint: {}'.format(ret))

    def test_110_users(self):
        """Verify expected users."""
        u.log.debug('Checking keystone users...')
        if self._get_openstack_release() >= self.xenial_queens:
            expected = [
                {'name': 'glance',
                 'enabled': True,
                 'default_project_id': u.not_null,
                 'id': u.not_null,
                 'email': 'juju@localhost'}
            ]
            domain = self.keystone.domains.find(name='service_domain')
            actual = self.keystone.users.list(domain=domain)
            api_version = 3
        else:
            expected = [
                {'name': 'glance',
                 'enabled': True,
                 'tenantId': u.not_null,
                 'id': u.not_null,
                 'email': 'juju@localhost'},
                {'name': 'admin',
                 'enabled': True,
                 'tenantId': u.not_null,
                 'id': u.not_null,
                 'email': 'juju@localhost'}
            ]
            actual = self.keystone.users.list()
            api_version = 2
        ret = u.validate_user_data(expected, actual, api_version)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    def test_115_memcache(self):
        u.validate_memcache(self.glance_sentry,
                            '/etc/glance/glance-api.conf',
                            self._get_openstack_release(),
                            earliest_release=self.trusty_mitaka)
        u.validate_memcache(self.glance_sentry,
                            '/etc/glance/glance-registry.conf',
                            self._get_openstack_release(),
                            earliest_release=self.trusty_mitaka)

    def test_200_mysql_glance_db_relation(self):
        """Verify the mysql:glance shared-db relation data"""
        u.log.debug('Checking mysql to glance shared-db relation data...')
        unit = self.pxc_sentry
        relation = ['shared-db', 'glance:shared-db']
        expected = {
            'private-address': u.valid_ip,
            'db_host': u.valid_ip
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('mysql shared-db', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_201_glance_mysql_db_relation(self):
        """Verify the glance:mysql shared-db relation data"""
        u.log.debug('Checking glance to mysql shared-db relation data...')
        unit = self.glance_sentry
        relation = ['shared-db', 'percona-cluster:shared-db']
        expected = {
            'private-address': u.valid_ip,
            'hostname': u.valid_ip,
            'username': 'glance',
            'database': 'glance'
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('glance shared-db', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_202_keystone_glance_id_relation(self):
        """Verify the keystone:glance identity-service relation data"""
        u.log.debug('Checking keystone to glance id relation data...')
        unit = self.keystone_sentry
        relation = ['identity-service',
                    'glance:identity-service']
        expected = {
            'service_protocol': 'http',
            'service_tenant': 'services',
            'admin_token': 'ubuntutesting',
            'service_password': u.not_null,
            'service_port': '5000',
            'auth_port': '35357',
            'auth_protocol': 'http',
            'private-address': u.valid_ip,
            'auth_host': u.valid_ip,
            'service_username': 'glance',
            'service_tenant_id': u.not_null,
            'service_host': u.valid_ip
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('keystone identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_203_glance_keystone_id_relation(self):
        """Verify the glance:keystone identity-service relation data"""
        u.log.debug('Checking glance to keystone relation data...')
        unit = self.glance_sentry
        relation = ['identity-service',
                    'keystone:identity-service']
        expected = {
            'service': 'glance',
            'region': 'RegionOne',
            'public_url': u.valid_url,
            'internal_url': u.valid_url,
            'admin_url': u.valid_url,
            'private-address': u.valid_ip
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('glance identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_204_rabbitmq_glance_amqp_relation(self):
        """Verify the rabbitmq-server:glance amqp relation data"""
        u.log.debug('Checking rmq to glance amqp relation data...')
        unit = self.rabbitmq_sentry
        relation = ['amqp', 'glance:amqp']
        expected = {
            'private-address': u.valid_ip,
            'password': u.not_null,
            'hostname': u.valid_ip
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('rabbitmq amqp', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_205_glance_rabbitmq_amqp_relation(self):
        """Verify the glance:rabbitmq-server amqp relation data"""
        u.log.debug('Checking glance to rmq amqp relation data...')
        unit = self.glance_sentry
        relation = ['amqp', 'rabbitmq-server:amqp']
        expected = {
            'private-address': u.valid_ip,
            'vhost': 'openstack',
            'username': u.not_null
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('glance amqp', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def _get_keystone_authtoken_expected_dict(self, rel_ks_gl):
        """Return expected authtoken dict for OS release"""
        auth_uri = ('http://%s:%s/' %
                    (rel_ks_gl['auth_host'], rel_ks_gl['service_port']))
        auth_url = ('http://%s:%s/' %
                    (rel_ks_gl['auth_host'], rel_ks_gl['auth_port']))
        if self._get_openstack_release() >= self.xenial_queens:
            expected = {
                'keystone_authtoken': {
                    'auth_uri': auth_uri.rstrip('/'),
                    'auth_url': auth_url.rstrip('/'),
                    'auth_type': 'password',
                    'project_domain_name': 'service_domain',
                    'user_domain_name': 'service_domain',
                    'project_name': 'services',
                    'username': rel_ks_gl['service_username'],
                    'password': rel_ks_gl['service_password'],
                    'signing_dir': '/var/cache/glance'
                }
            }
        elif self._get_openstack_release() >= self.trusty_mitaka:
            expected = {
                'keystone_authtoken': {
                    'auth_uri': auth_uri.rstrip('/'),
                    'auth_url': auth_url.rstrip('/'),
                    'auth_type': 'password',
                    'project_domain_name': 'default',
                    'user_domain_name': 'default',
                    'project_name': 'services',
                    'username': rel_ks_gl['service_username'],
                    'password': rel_ks_gl['service_password'],
                    'signing_dir': '/var/cache/glance'
                }
            }
        elif self._get_openstack_release() >= self.trusty_liberty:
            expected = {
                'keystone_authtoken': {
                    'auth_uri': auth_uri.rstrip('/'),
                    'auth_url': auth_url.rstrip('/'),
                    'auth_plugin': 'password',
                    'project_domain_id': 'default',
                    'user_domain_id': 'default',
                    'project_name': 'services',
                    'username': rel_ks_gl['service_username'],
                    'password': rel_ks_gl['service_password'],
                    'signing_dir': '/var/cache/glance'
                }
            }
        elif self._get_openstack_release() >= self.trusty_kilo:
            expected = {
                'keystone_authtoken': {
                    'project_name': 'services',
                    'username': 'glance',
                    'password': rel_ks_gl['service_password'],
                    'auth_uri': u.valid_url,
                    'auth_url': u.valid_url,
                    'signing_dir': '/var/cache/glance',
                }
            }
        else:
            expected = {
                'keystone_authtoken': {
                    'auth_uri': u.valid_url,
                    'auth_host': rel_ks_gl['auth_host'],
                    'auth_port': rel_ks_gl['auth_port'],
                    'auth_protocol': rel_ks_gl['auth_protocol'],
                    'admin_tenant_name': 'services',
                    'admin_user': 'glance',
                    'admin_password': rel_ks_gl['service_password'],
                    'signing_dir': '/var/cache/glance',
                }
            }

        return expected

    def test_300_glance_api_default_config(self):
        """Verify default section configs in glance-api.conf and
           compare some of the parameters to relation data."""
        u.log.debug('Checking glance api config file...')
        unit = self.glance_sentry
        unit_ks = self.keystone_sentry
        rel_mq_gl = self.rabbitmq_sentry.relation('amqp', 'glance:amqp')
        rel_ks_gl = unit_ks.relation('identity-service',
                                     'glance:identity-service')
        rel_my_gl = self.pxc_sentry.relation('shared-db', 'glance:shared-db')
        db_uri = "mysql://{}:{}@{}/{}".format('glance', rel_my_gl['password'],
                                              rel_my_gl['db_host'], 'glance')
        conf = '/etc/glance/glance-api.conf'
        expected = {
            'DEFAULT': {
                'debug': 'False',
                'verbose': 'False',
                'use_syslog': 'False',
                'log_file': '/var/log/glance/api.log',
                'bind_host': '0.0.0.0',
                'bind_port': '9282',
                'registry_host': '0.0.0.0',
                'registry_port': '9191',
                'registry_client_protocol': 'http',
                'delayed_delete': 'False',
                'scrub_time': '43200',
                'notification_driver': 'rabbit',
                'scrubber_datadir': '/var/lib/glance/scrubber',
                'image_cache_dir': '/var/lib/glance/image-cache/',
                'db_enforce_mysql_charset': 'False'
            },
        }

        expected.update(self._get_keystone_authtoken_expected_dict(rel_ks_gl))

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            expected['oslo_messaging_rabbit'] = {
                'rabbit_userid': 'glance',
                'rabbit_virtual_host': 'openstack',
                'rabbit_password': rel_mq_gl['password'],
                'rabbit_host': rel_mq_gl['hostname']
            }
            expected['glance_store'] = {
                'filesystem_store_datadir': '/var/lib/glance/images/',
                'stores': 'glance.store.filesystem.'
                          'Store,glance.store.http.Store',
                'default_store': 'file'
            }
            expected['database'] = {
                'idle_timeout': '3600',
                'connection': db_uri
            }

            if self._get_openstack_release() >= self.trusty_mitaka:
                del expected['DEFAULT']['notification_driver']
                connection_uri = (
                    "rabbit://glance:{}@{}:5672/"
                    "openstack".format(rel_mq_gl['password'],
                                       rel_mq_gl['hostname'])
                )
                expected['oslo_messaging_notifications'] = {
                    'driver': 'messagingv2',
                    'transport_url': connection_uri
                }
            else:
                expected['DEFAULT']['notification_driver'] = 'messagingv2'

        else:
            # Juno or earlier
            expected['DEFAULT'].update({
                'rabbit_userid': 'glance',
                'rabbit_virtual_host': 'openstack',
                'rabbit_password': rel_mq_gl['password'],
                'rabbit_host': rel_mq_gl['hostname'],
                'filesystem_store_datadir': '/var/lib/glance/images/',
                'default_store': 'file',
            })
            expected['database'] = {
                'sql_idle_timeout': '3600',
                'connection': db_uri
            }

        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "glance api config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)

    def test_302_glance_registry_default_config(self):
        """Verify configs in glance-registry.conf"""
        u.log.debug('Checking glance registry config file...')
        unit = self.glance_sentry
        unit_ks = self.keystone_sentry
        rel_ks_gl = unit_ks.relation('identity-service',
                                     'glance:identity-service')
        rel_my_gl = self.pxc_sentry.relation('shared-db', 'glance:shared-db')
        db_uri = "mysql://{}:{}@{}/{}".format('glance', rel_my_gl['password'],
                                              rel_my_gl['db_host'], 'glance')
        conf = '/etc/glance/glance-registry.conf'

        expected = {
            'DEFAULT': {
                'use_syslog': 'False',
                'log_file': '/var/log/glance/registry.log',
                'debug': 'False',
                'verbose': 'False',
                'bind_host': '0.0.0.0',
                'bind_port': '9191'
            },
        }

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            expected['database'] = {
                'idle_timeout': '3600',
                'connection': db_uri
            }
        else:
            # Juno or earlier
            expected['database'] = {
                'idle_timeout': '3600',
                'connection': db_uri
            }

        expected.update(self._get_keystone_authtoken_expected_dict(rel_ks_gl))

        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "glance registry paste config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)

    def test_410_glance_image_create_delete(self):
        """Create new cirros image in glance, verify, then delete it."""
        u.log.debug('Creating, checking and deleting glance image...')
        img_new = u.create_cirros_image(self.glance, "cirros-image-1")
        img_id = img_new.id
        u.delete_resource(self.glance.images, img_id, msg="glance image")

    def test_411_set_disk_format(self):
        sleep_time = 30
        if self._get_openstack_release() >= self.trusty_kilo:
            section = 'image_format'
        elif self._get_openstack_release() > self.trusty_icehouse:
            section = 'DEFAULT'
        else:
            u.log.debug('Test not supported before juno')
            return
        sentry = self.glance_sentry
        juju_service = 'glance'

        # Expected default and alternate values
        set_default = {
            'disk-formats': 'ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,root-tar'}
        set_alternate = {'disk-formats': 'qcow2'}

        # Config file affected by juju set config change
        conf_file = '/etc/glance/glance-api.conf'

        # Make config change, check for service restarts
        u.log.debug('Setting disk format {}...'.format(juju_service))
        self.d.configure(juju_service, set_alternate)

        u.log.debug('Sleeping to let hooks fire')
        time.sleep(sleep_time)
        u.log.debug("Checking disk format option has updated")
        ret = u.validate_config_data(
            sentry,
            conf_file,
            section,
            {'disk_formats': 'qcow2'})
        if ret:
            msg = "disk_formats was not updated in section {} in {}".format(
                section,
                conf_file)
            amulet.raise_status(amulet.FAIL, msg=msg)

        self.d.configure(juju_service, set_default)

    def test_900_glance_restart_on_config_change(self):
        """Verify that the specified services are restarted when the config
           is changed."""
        sentry = self.glance_sentry
        juju_service = 'glance'

        # Expected default and alternate values
        set_default = {'use-syslog': 'False'}
        set_alternate = {'use-syslog': 'True'}

        # Config file affected by juju set config change
        conf_file = '/etc/glance/glance-api.conf'

        # Services which are expected to restart upon config change
        services = {
            'glance-api': conf_file,
            'glance-registry': conf_file,
        }

        # Make config change, check for service restarts
        u.log.debug('Making config change on {}...'.format(juju_service))
        mtime = u.get_sentry_time(sentry)
        self.d.configure(juju_service, set_alternate)

        sleep_time = 30
        for s, conf_file in services.iteritems():
            u.log.debug("Checking that service restarted: {}".format(s))
            if not u.validate_service_config_changed(sentry, mtime, s,
                                                     conf_file,
                                                     retry_count=4,
                                                     retry_sleep_time=20,
                                                     sleep_time=sleep_time):
                self.d.configure(juju_service, set_default)
                msg = "service {} didn't restart after config change".format(s)
                amulet.raise_status(amulet.FAIL, msg=msg)
            sleep_time = 0

        self.d.configure(juju_service, set_default)

    def test_901_pause_resume(self):
        """Test pause and resume actions."""
        u.log.debug('Checking pause and resume actions...')

        unit = self.d.sentry['glance'][0]
        unit_name = unit.info['unit_name']
        u.log.debug("Unit name: {}".format(unit_name))

        u.log.debug('Checking for active status on {}'.format(unit_name))
        assert u.status_get(unit)[0] == "active"

        u.log.debug('Running pause action on {}'.format(unit_name))
        self._assert_services(should_run=True)
        action_id = u.run_action(unit, "pause")
        u.log.debug('Waiting on action {}'.format(action_id))
        assert u.wait_on_action(action_id), "Pause action failed."
        self._assert_services(should_run=False)

        u.log.debug('Running resume action on {}'.format(unit_name))
        action_id = u.run_action(unit, "resume")
        u.log.debug('Waiting on action {}'.format(action_id))
        assert u.wait_on_action(action_id), "Resume action failed"
        self._assert_services(should_run=True)
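The per-release gate scripts that normally drive this deployment class are not part of this capture. As a rough sketch of how such a runner typically invokes it (the file name and the series value are assumptions for illustration, not part of this commit), assuming it lives next to basic_deployment.py under tests/:

#!/usr/bin/env python
"""Hypothetical Amulet gate runner; a minimal sketch, not part of this diff."""

from basic_deployment import GlanceBasicDeployment

if __name__ == '__main__':
    # Deploy the bundle for an assumed series and run every method whose
    # name starts with 'test_' (see run_tests() in
    # tests/charmhelpers/contrib/amulet/deployment.py below).
    deployment = GlanceBasicDeployment(series='xenial')
    deployment.run_tests()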
tests/charmhelpers/__init__.py | 97 (new file)

@@ -0,0 +1,97 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
from __future__ import print_function
from __future__ import absolute_import

import functools
import inspect
import subprocess
import sys

try:
    import six  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
    import six  # flake8: noqa

try:
    import yaml  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
    import yaml  # flake8: noqa


# Holds a list of mapping of mangled function names that have been deprecated
# using the @deprecate decorator below.  This is so that the warning is only
# printed once for each usage of the function.
__deprecated_functions = {}


def deprecate(warning, date=None, log=None):
    """Add a deprecation warning the first time the function is used.
    The date, which is a string in semi-ISO8660 format indicate the year-month
    that the function is officially going to be removed.

    usage:

    @deprecate('use core/fetch/add_source() instead', '2017-04')
    def contributed_add_source_thing(...):
        ...

    And it then prints to the log ONCE that the function is deprecated.
    The reason for passing the logging function (log) is so that hookenv.log
    can be used for a charm if needed.

    :param warning: String to indicat where it has moved ot.
    :param date: optional sting, in YYYY-MM format to indicate when the
                 function will definitely (probably) be removed.
    :param log: The log function to call to log.  If not, logs to stdout
    """
    def wrap(f):

        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            try:
                module = inspect.getmodule(f)
                file = inspect.getsourcefile(f)
                lines = inspect.getsourcelines(f)
                f_name = "{}-{}-{}..{}-{}".format(
                    module.__name__, file, lines[0], lines[-1], f.__name__)
            except (IOError, TypeError):
                # assume it was local, so just use the name of the function
                f_name = f.__name__
            if f_name not in __deprecated_functions:
                __deprecated_functions[f_name] = True
                s = "DEPRECATION WARNING: Function {} is being removed".format(
                    f.__name__)
                if date:
                    s = "{} on/around {}".format(s, date)
                if warning:
                    s = "{} : {}".format(s, warning)
                if log:
                    log(s)
                else:
                    print(s)
            return f(*args, **kwargs)
        return wrapped_f
    return wrap
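The docstring above already shows the intended pattern; for clarity, a minimal self-contained sketch of the decorator in use (the helper names here are made up for illustration, and the import assumes the code runs from inside the synced tests/ directory so the package is importable as charmhelpers):

from charmhelpers import deprecate


@deprecate('use new_helper() instead', date='2018-01')
def old_helper(value):
    # Body unchanged; the decorator only emits a one-time warning.
    return value * 2


# The first call prints the DEPRECATION WARNING; later calls stay quiet
# because the mangled name is remembered in __deprecated_functions.
old_helper(2)
old_helper(3)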
tests/charmhelpers/contrib/__init__.py | 13 (new file)

@@ -0,0 +1,13 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
tests/charmhelpers/contrib/amulet/__init__.py | 13 (new file)

@@ -0,0 +1,13 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
tests/charmhelpers/contrib/amulet/deployment.py | 97 (new file)

@@ -0,0 +1,97 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import amulet
import os
import six


class AmuletDeployment(object):
    """Amulet deployment.

       This class provides generic Amulet deployment and test runner
       methods.
       """

    def __init__(self, series=None):
        """Initialize the deployment environment."""
        self.series = None

        if series:
            self.series = series
            self.d = amulet.Deployment(series=self.series)
        else:
            self.d = amulet.Deployment()

    def _add_services(self, this_service, other_services):
        """Add services.

           Add services to the deployment where this_service is the local charm
           that we're testing and other_services are the other services that
           are being used in the local amulet tests.
           """
        if this_service['name'] != os.path.basename(os.getcwd()):
            s = this_service['name']
            msg = "The charm's root directory name needs to be {}".format(s)
            amulet.raise_status(amulet.FAIL, msg=msg)

        if 'units' not in this_service:
            this_service['units'] = 1

        self.d.add(this_service['name'], units=this_service['units'],
                   constraints=this_service.get('constraints'))

        for svc in other_services:
            if 'location' in svc:
                branch_location = svc['location']
            elif self.series:
                branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
            else:
                branch_location = None

            if 'units' not in svc:
                svc['units'] = 1

            self.d.add(svc['name'], charm=branch_location, units=svc['units'],
                       constraints=svc.get('constraints'))

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        for k, v in six.iteritems(relations):
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
        try:
            self.d.setup(timeout=timeout)
            self.d.sentry.wait(timeout=timeout)
        except amulet.helpers.TimeoutError:
            amulet.raise_status(
                amulet.FAIL,
                msg="Deployment timed out ({}s)".format(timeout)
            )
        except Exception:
            raise

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
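GlanceBasicDeployment above builds on the OpenStack-specific subclass of this base class. As a minimal sketch of how the generic AmuletDeployment flow (_add_services, _add_relations, _configure_services, _deploy) is meant to be driven directly, with charm and relation names chosen purely for illustration:

from charmhelpers.contrib.amulet.deployment import AmuletDeployment


class ExampleDeployment(AmuletDeployment):
    """Illustrative subclass; not part of this commit."""

    def __init__(self, series='xenial'):
        super(ExampleDeployment, self).__init__(series)
        # this_service must match the charm's root directory name.
        self._add_services({'name': 'glance-simplestreams-sync'},
                           [{'name': 'keystone'}])
        self._add_relations(
            {'glance-simplestreams-sync:identity-service':
             'keystone:identity-service'})
        self._configure_services(
            {'keystone': {'admin-password': 'openstack'}})
        # Deploy and wait for hooks; honours AMULET_SETUP_TIMEOUT.
        self._deploy()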
821
tests/charmhelpers/contrib/amulet/utils.py
Normal file
821
tests/charmhelpers/contrib/amulet/utils.py
Normal file
@ -0,0 +1,821 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import io
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import socket
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
import amulet
|
||||||
|
import distro_info
|
||||||
|
import six
|
||||||
|
from six.moves import configparser
|
||||||
|
if six.PY3:
|
||||||
|
from urllib import parse as urlparse
|
||||||
|
else:
|
||||||
|
import urlparse
|
||||||
|
|
||||||
|
|
||||||
|
class AmuletUtils(object):
|
||||||
|
"""Amulet utilities.
|
||||||
|
|
||||||
|
This class provides common utility functions that are used by Amulet
|
||||||
|
tests.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, log_level=logging.ERROR):
|
||||||
|
self.log = self.get_logger(level=log_level)
|
||||||
|
self.ubuntu_releases = self.get_ubuntu_releases()
|
||||||
|
|
||||||
|
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
|
||||||
|
"""Get a logger object that will log to stdout."""
|
||||||
|
log = logging
|
||||||
|
logger = log.getLogger(name)
|
||||||
|
fmt = log.Formatter("%(asctime)s %(funcName)s "
|
||||||
|
"%(levelname)s: %(message)s")
|
||||||
|
|
||||||
|
handler = log.StreamHandler(stream=sys.stdout)
|
||||||
|
handler.setLevel(level)
|
||||||
|
handler.setFormatter(fmt)
|
||||||
|
|
||||||
|
logger.addHandler(handler)
|
||||||
|
logger.setLevel(level)
|
||||||
|
|
||||||
|
return logger
|
||||||
|
|
||||||
|
def valid_ip(self, ip):
|
||||||
|
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
|
||||||
|
def valid_url(self, url):
|
||||||
|
p = re.compile(
|
||||||
|
r'^(?:http|ftp)s?://'
|
||||||
|
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
|
||||||
|
r'localhost|'
|
||||||
|
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
|
||||||
|
r'(?::\d+)?'
|
||||||
|
r'(?:/?|[/?]\S+)$',
|
||||||
|
re.IGNORECASE)
|
||||||
|
if p.match(url):
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
|
||||||
|
def get_ubuntu_release_from_sentry(self, sentry_unit):
|
||||||
|
"""Get Ubuntu release codename from sentry unit.
|
||||||
|
|
||||||
|
:param sentry_unit: amulet sentry/service unit pointer
|
||||||
|
:returns: list of strings - release codename, failure message
|
||||||
|
"""
|
||||||
|
msg = None
|
||||||
|
cmd = 'lsb_release -cs'
|
||||||
|
release, code = sentry_unit.run(cmd)
|
||||||
|
if code == 0:
|
||||||
|
self.log.debug('{} lsb_release: {}'.format(
|
||||||
|
sentry_unit.info['unit_name'], release))
|
||||||
|
else:
|
||||||
|
msg = ('{} `{}` returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, release, code))
|
||||||
|
if release not in self.ubuntu_releases:
|
||||||
|
msg = ("Release ({}) not found in Ubuntu releases "
|
||||||
|
"({})".format(release, self.ubuntu_releases))
|
||||||
|
return release, msg
|
||||||
|
|
||||||
|
def validate_services(self, commands):
|
||||||
|
"""Validate that lists of commands succeed on service units. Can be
|
||||||
|
used to verify system services are running on the corresponding
|
||||||
|
service units.
|
||||||
|
|
||||||
|
:param commands: dict with sentry keys and arbitrary command list vals
|
||||||
|
:returns: None if successful, Failure string message otherwise
|
||||||
|
"""
|
||||||
|
self.log.debug('Checking status of system services...')
|
||||||
|
|
||||||
|
# /!\ DEPRECATION WARNING (beisner):
|
||||||
|
# New and existing tests should be rewritten to use
|
||||||
|
# validate_services_by_name() as it is aware of init systems.
|
||||||
|
self.log.warn('DEPRECATION WARNING: use '
|
||||||
|
'validate_services_by_name instead of validate_services '
|
||||||
|
'due to init system differences.')
|
||||||
|
|
||||||
|
for k, v in six.iteritems(commands):
|
||||||
|
for cmd in v:
|
||||||
|
output, code = k.run(cmd)
|
||||||
|
self.log.debug('{} `{}` returned '
|
||||||
|
'{}'.format(k.info['unit_name'],
|
||||||
|
cmd, code))
|
||||||
|
if code != 0:
|
||||||
|
return "command `{}` returned {}".format(cmd, str(code))
|
||||||
|
return None
|
||||||
|
|
||||||
|
def validate_services_by_name(self, sentry_services):
|
||||||
|
"""Validate system service status by service name, automatically
|
||||||
|
detecting init system based on Ubuntu release codename.
|
||||||
|
|
||||||
|
:param sentry_services: dict with sentry keys and svc list values
|
||||||
|
:returns: None if successful, Failure string message otherwise
|
||||||
|
"""
|
||||||
|
self.log.debug('Checking status of system services...')
|
||||||
|
|
||||||
|
# Point at which systemd became a thing
|
||||||
|
systemd_switch = self.ubuntu_releases.index('vivid')
|
||||||
|
|
||||||
|
for sentry_unit, services_list in six.iteritems(sentry_services):
|
||||||
|
# Get lsb_release codename from unit
|
||||||
|
release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
|
||||||
|
if ret:
|
||||||
|
return ret
|
||||||
|
|
||||||
|
for service_name in services_list:
|
||||||
|
if (self.ubuntu_releases.index(release) >= systemd_switch or
|
||||||
|
service_name in ['rabbitmq-server', 'apache2',
|
||||||
|
'memcached']):
|
||||||
|
# init is systemd (or regular sysv)
|
||||||
|
cmd = 'sudo service {} status'.format(service_name)
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
service_running = code == 0
|
||||||
|
elif self.ubuntu_releases.index(release) < systemd_switch:
|
||||||
|
# init is upstart
|
||||||
|
cmd = 'sudo status {}'.format(service_name)
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
service_running = code == 0 and "start/running" in output
|
||||||
|
|
||||||
|
self.log.debug('{} `{}` returned '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code))
|
||||||
|
if not service_running:
|
||||||
|
return u"command `{}` returned {} {}".format(
|
||||||
|
cmd, output, str(code))
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _get_config(self, unit, filename):
|
||||||
|
"""Get a ConfigParser object for parsing a unit's config file."""
|
||||||
|
file_contents = unit.file_contents(filename)
|
||||||
|
|
||||||
|
# NOTE(beisner): by default, ConfigParser does not handle options
|
||||||
|
# with no value, such as the flags used in the mysql my.cnf file.
|
||||||
|
# https://bugs.python.org/issue7005
|
||||||
|
config = configparser.ConfigParser(allow_no_value=True)
|
||||||
|
config.readfp(io.StringIO(file_contents))
|
||||||
|
return config
|
||||||
|
|
||||||
|
def validate_config_data(self, sentry_unit, config_file, section,
|
||||||
|
expected):
|
||||||
|
"""Validate config file data.
|
||||||
|
|
||||||
|
Verify that the specified section of the config file contains
|
||||||
|
the expected option key:value pairs.
|
||||||
|
|
||||||
|
Compare expected dictionary data vs actual dictionary data.
|
||||||
|
The values in the 'expected' dictionary can be strings, bools, ints,
|
||||||
|
longs, or can be a function that evaluates a variable and returns a
|
||||||
|
bool.
|
||||||
|
"""
|
||||||
|
self.log.debug('Validating config file data ({} in {} on {})'
|
||||||
|
'...'.format(section, config_file,
|
||||||
|
sentry_unit.info['unit_name']))
|
||||||
|
config = self._get_config(sentry_unit, config_file)
|
||||||
|
|
||||||
|
if section != 'DEFAULT' and not config.has_section(section):
|
||||||
|
return "section [{}] does not exist".format(section)
|
||||||
|
|
||||||
|
for k in expected.keys():
|
||||||
|
if not config.has_option(section, k):
|
||||||
|
return "section [{}] is missing option {}".format(section, k)
|
||||||
|
|
||||||
|
actual = config.get(section, k)
|
||||||
|
v = expected[k]
|
||||||
|
if (isinstance(v, six.string_types) or
|
||||||
|
isinstance(v, bool) or
|
||||||
|
isinstance(v, six.integer_types)):
|
||||||
|
# handle explicit values
|
||||||
|
if actual != v:
|
||||||
|
return "section [{}] {}:{} != expected {}:{}".format(
|
||||||
|
section, k, actual, k, expected[k])
|
||||||
|
# handle function pointers, such as not_null or valid_ip
|
||||||
|
elif not v(actual):
|
||||||
|
return "section [{}] {}:{} != expected {}:{}".format(
|
||||||
|
section, k, actual, k, expected[k])
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _validate_dict_data(self, expected, actual):
|
||||||
|
"""Validate dictionary data.
|
||||||
|
|
||||||
|
Compare expected dictionary data vs actual dictionary data.
|
||||||
|
The values in the 'expected' dictionary can be strings, bools, ints,
|
||||||
|
longs, or can be a function that evaluates a variable and returns a
|
||||||
|
bool.
|
||||||
|
"""
|
||||||
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
|
self.log.debug('expected: {}'.format(repr(expected)))
|
||||||
|
|
||||||
|
for k, v in six.iteritems(expected):
|
||||||
|
if k in actual:
|
||||||
|
if (isinstance(v, six.string_types) or
|
||||||
|
isinstance(v, bool) or
|
||||||
|
isinstance(v, six.integer_types)):
|
||||||
|
# handle explicit values
|
||||||
|
if v != actual[k]:
|
||||||
|
return "{}:{}".format(k, actual[k])
|
||||||
|
# handle function pointers, such as not_null or valid_ip
|
||||||
|
elif not v(actual[k]):
|
||||||
|
return "{}:{}".format(k, actual[k])
|
||||||
|
else:
|
||||||
|
return "key '{}' does not exist".format(k)
|
||||||
|
return None
|
||||||
|
|
||||||
|
def validate_relation_data(self, sentry_unit, relation, expected):
|
||||||
|
"""Validate actual relation data based on expected relation data."""
|
||||||
|
actual = sentry_unit.relation(relation[0], relation[1])
|
||||||
|
return self._validate_dict_data(expected, actual)
|
||||||
|
|
||||||
|
def _validate_list_data(self, expected, actual):
|
||||||
|
"""Compare expected list vs actual list data."""
|
||||||
|
for e in expected:
|
||||||
|
if e not in actual:
|
||||||
|
return "expected item {} not found in actual list".format(e)
|
||||||
|
return None
|
||||||
|
|
||||||
|
def not_null(self, string):
|
||||||
|
if string is not None:
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _get_file_mtime(self, sentry_unit, filename):
|
||||||
|
"""Get last modification time of file."""
|
||||||
|
return sentry_unit.file_stat(filename)['mtime']
|
||||||
|
|
||||||
|
def _get_dir_mtime(self, sentry_unit, directory):
|
||||||
|
"""Get last modification time of directory."""
|
||||||
|
return sentry_unit.directory_stat(directory)['mtime']
|
||||||
|
|
||||||
|
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
|
||||||
|
"""Get start time of a process based on the last modification time
|
||||||
|
of the /proc/pid directory.
|
||||||
|
|
||||||
|
:sentry_unit: The sentry unit to check for the service on
|
||||||
|
:service: service name to look for in process table
|
||||||
|
:pgrep_full: [Deprecated] Use full command line search mode with pgrep
|
||||||
|
:returns: epoch time of service process start
|
||||||
|
:param commands: list of bash commands
|
||||||
|
:param sentry_units: list of sentry unit pointers
|
||||||
|
:returns: None if successful; Failure message otherwise
|
||||||
|
"""
|
||||||
|
if pgrep_full is not None:
|
||||||
|
# /!\ DEPRECATION WARNING (beisner):
|
||||||
|
# No longer implemented, as pidof is now used instead of pgrep.
|
||||||
|
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
|
||||||
|
self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
|
||||||
|
'longer implemented re: lp 1474030.')
|
||||||
|
|
||||||
|
pid_list = self.get_process_id_list(sentry_unit, service)
|
||||||
|
pid = pid_list[0]
|
||||||
|
proc_dir = '/proc/{}'.format(pid)
|
||||||
|
self.log.debug('Pid for {} on {}: {}'.format(
|
||||||
|
service, sentry_unit.info['unit_name'], pid))
|
||||||
|
|
||||||
|
return self._get_dir_mtime(sentry_unit, proc_dir)
|
||||||
|
|
||||||
|
def service_restarted(self, sentry_unit, service, filename,
|
||||||
|
pgrep_full=None, sleep_time=20):
|
||||||
|
"""Check if service was restarted.
|
||||||
|
|
||||||
|
Compare a service's start time vs a file's last modification time
|
||||||
|
(such as a config file for that service) to determine if the service
|
||||||
|
has been restarted.
|
||||||
|
"""
|
||||||
|
# /!\ DEPRECATION WARNING (beisner):
|
||||||
|
# This method is prone to races in that no before-time is known.
|
||||||
|
# Use validate_service_config_changed instead.
|
||||||
|
|
||||||
|
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
|
||||||
|
# used instead of pgrep. pgrep_full is still passed through to ensure
|
||||||
|
# deprecation WARNS. lp1474030
|
||||||
|
self.log.warn('DEPRECATION WARNING: use '
|
||||||
|
'validate_service_config_changed instead of '
|
||||||
|
'service_restarted due to known races.')
|
||||||
|
|
||||||
|
time.sleep(sleep_time)
|
||||||
|
if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
|
||||||
|
self._get_file_mtime(sentry_unit, filename)):
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
|
||||||
|
def service_restarted_since(self, sentry_unit, mtime, service,
|
||||||
|
pgrep_full=None, sleep_time=20,
|
||||||
|
retry_count=30, retry_sleep_time=10):
|
||||||
|
"""Check if service was been started after a given time.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
sentry_unit (sentry): The sentry unit to check for the service on
|
||||||
|
mtime (float): The epoch time to check against
|
||||||
|
service (string): service name to look for in process table
|
||||||
|
pgrep_full: [Deprecated] Use full command line search mode with pgrep
|
||||||
|
sleep_time (int): Initial sleep time (s) before looking for file
|
||||||
|
retry_sleep_time (int): Time (s) to sleep between retries
|
||||||
|
retry_count (int): If file is not found, how many times to retry
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if service is found and its start time is newer than mtime,
|
||||||
|
False if service is older than mtime or if service was
|
||||||
|
not found.
|
||||||
|
"""
|
||||||
|
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
|
||||||
|
# used instead of pgrep. pgrep_full is still passed through to ensure
|
||||||
|
# deprecation WARNS. lp1474030
|
||||||
|
|
||||||
|
unit_name = sentry_unit.info['unit_name']
|
||||||
|
self.log.debug('Checking that %s service restarted since %s on '
|
||||||
|
'%s' % (service, mtime, unit_name))
|
||||||
|
time.sleep(sleep_time)
|
||||||
|
proc_start_time = None
|
||||||
|
tries = 0
|
||||||
|
while tries <= retry_count and not proc_start_time:
|
||||||
|
try:
|
||||||
|
proc_start_time = self._get_proc_start_time(sentry_unit,
|
||||||
|
service,
|
||||||
|
pgrep_full)
|
||||||
|
self.log.debug('Attempt {} to get {} proc start time on {} '
|
||||||
|
'OK'.format(tries, service, unit_name))
|
||||||
|
except IOError as e:
|
||||||
|
# NOTE(beisner) - race avoidance, proc may not exist yet.
|
||||||
|
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
|
||||||
|
self.log.debug('Attempt {} to get {} proc start time on {} '
|
||||||
|
'failed\n{}'.format(tries, service,
|
||||||
|
unit_name, e))
|
||||||
|
time.sleep(retry_sleep_time)
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
if not proc_start_time:
|
||||||
|
self.log.warn('No proc start time found, assuming service did '
|
||||||
|
'not start')
|
||||||
|
return False
|
||||||
|
if proc_start_time >= mtime:
|
||||||
|
self.log.debug('Proc start time is newer than provided mtime '
|
||||||
|
'(%s >= %s) on %s (OK)' % (proc_start_time,
|
||||||
|
mtime, unit_name))
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
self.log.warn('Proc start time (%s) is older than provided mtime '
|
||||||
|
'(%s) on %s, service did not '
|
||||||
|
'restart' % (proc_start_time, mtime, unit_name))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def config_updated_since(self, sentry_unit, filename, mtime,
|
||||||
|
sleep_time=20, retry_count=30,
|
||||||
|
retry_sleep_time=10):
|
||||||
|
"""Check if file was modified after a given time.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
sentry_unit (sentry): The sentry unit to check the file mtime on
|
||||||
|
filename (string): The file to check mtime of
|
||||||
|
mtime (float): The epoch time to check against
|
||||||
|
sleep_time (int): Initial sleep time (s) before looking for file
|
||||||
|
retry_sleep_time (int): Time (s) to sleep between retries
|
||||||
|
retry_count (int): If file is not found, how many times to retry
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if file was modified more recently than mtime, False if
|
||||||
|
file was modified before mtime, or if file not found.
|
||||||
|
"""
|
||||||
|
unit_name = sentry_unit.info['unit_name']
|
||||||
|
self.log.debug('Checking that %s updated since %s on '
|
||||||
|
'%s' % (filename, mtime, unit_name))
|
||||||
|
time.sleep(sleep_time)
|
||||||
|
file_mtime = None
|
||||||
|
tries = 0
|
||||||
|
while tries <= retry_count and not file_mtime:
|
||||||
|
try:
|
||||||
|
file_mtime = self._get_file_mtime(sentry_unit, filename)
|
||||||
|
self.log.debug('Attempt {} to get {} file mtime on {} '
|
||||||
|
'OK'.format(tries, filename, unit_name))
|
||||||
|
except IOError as e:
|
||||||
|
# NOTE(beisner) - race avoidance, file may not exist yet.
|
||||||
|
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
|
||||||
|
self.log.debug('Attempt {} to get {} file mtime on {} '
|
||||||
|
'failed\n{}'.format(tries, filename,
|
||||||
|
unit_name, e))
|
||||||
|
time.sleep(retry_sleep_time)
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
if not file_mtime:
|
||||||
|
self.log.warn('Could not determine file mtime, assuming '
|
||||||
|
'file does not exist')
|
||||||
|
return False
|
||||||
|
|
||||||
|
if file_mtime >= mtime:
|
||||||
|
self.log.debug('File mtime is newer than provided mtime '
|
||||||
|
'(%s >= %s) on %s (OK)' % (file_mtime,
|
||||||
|
mtime, unit_name))
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
self.log.warn('File mtime is older than provided mtime '
'(%s < %s) on %s' % (file_mtime,
|
||||||
|
mtime, unit_name))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def validate_service_config_changed(self, sentry_unit, mtime, service,
|
||||||
|
filename, pgrep_full=None,
|
||||||
|
sleep_time=20, retry_count=30,
|
||||||
|
retry_sleep_time=10):
|
||||||
|
"""Check service and file were updated after mtime
|
||||||
|
|
||||||
|
Args:
|
||||||
|
sentry_unit (sentry): The sentry unit to check for the service on
|
||||||
|
mtime (float): The epoch time to check against
|
||||||
|
service (string): service name to look for in process table
|
||||||
|
filename (string): The file to check mtime of
|
||||||
|
pgrep_full: [Deprecated] Use full command line search mode with pgrep
|
||||||
|
sleep_time (int): Initial sleep in seconds to pass to test helpers
|
||||||
|
retry_count (int): If service is not found, how many times to retry
|
||||||
|
retry_sleep_time (int): Time in seconds to wait between retries
|
||||||
|
|
||||||
|
Typical Usage:
|
||||||
|
u = OpenStackAmuletUtils(ERROR)
|
||||||
|
...
|
||||||
|
mtime = u.get_sentry_time(self.cinder_sentry)
|
||||||
|
self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
|
||||||
|
if not u.validate_service_config_changed(self.cinder_sentry,
|
||||||
|
mtime,
|
||||||
|
'cinder-api',
|
||||||
|
'/etc/cinder/cinder.conf')
|
||||||
|
amulet.raise_status(amulet.FAIL, msg='update failed')
|
||||||
|
Returns:
|
||||||
|
bool: True if both service and file were updated/restarted after
|
||||||
|
mtime, False if service is older than mtime or if service was
|
||||||
|
not found or if filename was modified before mtime.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
|
||||||
|
# used instead of pgrep. pgrep_full is still passed through to ensure
|
||||||
|
# deprecation WARNS. lp1474030
|
||||||
|
|
||||||
|
service_restart = self.service_restarted_since(
|
||||||
|
sentry_unit, mtime,
|
||||||
|
service,
|
||||||
|
pgrep_full=pgrep_full,
|
||||||
|
sleep_time=sleep_time,
|
||||||
|
retry_count=retry_count,
|
||||||
|
retry_sleep_time=retry_sleep_time)
|
||||||
|
|
||||||
|
config_update = self.config_updated_since(
|
||||||
|
sentry_unit,
|
||||||
|
filename,
|
||||||
|
mtime,
|
||||||
|
sleep_time=sleep_time,
|
||||||
|
retry_count=retry_count,
|
||||||
|
retry_sleep_time=retry_sleep_time)
|
||||||
|
|
||||||
|
return service_restart and config_update
|
||||||
|
|
||||||
|
def get_sentry_time(self, sentry_unit):
|
||||||
|
"""Return current epoch time on a sentry"""
|
||||||
|
cmd = "date +'%s'"
|
||||||
|
return float(sentry_unit.run(cmd)[0])
|
||||||
|
|
||||||
|
def relation_error(self, name, data):
|
||||||
|
return 'unexpected relation data in {} - {}'.format(name, data)
|
||||||
|
|
||||||
|
def endpoint_error(self, name, data):
|
||||||
|
return 'unexpected endpoint data in {} - {}'.format(name, data)
|
||||||
|
|
||||||
|
def get_ubuntu_releases(self):
|
||||||
|
"""Return a list of all Ubuntu releases in order of release."""
|
||||||
|
_d = distro_info.UbuntuDistroInfo()
|
||||||
|
_release_list = _d.all
|
||||||
|
return _release_list
|
||||||
|
|
||||||
|
def file_to_url(self, file_rel_path):
|
||||||
|
"""Convert a relative file path to a file URL."""
|
||||||
|
_abs_path = os.path.abspath(file_rel_path)
|
||||||
|
return urlparse.urlparse(_abs_path, scheme='file').geturl()
|
||||||
|
|
||||||
|
def check_commands_on_units(self, commands, sentry_units):
|
||||||
|
"""Check that all commands in a list exit zero on all
|
||||||
|
sentry units in a list.
|
||||||
|
|
||||||
|
:param commands: list of bash commands
|
||||||
|
:param sentry_units: list of sentry unit pointers
|
||||||
|
:returns: None if successful; Failure message otherwise
|
||||||
|
"""
|
||||||
|
self.log.debug('Checking exit codes for {} commands on {} '
|
||||||
|
'sentry units...'.format(len(commands),
|
||||||
|
len(sentry_units)))
|
||||||
|
for sentry_unit in sentry_units:
|
||||||
|
for cmd in commands:
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code == 0:
|
||||||
|
self.log.debug('{} `{}` returned {} '
|
||||||
|
'(OK)'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code))
|
||||||
|
else:
|
||||||
|
return ('{} `{}` returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
return None
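# Illustrative usage sketch (assumes an amulet deployment `self.d` and an
# AmuletUtils instance `u`; unit and command names are hypothetical):
#
#     sentries = [self.d.sentry['keystone'][0], self.d.sentry['glance'][0]]
#     cmds = ['systemctl is-enabled cron', 'test -r /etc/hostname']
#     ret = u.check_commands_on_units(cmds, sentries)
#     if ret:
#         amulet.raise_status(amulet.FAIL, msg=ret)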
|
||||||
|
|
||||||
|
def get_process_id_list(self, sentry_unit, process_name,
|
||||||
|
expect_success=True):
|
||||||
|
"""Get a list of process ID(s) from a single sentry juju unit
|
||||||
|
for a single process name.
|
||||||
|
|
||||||
|
:param sentry_unit: Amulet sentry instance (juju unit)
|
||||||
|
:param process_name: Process name
|
||||||
|
:param expect_success: If False, expect the PID to be missing,
|
||||||
|
raise if it is present.
|
||||||
|
:returns: List of process IDs
|
||||||
|
"""
|
||||||
|
cmd = 'pidof -x "{}"'.format(process_name)
|
||||||
|
if not expect_success:
|
||||||
|
cmd += " || exit 0 && exit 1"
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code != 0:
|
||||||
|
msg = ('{} `{}` returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
return str(output).split()
|
||||||
|
|
||||||
|
def get_unit_process_ids(self, unit_processes, expect_success=True):
|
||||||
|
"""Construct a dict containing unit sentries, process names, and
|
||||||
|
process IDs.
|
||||||
|
|
||||||
|
:param unit_processes: A dictionary of Amulet sentry instance
|
||||||
|
to list of process names.
|
||||||
|
:param expect_success: if False expect the processes to not be
|
||||||
|
running, raise if they are.
|
||||||
|
:returns: Dictionary of Amulet sentry instance to dictionary
|
||||||
|
of process names to PIDs.
|
||||||
|
"""
|
||||||
|
pid_dict = {}
|
||||||
|
for sentry_unit, process_list in six.iteritems(unit_processes):
|
||||||
|
pid_dict[sentry_unit] = {}
|
||||||
|
for process in process_list:
|
||||||
|
pids = self.get_process_id_list(
|
||||||
|
sentry_unit, process, expect_success=expect_success)
|
||||||
|
pid_dict[sentry_unit].update({process: pids})
|
||||||
|
return pid_dict
|
||||||
|
|
||||||
|
def validate_unit_process_ids(self, expected, actual):
|
||||||
|
"""Validate process id quantities for services on units."""
|
||||||
|
self.log.debug('Checking units for running processes...')
|
||||||
|
self.log.debug('Expected PIDs: {}'.format(expected))
|
||||||
|
self.log.debug('Actual PIDs: {}'.format(actual))
|
||||||
|
|
||||||
|
if len(actual) != len(expected):
|
||||||
|
return ('Unit count mismatch. expected, actual: {}, '
|
||||||
|
'{} '.format(len(expected), len(actual)))
|
||||||
|
|
||||||
|
for (e_sentry, e_proc_names) in six.iteritems(expected):
|
||||||
|
e_sentry_name = e_sentry.info['unit_name']
|
||||||
|
if e_sentry in actual.keys():
|
||||||
|
a_proc_names = actual[e_sentry]
|
||||||
|
else:
|
||||||
|
return ('Expected sentry ({}) not found in actual dict data.'
|
||||||
|
'{}'.format(e_sentry_name, e_sentry))
|
||||||
|
|
||||||
|
if len(e_proc_names.keys()) != len(a_proc_names.keys()):
|
||||||
|
return ('Process name count mismatch. expected, actual: {}, '
|
||||||
|
'{}'.format(len(expected), len(actual)))
|
||||||
|
|
||||||
|
for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
|
||||||
|
zip(e_proc_names.items(), a_proc_names.items()):
|
||||||
|
if e_proc_name != a_proc_name:
|
||||||
|
return ('Process name mismatch. expected, actual: {}, '
|
||||||
|
'{}'.format(e_proc_name, a_proc_name))
|
||||||
|
|
||||||
|
a_pids_length = len(a_pids)
|
||||||
|
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
|
||||||
|
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
|
||||||
|
e_pids, a_pids_length,
|
||||||
|
a_pids))
|
||||||
|
|
||||||
|
# If expected is a list, ensure at least one PID quantity match
|
||||||
|
if isinstance(e_pids, list) and \
|
||||||
|
a_pids_length not in e_pids:
|
||||||
|
return fail_msg
|
||||||
|
# If expected is not bool and not list,
|
||||||
|
# ensure PID quantities match
|
||||||
|
elif not isinstance(e_pids, bool) and \
|
||||||
|
not isinstance(e_pids, list) and \
|
||||||
|
a_pids_length != e_pids:
|
||||||
|
return fail_msg
|
||||||
|
# If expected is bool True, ensure 1 or more PIDs exist
|
||||||
|
elif isinstance(e_pids, bool) and \
|
||||||
|
e_pids is True and a_pids_length < 1:
|
||||||
|
return fail_msg
|
||||||
|
# If expected is bool False, ensure 0 PIDs exist
|
||||||
|
elif isinstance(e_pids, bool) and \
|
||||||
|
e_pids is False and a_pids_length != 0:
|
||||||
|
return fail_msg
|
||||||
|
else:
|
||||||
|
self.log.debug('PID check OK: {} {} {}: '
|
||||||
|
'{}'.format(e_sentry_name, e_proc_name,
|
||||||
|
e_pids, a_pids))
|
||||||
|
return None
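# Illustrative usage sketch (hypothetical unit and process names): build the
# expected and actual PID maps with get_unit_process_ids(), then compare.
# Expected counts may be an int, a list of acceptable counts, or a bool.
#
#     expected = {self.d.sentry['glance'][0]: {'glance-api': [2, 4],
#                                              'glance-registry': True}}
#     actual = u.get_unit_process_ids(
#         {unit: list(procs.keys()) for unit, procs in expected.items()})
#     ret = u.validate_unit_process_ids(expected, actual)
#     if ret:
#         amulet.raise_status(amulet.FAIL, msg=ret)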
|
||||||
|
|
||||||
|
def validate_list_of_identical_dicts(self, list_of_dicts):
|
||||||
|
"""Check that all dicts within a list are identical."""
|
||||||
|
hashes = []
|
||||||
|
for _dict in list_of_dicts:
|
||||||
|
hashes.append(hash(frozenset(_dict.items())))
|
||||||
|
|
||||||
|
self.log.debug('Hashes: {}'.format(hashes))
|
||||||
|
if len(set(hashes)) == 1:
|
||||||
|
self.log.debug('Dicts within list are identical')
|
||||||
|
else:
|
||||||
|
return 'Dicts within list are not identical'
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
def validate_sectionless_conf(self, file_contents, expected):
|
||||||
|
"""A crude conf parser. Useful to inspect configuration files which
|
||||||
|
do not have section headers (as would be necessary in order to use
|
||||||
|
the configparser), such as openstack-dashboard or rabbitmq confs."""
|
||||||
|
for line in file_contents.split('\n'):
|
||||||
|
if '=' in line:
|
||||||
|
args = line.split('=')
|
||||||
|
if len(args) <= 1:
|
||||||
|
continue
|
||||||
|
key = args[0].strip()
|
||||||
|
value = args[1].strip()
|
||||||
|
if key in expected.keys():
|
||||||
|
if expected[key] != value:
|
||||||
|
msg = ('Config mismatch. Expected, actual: {}, '
|
||||||
|
'{}'.format(expected[key], value))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
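# Illustrative usage sketch (file path and key are hypothetical): fetch a
# section-less config file from a unit and assert selected key=value pairs.
#
#     contents = u.file_contents_safe(sentry, '/etc/rabbitmq/rabbitmq-env.conf')
#     u.validate_sectionless_conf(contents, {'NODENAME': 'rabbit@juju-0'})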
|
||||||
|
|
||||||
|
def get_unit_hostnames(self, units):
|
||||||
|
"""Return a dict of juju unit names to hostnames."""
|
||||||
|
host_names = {}
|
||||||
|
for unit in units:
|
||||||
|
host_names[unit.info['unit_name']] = \
|
||||||
|
str(unit.file_contents('/etc/hostname').strip())
|
||||||
|
self.log.debug('Unit host names: {}'.format(host_names))
|
||||||
|
return host_names
|
||||||
|
|
||||||
|
def run_cmd_unit(self, sentry_unit, cmd):
|
||||||
|
"""Run a command on a unit, return the output and exit code."""
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code == 0:
|
||||||
|
self.log.debug('{} `{}` command returned {} '
|
||||||
|
'(OK)'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code))
|
||||||
|
else:
|
||||||
|
msg = ('{} `{}` command returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
return str(output), code
|
||||||
|
|
||||||
|
def file_exists_on_unit(self, sentry_unit, file_name):
|
||||||
|
"""Check if a file exists on a unit."""
|
||||||
|
try:
|
||||||
|
sentry_unit.file_stat(file_name)
|
||||||
|
return True
|
||||||
|
except IOError:
|
||||||
|
return False
|
||||||
|
except Exception as e:
|
||||||
|
msg = 'Error checking file {}: {}'.format(file_name, e)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
def file_contents_safe(self, sentry_unit, file_name,
|
||||||
|
max_wait=60, fatal=False):
|
||||||
|
"""Get file contents from a sentry unit. Wrap amulet file_contents
|
||||||
|
with retry logic to address races where a file checks as existing,
|
||||||
|
but no longer exists by the time file_contents is called.
|
||||||
|
Return None if file not found. Optionally raise if fatal is True."""
|
||||||
|
unit_name = sentry_unit.info['unit_name']
|
||||||
|
file_contents = False
|
||||||
|
tries = 0
|
||||||
|
while not file_contents and tries < (max_wait / 4):
|
||||||
|
try:
|
||||||
|
file_contents = sentry_unit.file_contents(file_name)
|
||||||
|
except IOError:
|
||||||
|
self.log.debug('Attempt {} to open file {} from {} '
|
||||||
|
'failed'.format(tries, file_name,
|
||||||
|
unit_name))
|
||||||
|
time.sleep(4)
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
if file_contents:
|
||||||
|
return file_contents
|
||||||
|
elif not fatal:
|
||||||
|
return None
|
||||||
|
elif fatal:
|
||||||
|
msg = 'Failed to get file contents from unit.'
|
||||||
|
amulet.raise_status(amulet.FAIL, msg)
|
||||||
|
|
||||||
|
def port_knock_tcp(self, host="localhost", port=22, timeout=15):
|
||||||
|
"""Open a TCP socket to check for a listening sevice on a host.
|
||||||
|
|
||||||
|
:param host: host name or IP address, default to localhost
|
||||||
|
:param port: TCP port number, default to 22
|
||||||
|
:param timeout: Connect timeout, default to 15 seconds
|
||||||
|
:returns: True if successful, False if connect failed
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Resolve host name if possible
|
||||||
|
try:
|
||||||
|
connect_host = socket.gethostbyname(host)
|
||||||
|
host_human = "{} ({})".format(connect_host, host)
|
||||||
|
except socket.error as e:
|
||||||
|
self.log.warn('Unable to resolve address: '
|
||||||
|
'{} ({}) Trying anyway!'.format(host, e))
|
||||||
|
connect_host = host
|
||||||
|
host_human = connect_host
|
||||||
|
|
||||||
|
# Attempt socket connection
|
||||||
|
try:
|
||||||
|
knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||||
|
knock.settimeout(timeout)
|
||||||
|
knock.connect((connect_host, port))
|
||||||
|
knock.close()
|
||||||
|
self.log.debug('Socket connect OK for host '
|
||||||
|
'{} on port {}.'.format(host_human, port))
|
||||||
|
return True
|
||||||
|
except socket.error as e:
|
||||||
|
self.log.debug('Socket connect FAIL for'
|
||||||
|
' {} port {} ({})'.format(host_human, port, e))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def port_knock_units(self, sentry_units, port=22,
|
||||||
|
timeout=15, expect_success=True):
|
||||||
|
"""Open a TCP socket to check for a listening sevice on each
|
||||||
|
listed juju unit.
|
||||||
|
|
||||||
|
:param sentry_units: list of sentry unit pointers
|
||||||
|
:param port: TCP port number, default to 22
|
||||||
|
:param timeout: Connect timeout, default to 15 seconds
|
||||||
|
:expect_success: True by default, set False to invert logic
|
||||||
|
:returns: None if successful, Failure message otherwise
|
||||||
|
"""
|
||||||
|
for unit in sentry_units:
|
||||||
|
host = unit.info['public-address']
|
||||||
|
connected = self.port_knock_tcp(host, port, timeout)
|
||||||
|
if not connected and expect_success:
|
||||||
|
return 'Socket connect failed.'
|
||||||
|
elif connected and not expect_success:
|
||||||
|
return 'Socket connected unexpectedly.'
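# Illustrative usage sketch (hypothetical service name and port): verify that
# every unit of a service is listening on its API port.
#
#     ret = u.port_knock_units(self.d.sentry['glance'], port=9292)
#     if ret:
#         amulet.raise_status(amulet.FAIL, msg=ret)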
|
||||||
|
|
||||||
|
def get_uuid_epoch_stamp(self):
|
||||||
|
"""Returns a stamp string based on uuid4 and epoch time. Useful in
|
||||||
|
generating test messages which need to be unique-ish."""
|
||||||
|
return '[{}-{}]'.format(uuid.uuid4(), time.time())
|
||||||
|
|
||||||
|
# amulet juju action helpers:
|
||||||
|
def run_action(self, unit_sentry, action,
|
||||||
|
_check_output=subprocess.check_output,
|
||||||
|
params=None):
|
||||||
|
"""Translate to amulet's built in run_action(). Deprecated.
|
||||||
|
|
||||||
|
Run the named action on a given unit sentry.
|
||||||
|
|
||||||
|
params a dict of parameters to use
|
||||||
|
_check_output parameter is no longer used
|
||||||
|
|
||||||
|
@return action_id.
|
||||||
|
"""
|
||||||
|
self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been '
|
||||||
|
'deprecated for amulet.run_action')
|
||||||
|
return unit_sentry.run_action(action, action_args=params)
|
||||||
|
|
||||||
|
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
|
||||||
|
"""Wait for a given action, returning if it completed or not.
|
||||||
|
|
||||||
|
action_id a string action uuid
|
||||||
|
_check_output parameter is no longer used
|
||||||
|
"""
|
||||||
|
data = amulet.actions.get_action_output(action_id, full_output=True)
|
||||||
|
return data.get(u"status") == "completed"
|
||||||
|
|
||||||
|
def status_get(self, unit):
|
||||||
|
"""Return the current service status of this unit."""
|
||||||
|
raw_status, return_code = unit.run(
|
||||||
|
"status-get --format=json --include-data")
|
||||||
|
if return_code != 0:
|
||||||
|
return ("unknown", "")
|
||||||
|
status = json.loads(raw_status)
|
||||||
|
return (status["status"], status["message"])
|
13 tests/charmhelpers/contrib/openstack/__init__.py Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
13 tests/charmhelpers/contrib/openstack/amulet/__init__.py Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
354 tests/charmhelpers/contrib/openstack/amulet/deployment.py Normal file
@@ -0,0 +1,354 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import six
|
||||||
|
from collections import OrderedDict
|
||||||
|
from charmhelpers.contrib.amulet.deployment import (
|
||||||
|
AmuletDeployment
|
||||||
|
)
|
||||||
|
from charmhelpers.contrib.openstack.amulet.utils import (
|
||||||
|
OPENSTACK_RELEASES_PAIRS
|
||||||
|
)
|
||||||
|
|
||||||
|
DEBUG = logging.DEBUG
|
||||||
|
ERROR = logging.ERROR
|
||||||
|
|
||||||
|
|
||||||
|
class OpenStackAmuletDeployment(AmuletDeployment):
|
||||||
|
"""OpenStack amulet deployment.
|
||||||
|
|
||||||
|
This class inherits from AmuletDeployment and has additional support
|
||||||
|
that is specifically for use by OpenStack charms.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, series=None, openstack=None, source=None,
|
||||||
|
stable=True, log_level=DEBUG):
|
||||||
|
"""Initialize the deployment environment."""
|
||||||
|
super(OpenStackAmuletDeployment, self).__init__(series)
|
||||||
|
self.log = self.get_logger(level=log_level)
|
||||||
|
self.log.info('OpenStackAmuletDeployment: init')
|
||||||
|
self.openstack = openstack
|
||||||
|
self.source = source
|
||||||
|
self.stable = stable
|
||||||
|
|
||||||
|
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
|
||||||
|
"""Get a logger object that will log to stdout."""
|
||||||
|
log = logging
|
||||||
|
logger = log.getLogger(name)
|
||||||
|
fmt = log.Formatter("%(asctime)s %(funcName)s "
|
||||||
|
"%(levelname)s: %(message)s")
|
||||||
|
|
||||||
|
handler = log.StreamHandler(stream=sys.stdout)
|
||||||
|
handler.setLevel(level)
|
||||||
|
handler.setFormatter(fmt)
|
||||||
|
|
||||||
|
logger.addHandler(handler)
|
||||||
|
logger.setLevel(level)
|
||||||
|
|
||||||
|
return logger
|
||||||
|
|
||||||
|
def _determine_branch_locations(self, other_services):
|
||||||
|
"""Determine the branch locations for the other services.
|
||||||
|
|
||||||
|
Determine if the local branch being tested is derived from its
|
||||||
|
stable or next (dev) branch, and based on this, use the corresponding
|
||||||
|
stable or next branches for the other_services."""
|
||||||
|
|
||||||
|
self.log.info('OpenStackAmuletDeployment: determine branch locations')
|
||||||
|
|
||||||
|
# Charms outside the ~openstack-charmers
|
||||||
|
base_charms = {
|
||||||
|
'mysql': ['trusty'],
|
||||||
|
'mongodb': ['trusty'],
|
||||||
|
'nrpe': ['trusty', 'xenial'],
|
||||||
|
}
|
||||||
|
|
||||||
|
for svc in other_services:
|
||||||
|
# If a location has been explicitly set, use it
|
||||||
|
if svc.get('location'):
|
||||||
|
continue
|
||||||
|
if svc['name'] in base_charms:
|
||||||
|
# NOTE: not all charms have support for all series we
|
||||||
|
# want/need to test against, so fix to most recent
|
||||||
|
# that each base charm supports
|
||||||
|
target_series = self.series
|
||||||
|
if self.series not in base_charms[svc['name']]:
|
||||||
|
target_series = base_charms[svc['name']][-1]
|
||||||
|
svc['location'] = 'cs:{}/{}'.format(target_series,
|
||||||
|
svc['name'])
|
||||||
|
elif self.stable:
|
||||||
|
svc['location'] = 'cs:{}/{}'.format(self.series,
|
||||||
|
svc['name'])
|
||||||
|
else:
|
||||||
|
svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
|
||||||
|
self.series,
|
||||||
|
svc['name']
|
||||||
|
)
|
||||||
|
|
||||||
|
return other_services
|
||||||
|
|
||||||
|
def _add_services(self, this_service, other_services, use_source=None,
|
||||||
|
no_origin=None):
|
||||||
|
"""Add services to the deployment and optionally set
|
||||||
|
openstack-origin/source.
|
||||||
|
|
||||||
|
:param this_service dict: Service dictionary describing the service
|
||||||
|
whose amulet tests are being run
|
||||||
|
:param other_services dict: List of service dictionaries describing
|
||||||
|
the services needed to support the target
|
||||||
|
service
|
||||||
|
:param use_source list: List of services which use the 'source' config
|
||||||
|
option rather than 'openstack-origin'
|
||||||
|
:param no_origin list: List of services which do not support setting
|
||||||
|
the Cloud Archive.
|
||||||
|
Service Dict:
|
||||||
|
{
|
||||||
|
'name': str charm-name,
|
||||||
|
'units': int number of units,
|
||||||
|
'constraints': dict of juju constraints,
|
||||||
|
'location': str location of charm,
|
||||||
|
}
|
||||||
|
eg
|
||||||
|
this_service = {
|
||||||
|
'name': 'openvswitch-odl',
|
||||||
|
'constraints': {'mem': '8G'},
|
||||||
|
}
|
||||||
|
other_services = [
|
||||||
|
{
|
||||||
|
'name': 'nova-compute',
|
||||||
|
'units': 2,
|
||||||
|
'constraints': {'mem': '4G'},
|
||||||
|
'location': cs:~bob/xenial/nova-compute
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'name': 'mysql',
|
||||||
|
'constraints': {'mem': '2G'},
|
||||||
|
},
|
||||||
|
{'neutron-api-odl'}]
|
||||||
|
use_source = ['mysql']
|
||||||
|
no_origin = ['neutron-api-odl']
|
||||||
|
"""
|
||||||
|
self.log.info('OpenStackAmuletDeployment: adding services')
|
||||||
|
|
||||||
|
other_services = self._determine_branch_locations(other_services)
|
||||||
|
|
||||||
|
super(OpenStackAmuletDeployment, self)._add_services(this_service,
|
||||||
|
other_services)
|
||||||
|
|
||||||
|
services = other_services
|
||||||
|
services.append(this_service)
|
||||||
|
|
||||||
|
use_source = use_source or []
|
||||||
|
no_origin = no_origin or []
|
||||||
|
|
||||||
|
# Charms which should use the source config option
|
||||||
|
use_source = list(set(
|
||||||
|
use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
||||||
|
'ceph-osd', 'ceph-radosgw', 'ceph-mon',
|
||||||
|
'ceph-proxy', 'percona-cluster', 'lxd']))
|
||||||
|
|
||||||
|
# Charms which can not use openstack-origin, ie. many subordinates
|
||||||
|
no_origin = list(set(
|
||||||
|
no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
|
||||||
|
'nrpe', 'openvswitch-odl', 'neutron-api-odl',
|
||||||
|
'odl-controller', 'cinder-backup', 'nexentaedge-data',
|
||||||
|
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
|
||||||
|
'cinder-nexentaedge', 'nexentaedge-mgmt']))
|
||||||
|
|
||||||
|
if self.openstack:
|
||||||
|
for svc in services:
|
||||||
|
if svc['name'] not in use_source + no_origin:
|
||||||
|
config = {'openstack-origin': self.openstack}
|
||||||
|
self.d.configure(svc['name'], config)
|
||||||
|
|
||||||
|
if self.source:
|
||||||
|
for svc in services:
|
||||||
|
if svc['name'] in use_source and svc['name'] not in no_origin:
|
||||||
|
config = {'source': self.source}
|
||||||
|
self.d.configure(svc['name'], config)
|
||||||
|
|
||||||
|
def _configure_services(self, configs):
|
||||||
|
"""Configure all of the services."""
|
||||||
|
self.log.info('OpenStackAmuletDeployment: configure services')
|
||||||
|
for service, config in six.iteritems(configs):
|
||||||
|
self.d.configure(service, config)
|
||||||
|
|
||||||
|
def _auto_wait_for_status(self, message=None, exclude_services=None,
|
||||||
|
include_only=None, timeout=None):
|
||||||
|
"""Wait for all units to have a specific extended status, except
|
||||||
|
for any defined as excluded. Unless specified via message, any
|
||||||
|
status containing any case of 'ready' will be considered a match.
|
||||||
|
|
||||||
|
Examples of message usage:
|
||||||
|
|
||||||
|
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
|
||||||
|
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
|
||||||
|
|
||||||
|
Wait for all units to reach this status (exact match):
|
||||||
|
message = re.compile('^Unit is ready and clustered$')
|
||||||
|
|
||||||
|
Wait for all units to reach any one of these (exact match):
|
||||||
|
message = re.compile('Unit is ready|OK|Ready')
|
||||||
|
|
||||||
|
Wait for at least one unit to reach this status (exact match):
|
||||||
|
message = {'ready'}
|
||||||
|
|
||||||
|
See Amulet's sentry.wait_for_messages() for message usage detail.
|
||||||
|
https://github.com/juju/amulet/blob/master/amulet/sentry.py
|
||||||
|
|
||||||
|
:param message: Expected status match
|
||||||
|
:param exclude_services: List of juju service names to ignore,
|
||||||
|
not to be used in conjunction with include_only.
|
||||||
|
:param include_only: List of juju service names to exclusively check,
|
||||||
|
not to be used in conjunction with exclude_services.
|
||||||
|
:param timeout: Maximum time in seconds to wait for status match
|
||||||
|
:returns: None. Raises if timeout is hit.
|
||||||
|
"""
|
||||||
|
if not timeout:
|
||||||
|
timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
|
||||||
|
self.log.info('Waiting for extended status on units for {}s...'
|
||||||
|
''.format(timeout))
|
||||||
|
|
||||||
|
all_services = self.d.services.keys()
|
||||||
|
|
||||||
|
if exclude_services and include_only:
|
||||||
|
raise ValueError('exclude_services can not be used '
|
||||||
|
'with include_only')
|
||||||
|
|
||||||
|
if message:
|
||||||
|
if isinstance(message, re._pattern_type):
|
||||||
|
match = message.pattern
|
||||||
|
else:
|
||||||
|
match = message
|
||||||
|
|
||||||
|
self.log.debug('Custom extended status wait match: '
|
||||||
|
'{}'.format(match))
|
||||||
|
else:
|
||||||
|
self.log.debug('Default extended status wait match: contains '
|
||||||
|
'READY (case-insensitive)')
|
||||||
|
message = re.compile('.*ready.*', re.IGNORECASE)
|
||||||
|
|
||||||
|
if exclude_services:
|
||||||
|
self.log.debug('Excluding services from extended status match: '
|
||||||
|
'{}'.format(exclude_services))
|
||||||
|
else:
|
||||||
|
exclude_services = []
|
||||||
|
|
||||||
|
if include_only:
|
||||||
|
services = include_only
|
||||||
|
else:
|
||||||
|
services = list(set(all_services) - set(exclude_services))
|
||||||
|
|
||||||
|
self.log.debug('Waiting up to {}s for extended status on services: '
|
||||||
|
'{}'.format(timeout, services))
|
||||||
|
service_messages = {service: message for service in services}
|
||||||
|
|
||||||
|
# Check for idleness
|
||||||
|
self.d.sentry.wait(timeout=timeout)
|
||||||
|
# Check for error states and bail early
|
||||||
|
self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
|
||||||
|
# Check for ready messages
|
||||||
|
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
|
||||||
|
|
||||||
|
self.log.info('OK')
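# Illustrative usage sketch (service names are hypothetical): typically called
# from a test's setUp after deployment, optionally narrowing the match.
#
#     self._auto_wait_for_status(exclude_services=['mysql'])
#     self._auto_wait_for_status(
#         message=re.compile('Unit is ready'), include_only=['keystone'])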
|
||||||
|
|
||||||
|
def _get_openstack_release(self):
|
||||||
|
"""Get openstack release.
|
||||||
|
|
||||||
|
Return an integer representing the enum value of the openstack
|
||||||
|
release.
|
||||||
|
"""
|
||||||
|
# Must be ordered by OpenStack release (not by Ubuntu release):
|
||||||
|
for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
|
||||||
|
setattr(self, os_pair, i)
|
||||||
|
|
||||||
|
releases = {
|
||||||
|
('trusty', None): self.trusty_icehouse,
|
||||||
|
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
|
||||||
|
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
|
||||||
|
('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
|
||||||
|
('xenial', None): self.xenial_mitaka,
|
||||||
|
('xenial', 'cloud:xenial-newton'): self.xenial_newton,
|
||||||
|
('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
|
||||||
|
('xenial', 'cloud:xenial-pike'): self.xenial_pike,
|
||||||
|
('xenial', 'cloud:xenial-queens'): self.xenial_queens,
|
||||||
|
('yakkety', None): self.yakkety_newton,
|
||||||
|
('zesty', None): self.zesty_ocata,
|
||||||
|
('artful', None): self.artful_pike,
|
||||||
|
('bionic', None): self.bionic_queens,
|
||||||
|
}
|
||||||
|
return releases[(self.series, self.openstack)]
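# Illustrative usage sketch: the attributes set from OPENSTACK_RELEASES_PAIRS
# allow ordered release comparisons, e.g.
#
#     if self._get_openstack_release() >= self.xenial_ocata:
#         # behaviour specific to Ocata or later
#         ...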
|
||||||
|
|
||||||
|
def _get_openstack_release_string(self):
|
||||||
|
"""Get openstack release string.
|
||||||
|
|
||||||
|
Return a string representing the openstack release.
|
||||||
|
"""
|
||||||
|
releases = OrderedDict([
|
||||||
|
('trusty', 'icehouse'),
|
||||||
|
('xenial', 'mitaka'),
|
||||||
|
('yakkety', 'newton'),
|
||||||
|
('zesty', 'ocata'),
|
||||||
|
('artful', 'pike'),
|
||||||
|
('bionic', 'queens'),
|
||||||
|
])
|
||||||
|
if self.openstack:
|
||||||
|
os_origin = self.openstack.split(':')[1]
|
||||||
|
return os_origin.split('%s-' % self.series)[1].split('/')[0]
|
||||||
|
else:
|
||||||
|
return releases[self.series]
|
||||||
|
|
||||||
|
def get_ceph_expected_pools(self, radosgw=False):
|
||||||
|
"""Return a list of expected ceph pools in a ceph + cinder + glance
|
||||||
|
test scenario, based on OpenStack release and whether ceph radosgw
|
||||||
|
is flagged as present or not."""
|
||||||
|
|
||||||
|
if self._get_openstack_release() == self.trusty_icehouse:
|
||||||
|
# Icehouse
|
||||||
|
pools = [
|
||||||
|
'data',
|
||||||
|
'metadata',
|
||||||
|
'rbd',
|
||||||
|
'cinder-ceph',
|
||||||
|
'glance'
|
||||||
|
]
|
||||||
|
elif (self.trusty_kilo <= self._get_openstack_release() <=
|
||||||
|
self.zesty_ocata):
|
||||||
|
# Kilo through Ocata
|
||||||
|
pools = [
|
||||||
|
'rbd',
|
||||||
|
'cinder-ceph',
|
||||||
|
'glance'
|
||||||
|
]
|
||||||
|
else:
|
||||||
|
# Pike and later
|
||||||
|
pools = [
|
||||||
|
'cinder-ceph',
|
||||||
|
'glance'
|
||||||
|
]
|
||||||
|
|
||||||
|
if radosgw:
|
||||||
|
pools.extend([
|
||||||
|
'.rgw.root',
|
||||||
|
'.rgw.control',
|
||||||
|
'.rgw',
|
||||||
|
'.rgw.gc',
|
||||||
|
'.users.uid'
|
||||||
|
])
|
||||||
|
|
||||||
|
return pools
|
1513 tests/charmhelpers/contrib/openstack/amulet/utils.py Normal file
File diff suppressed because it is too large
13 tests/charmhelpers/core/__init__.py Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
55 tests/charmhelpers/core/decorators.py Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
#
|
||||||
|
# Copyright 2014 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Edward Hope-Morley <opentastic@gmail.com>
|
||||||
|
#
|
||||||
|
|
||||||
|
import time
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
log,
|
||||||
|
INFO,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
|
||||||
|
"""If the decorated function raises exception exc_type, allow num_retries
|
||||||
|
retry attempts before raising the exception.
|
||||||
|
"""
|
||||||
|
def _retry_on_exception_inner_1(f):
|
||||||
|
def _retry_on_exception_inner_2(*args, **kwargs):
|
||||||
|
retries = num_retries
|
||||||
|
multiplier = 1
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
return f(*args, **kwargs)
|
||||||
|
except exc_type:
|
||||||
|
if not retries:
|
||||||
|
raise
|
||||||
|
|
||||||
|
delay = base_delay * multiplier
|
||||||
|
multiplier += 1
|
||||||
|
log("Retrying '%s' %d more times (delay=%s)" %
|
||||||
|
(f.__name__, retries, delay), level=INFO)
|
||||||
|
retries -= 1
|
||||||
|
if delay:
|
||||||
|
time.sleep(delay)
|
||||||
|
|
||||||
|
return _retry_on_exception_inner_2
|
||||||
|
|
||||||
|
return _retry_on_exception_inner_1
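# Example sketch (hypothetical function, not part of charm-helpers): retry a
# flaky call up to 3 times with an increasing delay before giving up.
@retry_on_exception(3, base_delay=5, exc_type=IOError)
def _example_read_status(path='/var/lib/example/status'):
    # Raises IOError until the file appears; the decorator retries for us.
    with open(path) as f:
        return f.read().strip()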
|
43 tests/charmhelpers/core/files.py Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
|
||||||
|
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
|
||||||
|
def sed(filename, before, after, flags='g'):
|
||||||
|
"""
|
||||||
|
Search for and replace the given pattern in filename.
|
||||||
|
|
||||||
|
:param filename: relative or absolute file path.
|
||||||
|
:param before: expression to be replaced (see 'man sed')
|
||||||
|
:param after: expression to replace with (see 'man sed')
|
||||||
|
:param flags: sed-compatible regex flags; for example, to make
|
||||||
|
the search and replace case insensitive, specify ``flags="i"``.
|
||||||
|
The ``g`` flag is always specified regardless, so you do not
|
||||||
|
need to remember to include it when overriding this parameter.
|
||||||
|
:returns: If the sed command exit code was zero then return,
|
||||||
|
otherwise raise CalledProcessError.
|
||||||
|
"""
|
||||||
|
expression = r's/{0}/{1}/{2}'.format(before,
|
||||||
|
after, flags)
|
||||||
|
|
||||||
|
return subprocess.check_call(["sed", "-i", "-r", "-e",
|
||||||
|
expression,
|
||||||
|
os.path.expanduser(filename)])
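# Example sketch (hypothetical helper, not part of charm-helpers): use sed()
# to flip a boolean option in a config file, case-insensitively.
def _example_enable_debug(conf_path='/tmp/example.conf'):
    # 'i' is added to the default 'g' flag for a case-insensitive match.
    sed(conf_path, 'debug *= *false', 'debug = True', flags='gi')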
|
132 tests/charmhelpers/core/fstab.py Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import io
|
||||||
|
import os
|
||||||
|
|
||||||
|
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
|
||||||
|
|
||||||
|
|
||||||
|
class Fstab(io.FileIO):
|
||||||
|
"""This class extends file in order to implement a file reader/writer
|
||||||
|
for file `/etc/fstab`
|
||||||
|
"""
|
||||||
|
|
||||||
|
class Entry(object):
|
||||||
|
"""Entry class represents a non-comment line on the `/etc/fstab` file
|
||||||
|
"""
|
||||||
|
def __init__(self, device, mountpoint, filesystem,
|
||||||
|
options, d=0, p=0):
|
||||||
|
self.device = device
|
||||||
|
self.mountpoint = mountpoint
|
||||||
|
self.filesystem = filesystem
|
||||||
|
|
||||||
|
if not options:
|
||||||
|
options = "defaults"
|
||||||
|
|
||||||
|
self.options = options
|
||||||
|
self.d = int(d)
|
||||||
|
self.p = int(p)
|
||||||
|
|
||||||
|
def __eq__(self, o):
|
||||||
|
return str(self) == str(o)
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return "{} {} {} {} {} {}".format(self.device,
|
||||||
|
self.mountpoint,
|
||||||
|
self.filesystem,
|
||||||
|
self.options,
|
||||||
|
self.d,
|
||||||
|
self.p)
|
||||||
|
|
||||||
|
DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
|
||||||
|
|
||||||
|
def __init__(self, path=None):
|
||||||
|
if path:
|
||||||
|
self._path = path
|
||||||
|
else:
|
||||||
|
self._path = self.DEFAULT_PATH
|
||||||
|
super(Fstab, self).__init__(self._path, 'rb+')
|
||||||
|
|
||||||
|
def _hydrate_entry(self, line):
|
||||||
|
# NOTE: use split with no arguments to split on any
|
||||||
|
# whitespace including tabs
|
||||||
|
return Fstab.Entry(*filter(
|
||||||
|
lambda x: x not in ('', None),
|
||||||
|
line.strip("\n").split()))
|
||||||
|
|
||||||
|
@property
|
||||||
|
def entries(self):
|
||||||
|
self.seek(0)
|
||||||
|
for line in self.readlines():
|
||||||
|
line = line.decode('us-ascii')
|
||||||
|
try:
|
||||||
|
if line.strip() and not line.strip().startswith("#"):
|
||||||
|
yield self._hydrate_entry(line)
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def get_entry_by_attr(self, attr, value):
|
||||||
|
for entry in self.entries:
|
||||||
|
e_attr = getattr(entry, attr)
|
||||||
|
if e_attr == value:
|
||||||
|
return entry
|
||||||
|
return None
|
||||||
|
|
||||||
|
def add_entry(self, entry):
|
||||||
|
if self.get_entry_by_attr('device', entry.device):
|
||||||
|
return False
|
||||||
|
|
||||||
|
self.write((str(entry) + '\n').encode('us-ascii'))
|
||||||
|
self.truncate()
|
||||||
|
return entry
|
||||||
|
|
||||||
|
def remove_entry(self, entry):
|
||||||
|
self.seek(0)
|
||||||
|
|
||||||
|
lines = [l.decode('us-ascii') for l in self.readlines()]
|
||||||
|
|
||||||
|
found = False
|
||||||
|
for index, line in enumerate(lines):
|
||||||
|
if line.strip() and not line.strip().startswith("#"):
|
||||||
|
if self._hydrate_entry(line) == entry:
|
||||||
|
found = True
|
||||||
|
break
|
||||||
|
|
||||||
|
if not found:
|
||||||
|
return False
|
||||||
|
|
||||||
|
lines.remove(line)
|
||||||
|
|
||||||
|
self.seek(0)
|
||||||
|
self.write(''.join(lines).encode('us-ascii'))
|
||||||
|
self.truncate()
|
||||||
|
return True
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def remove_by_mountpoint(cls, mountpoint, path=None):
|
||||||
|
fstab = cls(path=path)
|
||||||
|
entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
|
||||||
|
if entry:
|
||||||
|
return fstab.remove_entry(entry)
|
||||||
|
return False
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def add(cls, device, mountpoint, filesystem, options=None, path=None):
|
||||||
|
return cls(path=path).add_entry(Fstab.Entry(device,
|
||||||
|
mountpoint, filesystem,
|
||||||
|
options=options))
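# Illustrative usage sketch (device and mount point are hypothetical):
#
#     Fstab.add('/dev/vdb1', '/srv/data', 'ext4', options='noatime')
#     Fstab.remove_by_mountpoint('/srv/data')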
|
1270 tests/charmhelpers/core/hookenv.py Normal file
File diff suppressed because it is too large
1028 tests/charmhelpers/core/host.py Normal file
File diff suppressed because it is too large
0 tests/charmhelpers/core/host_factory/__init__.py Normal file
72 tests/charmhelpers/core/host_factory/centos.py Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
import subprocess
|
||||||
|
import yum
|
||||||
|
import os
|
||||||
|
|
||||||
|
from charmhelpers.core.strutils import BasicStringComparator
|
||||||
|
|
||||||
|
|
||||||
|
class CompareHostReleases(BasicStringComparator):
|
||||||
|
"""Provide comparisons of Host releases.
|
||||||
|
|
||||||
|
Use in the form of
|
||||||
|
|
||||||
|
if CompareHostReleases(release) > 'trusty':
|
||||||
|
# do something with mitaka
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, item):
|
||||||
|
raise NotImplementedError(
|
||||||
|
"CompareHostReleases() is not implemented for CentOS")
|
||||||
|
|
||||||
|
|
||||||
|
def service_available(service_name):
|
||||||
|
# """Determine whether a system service is available."""
|
||||||
|
if os.path.isdir('/run/systemd/system'):
|
||||||
|
cmd = ['systemctl', 'is-enabled', service_name]
|
||||||
|
else:
|
||||||
|
cmd = ['service', service_name, 'is-enabled']
|
||||||
|
return subprocess.call(cmd) == 0
|
||||||
|
|
||||||
|
|
||||||
|
def add_new_group(group_name, system_group=False, gid=None):
|
||||||
|
cmd = ['groupadd']
|
||||||
|
if gid:
|
||||||
|
cmd.extend(['--gid', str(gid)])
|
||||||
|
if system_group:
|
||||||
|
cmd.append('-r')
|
||||||
|
cmd.append(group_name)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def lsb_release():
|
||||||
|
"""Return /etc/os-release in a dict."""
|
||||||
|
d = {}
|
||||||
|
with open('/etc/os-release', 'r') as lsb:
|
||||||
|
for l in lsb:
|
||||||
|
s = l.split('=')
|
||||||
|
if len(s) != 2:
|
||||||
|
continue
|
||||||
|
d[s[0].strip()] = s[1].strip()
|
||||||
|
return d
|
||||||
|
|
||||||
|
|
||||||
|
def cmp_pkgrevno(package, revno, pkgcache=None):
|
||||||
|
"""Compare supplied revno with the revno of the installed package.
|
||||||
|
|
||||||
|
* 1 => Installed revno is greater than supplied arg
|
||||||
|
* 0 => Installed revno is the same as supplied arg
|
||||||
|
* -1 => Installed revno is less than supplied arg
|
||||||
|
|
||||||
|
This function imports YumBase function if the pkgcache argument
|
||||||
|
is None.
|
||||||
|
"""
|
||||||
|
if not pkgcache:
|
||||||
|
y = yum.YumBase()
|
||||||
|
packages = y.doPackageLists()
|
||||||
|
pkgcache = {i.Name: i.version for i in packages['installed']}
|
||||||
|
pkg = pkgcache[package]
|
||||||
|
if pkg > revno:
|
||||||
|
return 1
|
||||||
|
if pkg < revno:
|
||||||
|
return -1
|
||||||
|
return 0
|
90 tests/charmhelpers/core/host_factory/ubuntu.py Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
import subprocess
|
||||||
|
|
||||||
|
from charmhelpers.core.strutils import BasicStringComparator
|
||||||
|
|
||||||
|
|
||||||
|
UBUNTU_RELEASES = (
|
||||||
|
'lucid',
|
||||||
|
'maverick',
|
||||||
|
'natty',
|
||||||
|
'oneiric',
|
||||||
|
'precise',
|
||||||
|
'quantal',
|
||||||
|
'raring',
|
||||||
|
'saucy',
|
||||||
|
'trusty',
|
||||||
|
'utopic',
|
||||||
|
'vivid',
|
||||||
|
'wily',
|
||||||
|
'xenial',
|
||||||
|
'yakkety',
|
||||||
|
'zesty',
|
||||||
|
'artful',
|
||||||
|
'bionic',
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class CompareHostReleases(BasicStringComparator):
|
||||||
|
"""Provide comparisons of Ubuntu releases.
|
||||||
|
|
||||||
|
Use in the form of
|
||||||
|
|
||||||
|
if CompareHostReleases(release) > 'trusty':
|
||||||
|
# do something with mitaka
|
||||||
|
"""
|
||||||
|
_list = UBUNTU_RELEASES
|
||||||
|
|
||||||
|
|
||||||
|
def service_available(service_name):
|
||||||
|
"""Determine whether a system service is available"""
|
||||||
|
try:
|
||||||
|
subprocess.check_output(
|
||||||
|
['service', service_name, 'status'],
|
||||||
|
stderr=subprocess.STDOUT).decode('UTF-8')
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
return b'unrecognized service' not in e.output
|
||||||
|
else:
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def add_new_group(group_name, system_group=False, gid=None):
|
||||||
|
cmd = ['addgroup']
|
||||||
|
if gid:
|
||||||
|
cmd.extend(['--gid', str(gid)])
|
||||||
|
if system_group:
|
||||||
|
cmd.append('--system')
|
||||||
|
else:
|
||||||
|
cmd.extend([
|
||||||
|
'--group',
|
||||||
|
])
|
||||||
|
cmd.append(group_name)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def lsb_release():
|
||||||
|
"""Return /etc/lsb-release in a dict"""
|
||||||
|
d = {}
|
||||||
|
with open('/etc/lsb-release', 'r') as lsb:
|
||||||
|
for l in lsb:
|
||||||
|
k, v = l.split('=')
|
||||||
|
d[k.strip()] = v.strip()
|
||||||
|
return d
|
||||||
|
|
||||||
|
|
||||||
|
def cmp_pkgrevno(package, revno, pkgcache=None):
|
||||||
|
"""Compare supplied revno with the revno of the installed package.
|
||||||
|
|
||||||
|
* 1 => Installed revno is greater than supplied arg
|
||||||
|
* 0 => Installed revno is the same as supplied arg
|
||||||
|
* -1 => Installed revno is less than supplied arg
|
||||||
|
|
||||||
|
This function imports apt_cache function from charmhelpers.fetch if
|
||||||
|
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
|
||||||
|
you call this function, or pass an apt_pkg.Cache() instance.
|
||||||
|
"""
|
||||||
|
import apt_pkg
|
||||||
|
if not pkgcache:
|
||||||
|
from charmhelpers.fetch import apt_cache
|
||||||
|
pkgcache = apt_cache()
|
||||||
|
pkg = pkgcache[package]
|
||||||
|
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
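# Example sketch (hypothetical package and version, not part of charm-helpers):
# branch on whether the installed package is at least a given revision.
def _example_has_recent_qemu(min_version='1:2.5'):
    return cmp_pkgrevno('qemu-kvm', min_version) >= 0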
|
69 tests/charmhelpers/core/hugepage.py Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
from charmhelpers.core import fstab
|
||||||
|
from charmhelpers.core import sysctl
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
add_group,
|
||||||
|
add_user_to_group,
|
||||||
|
fstab_mount,
|
||||||
|
mkdir,
|
||||||
|
)
|
||||||
|
from charmhelpers.core.strutils import bytes_from_string
|
||||||
|
from subprocess import check_output
|
||||||
|
|
||||||
|
|
||||||
|
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
|
||||||
|
max_map_count=65536, mnt_point='/run/hugepages/kvm',
|
||||||
|
pagesize='2MB', mount=True, set_shmmax=False):
|
||||||
|
"""Enable hugepages on system.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
user (str) -- Username to allow access to hugepages to
|
||||||
|
group (str) -- Group name to own hugepages
|
||||||
|
nr_hugepages (int) -- Number of pages to reserve
|
||||||
|
max_map_count (int) -- Number of Virtual Memory Areas a process can own
|
||||||
|
mnt_point (str) -- Directory to mount hugepages on
|
||||||
|
pagesize (str) -- Size of hugepages
|
||||||
|
mount (bool) -- Whether to Mount hugepages
|
||||||
|
"""
|
||||||
|
group_info = add_group(group)
|
||||||
|
gid = group_info.gr_gid
|
||||||
|
add_user_to_group(user, group)
|
||||||
|
if max_map_count < 2 * nr_hugepages:
|
||||||
|
max_map_count = 2 * nr_hugepages
|
||||||
|
sysctl_settings = {
|
||||||
|
'vm.nr_hugepages': nr_hugepages,
|
||||||
|
'vm.max_map_count': max_map_count,
|
||||||
|
'vm.hugetlb_shm_group': gid,
|
||||||
|
}
|
||||||
|
if set_shmmax:
|
||||||
|
shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
|
||||||
|
shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
|
||||||
|
if shmmax_minsize > shmmax_current:
|
||||||
|
sysctl_settings['kernel.shmmax'] = shmmax_minsize
|
||||||
|
sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
|
||||||
|
mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
|
||||||
|
lfstab = fstab.Fstab()
|
||||||
|
fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
|
||||||
|
if fstab_entry:
|
||||||
|
lfstab.remove_entry(fstab_entry)
|
||||||
|
entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
|
||||||
|
'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
|
||||||
|
lfstab.add_entry(entry)
|
||||||
|
if mount:
|
||||||
|
fstab_mount(mnt_point)
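# Example sketch (hypothetical values, not part of charm-helpers): reserve 1G
# of 2MB hugepages for a qemu/KVM workload and mount them for the libvirt user.
def _example_setup_hugepages():
    hugepage_support('libvirt-qemu', group='hugetlb', nr_hugepages=512,
                     mnt_point='/run/hugepages/kvm', pagesize='2MB',
                     mount=True)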
|
72 tests/charmhelpers/core/kernel.py Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
from charmhelpers.osplatform import get_platform
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
log,
|
||||||
|
INFO
|
||||||
|
)
|
||||||
|
|
||||||
|
__platform__ = get_platform()
|
||||||
|
if __platform__ == "ubuntu":
|
||||||
|
from charmhelpers.core.kernel_factory.ubuntu import (
|
||||||
|
persistent_modprobe,
|
||||||
|
update_initramfs,
|
||||||
|
) # flake8: noqa -- ignore F401 for this import
|
||||||
|
elif __platform__ == "centos":
|
||||||
|
from charmhelpers.core.kernel_factory.centos import (
|
||||||
|
persistent_modprobe,
|
||||||
|
update_initramfs,
|
||||||
|
) # flake8: noqa -- ignore F401 for this import
|
||||||
|
|
||||||
|
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
||||||
|
|
||||||
|
|
||||||
|
def modprobe(module, persist=True):
|
||||||
|
"""Load a kernel module and configure for auto-load on reboot."""
|
||||||
|
cmd = ['modprobe', module]
|
||||||
|
|
||||||
|
log('Loading kernel module %s' % module, level=INFO)
|
||||||
|
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
if persist:
|
||||||
|
persistent_modprobe(module)
|
||||||
|
|
||||||
|
|
||||||
|
def rmmod(module, force=False):
|
||||||
|
"""Remove a module from the linux kernel"""
|
||||||
|
cmd = ['rmmod']
|
||||||
|
if force:
|
||||||
|
cmd.append('-f')
|
||||||
|
cmd.append(module)
|
||||||
|
log('Removing kernel module %s' % module, level=INFO)
|
||||||
|
return subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def lsmod():
|
||||||
|
"""Shows what kernel modules are currently loaded"""
|
||||||
|
return subprocess.check_output(['lsmod'],
|
||||||
|
universal_newlines=True)
|
||||||
|
|
||||||
|
|
||||||
|
def is_module_loaded(module):
|
||||||
|
"""Checks if a kernel module is already loaded"""
|
||||||
|
matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
|
||||||
|
return len(matches) > 0
|
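Sketch of loading a kernel module only when it is missing, as a hook might do. It assumes an Ubuntu unit inside a hook (modprobe, /etc/modules and juju-log are all available there); '8021q' is just an example module name.

from charmhelpers.core import kernel

def ensure_vlan_module():
    if not kernel.is_module_loaded('8021q'):
        # Loads the module now and records it via the platform-specific
        # persistent_modprobe so it is loaded again after a reboot.
        kernel.modprobe('8021q', persist=True)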
0  tests/charmhelpers/core/kernel_factory/__init__.py  Normal file
17  tests/charmhelpers/core/kernel_factory/centos.py  Normal file
@@ -0,0 +1,17 @@
import subprocess
import os


def persistent_modprobe(module):
    """Load a kernel module and configure for auto-load on reboot."""
    if not os.path.exists('/etc/rc.modules'):
        open('/etc/rc.modules', 'a')
        os.chmod('/etc/rc.modules', 111)
    with open('/etc/rc.modules', 'r+') as modules:
        if module not in modules.read():
            modules.write('modprobe %s\n' % module)


def update_initramfs(version='all'):
    """Updates an initramfs image."""
    return subprocess.check_call(["dracut", "-f", version])
13  tests/charmhelpers/core/kernel_factory/ubuntu.py  Normal file
@@ -0,0 +1,13 @@
import subprocess


def persistent_modprobe(module):
    """Load a kernel module and configure for auto-load on reboot."""
    with open('/etc/modules', 'r+') as modules:
        if module not in modules.read():
            modules.write(module + "\n")


def update_initramfs(version='all'):
    """Updates an initramfs image."""
    return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
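Illustration only: the same "append if absent" idiom used by persistent_modprobe above, written against an arbitrary path so it can be exercised without touching /etc/modules. The helper name and the temporary path are hypothetical.

def append_module_if_missing(module, path='/tmp/modules-example'):
    # 'a+' creates the file if needed; reading first keeps the write idempotent.
    with open(path, 'a+') as modules:
        modules.seek(0)
        if module not in modules.read():
            modules.write(module + "\n")

append_module_if_missing('bonding')
append_module_if_missing('bonding')  # second call is a no-op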
16  tests/charmhelpers/core/services/__init__.py  Normal file
@@ -0,0 +1,16 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .base import *  # NOQA
from .helpers import *  # NOQA
360  tests/charmhelpers/core/services/base.py  Normal file
@@ -0,0 +1,360 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
from inspect import getargspec
|
||||||
|
from collections import Iterable, OrderedDict
|
||||||
|
|
||||||
|
from charmhelpers.core import host
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['ServiceManager', 'ManagerCallback',
|
||||||
|
'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
|
||||||
|
'service_restart', 'service_stop']
|
||||||
|
|
||||||
|
|
||||||
|
class ServiceManager(object):
|
||||||
|
def __init__(self, services=None):
|
||||||
|
"""
|
||||||
|
Register a list of services, given their definitions.
|
||||||
|
|
||||||
|
Service definitions are dicts in the following formats (all keys except
|
||||||
|
'service' are optional)::
|
||||||
|
|
||||||
|
{
|
||||||
|
"service": <service name>,
|
||||||
|
"required_data": <list of required data contexts>,
|
||||||
|
"provided_data": <list of provided data contexts>,
|
||||||
|
"data_ready": <one or more callbacks>,
|
||||||
|
"data_lost": <one or more callbacks>,
|
||||||
|
"start": <one or more callbacks>,
|
||||||
|
"stop": <one or more callbacks>,
|
||||||
|
"ports": <list of ports to manage>,
|
||||||
|
}
|
||||||
|
|
||||||
|
The 'required_data' list should contain dicts of required data (or
|
||||||
|
dependency managers that act like dicts and know how to collect the data).
|
||||||
|
Only when all items in the 'required_data' list are populated are the list
|
||||||
|
of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
|
||||||
|
information.
|
||||||
|
|
||||||
|
The 'provided_data' list should contain relation data providers, most likely
|
||||||
|
a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
|
||||||
|
that will indicate a set of data to set on a given relation.
|
||||||
|
|
||||||
|
The 'data_ready' value should be either a single callback, or a list of
|
||||||
|
callbacks, to be called when all items in 'required_data' pass `is_ready()`.
|
||||||
|
Each callback will be called with the service name as the only parameter.
|
||||||
|
After all of the 'data_ready' callbacks are called, the 'start' callbacks
|
||||||
|
are fired.
|
||||||
|
|
||||||
|
The 'data_lost' value should be either a single callback, or a list of
|
||||||
|
callbacks, to be called when a 'required_data' item no longer passes
|
||||||
|
`is_ready()`. Each callback will be called with the service name as the
|
||||||
|
only parameter. After all of the 'data_lost' callbacks are called,
|
||||||
|
the 'stop' callbacks are fired.
|
||||||
|
|
||||||
|
The 'start' value should be either a single callback, or a list of
|
||||||
|
callbacks, to be called when starting the service, after the 'data_ready'
|
||||||
|
callbacks are complete. Each callback will be called with the service
|
||||||
|
name as the only parameter. This defaults to
|
||||||
|
`[host.service_start, services.open_ports]`.
|
||||||
|
|
||||||
|
The 'stop' value should be either a single callback, or a list of
|
||||||
|
callbacks, to be called when stopping the service. If the service is
|
||||||
|
being stopped because it no longer has all of its 'required_data', this
|
||||||
|
will be called after all of the 'data_lost' callbacks are complete.
|
||||||
|
Each callback will be called with the service name as the only parameter.
|
||||||
|
This defaults to `[services.close_ports, host.service_stop]`.
|
||||||
|
|
||||||
|
The 'ports' value should be a list of ports to manage. The default
|
||||||
|
'start' handler will open the ports after the service is started,
|
||||||
|
and the default 'stop' handler will close the ports prior to stopping
|
||||||
|
the service.
|
||||||
|
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
The following registers an Upstart service called bingod that depends on
|
||||||
|
a mongodb relation and which runs a custom `db_migrate` function prior to
|
||||||
|
restarting the service, and a Runit service called spadesd::
|
||||||
|
|
||||||
|
manager = services.ServiceManager([
|
||||||
|
{
|
||||||
|
'service': 'bingod',
|
||||||
|
'ports': [80, 443],
|
||||||
|
'required_data': [MongoRelation(), config(), {'my': 'data'}],
|
||||||
|
'data_ready': [
|
||||||
|
services.template(source='bingod.conf'),
|
||||||
|
services.template(source='bingod.ini',
|
||||||
|
target='/etc/bingod.ini',
|
||||||
|
owner='bingo', perms=0400),
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'service': 'spadesd',
|
||||||
|
'data_ready': services.template(source='spadesd_run.j2',
|
||||||
|
target='/etc/sv/spadesd/run',
|
||||||
|
perms=0555),
|
||||||
|
'start': runit_start,
|
||||||
|
'stop': runit_stop,
|
||||||
|
},
|
||||||
|
])
|
||||||
|
manager.manage()
|
||||||
|
"""
|
||||||
|
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
|
||||||
|
self._ready = None
|
||||||
|
self.services = OrderedDict()
|
||||||
|
for service in services or []:
|
||||||
|
service_name = service['service']
|
||||||
|
self.services[service_name] = service
|
||||||
|
|
||||||
|
def manage(self):
|
||||||
|
"""
|
||||||
|
Handle the current hook by doing The Right Thing with the registered services.
|
||||||
|
"""
|
||||||
|
hookenv._run_atstart()
|
||||||
|
try:
|
||||||
|
hook_name = hookenv.hook_name()
|
||||||
|
if hook_name == 'stop':
|
||||||
|
self.stop_services()
|
||||||
|
else:
|
||||||
|
self.reconfigure_services()
|
||||||
|
self.provide_data()
|
||||||
|
except SystemExit as x:
|
||||||
|
if x.code is None or x.code == 0:
|
||||||
|
hookenv._run_atexit()
|
||||||
|
hookenv._run_atexit()
|
||||||
|
|
||||||
|
def provide_data(self):
|
||||||
|
"""
|
||||||
|
Set the relation data for each provider in the ``provided_data`` list.
|
||||||
|
|
||||||
|
A provider must have a `name` attribute, which indicates which relation
|
||||||
|
to set data on, and a `provide_data()` method, which returns a dict of
|
||||||
|
data to set.
|
||||||
|
|
||||||
|
The `provide_data()` method can optionally accept two parameters:
|
||||||
|
|
||||||
|
* ``remote_service`` The name of the remote service that the data will
|
||||||
|
be provided to. The `provide_data()` method will be called once
|
||||||
|
for each connected service (not unit). This allows the method to
|
||||||
|
tailor its data to the given service.
|
||||||
|
* ``service_ready`` Whether or not the service definition had all of
|
||||||
|
its requirements met, and thus the ``data_ready`` callbacks run.
|
||||||
|
|
||||||
|
Note that the ``provided_data`` methods are now called **after** the
|
||||||
|
``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
|
||||||
|
a chance to generate any data necessary for the providing to the remote
|
||||||
|
services.
|
||||||
|
"""
|
||||||
|
for service_name, service in self.services.items():
|
||||||
|
service_ready = self.is_ready(service_name)
|
||||||
|
for provider in service.get('provided_data', []):
|
||||||
|
for relid in hookenv.relation_ids(provider.name):
|
||||||
|
units = hookenv.related_units(relid)
|
||||||
|
if not units:
|
||||||
|
continue
|
||||||
|
remote_service = units[0].split('/')[0]
|
||||||
|
argspec = getargspec(provider.provide_data)
|
||||||
|
if len(argspec.args) > 1:
|
||||||
|
data = provider.provide_data(remote_service, service_ready)
|
||||||
|
else:
|
||||||
|
data = provider.provide_data()
|
||||||
|
if data:
|
||||||
|
hookenv.relation_set(relid, data)
|
||||||
|
|
||||||
|
def reconfigure_services(self, *service_names):
|
||||||
|
"""
|
||||||
|
Update all files for one or more registered services, and,
|
||||||
|
if ready, optionally restart them.
|
||||||
|
|
||||||
|
If no service names are given, reconfigures all registered services.
|
||||||
|
"""
|
||||||
|
for service_name in service_names or self.services.keys():
|
||||||
|
if self.is_ready(service_name):
|
||||||
|
self.fire_event('data_ready', service_name)
|
||||||
|
self.fire_event('start', service_name, default=[
|
||||||
|
service_restart,
|
||||||
|
manage_ports])
|
||||||
|
self.save_ready(service_name)
|
||||||
|
else:
|
||||||
|
if self.was_ready(service_name):
|
||||||
|
self.fire_event('data_lost', service_name)
|
||||||
|
self.fire_event('stop', service_name, default=[
|
||||||
|
manage_ports,
|
||||||
|
service_stop])
|
||||||
|
self.save_lost(service_name)
|
||||||
|
|
||||||
|
def stop_services(self, *service_names):
|
||||||
|
"""
|
||||||
|
Stop one or more registered services, by name.
|
||||||
|
|
||||||
|
If no service names are given, stops all registered services.
|
||||||
|
"""
|
||||||
|
for service_name in service_names or self.services.keys():
|
||||||
|
self.fire_event('stop', service_name, default=[
|
||||||
|
manage_ports,
|
||||||
|
service_stop])
|
||||||
|
|
||||||
|
def get_service(self, service_name):
|
||||||
|
"""
|
||||||
|
Given the name of a registered service, return its service definition.
|
||||||
|
"""
|
||||||
|
service = self.services.get(service_name)
|
||||||
|
if not service:
|
||||||
|
raise KeyError('Service not registered: %s' % service_name)
|
||||||
|
return service
|
||||||
|
|
||||||
|
def fire_event(self, event_name, service_name, default=None):
|
||||||
|
"""
|
||||||
|
Fire a data_ready, data_lost, start, or stop event on a given service.
|
||||||
|
"""
|
||||||
|
service = self.get_service(service_name)
|
||||||
|
callbacks = service.get(event_name, default)
|
||||||
|
if not callbacks:
|
||||||
|
return
|
||||||
|
if not isinstance(callbacks, Iterable):
|
||||||
|
callbacks = [callbacks]
|
||||||
|
for callback in callbacks:
|
||||||
|
if isinstance(callback, ManagerCallback):
|
||||||
|
callback(self, service_name, event_name)
|
||||||
|
else:
|
||||||
|
callback(service_name)
|
||||||
|
|
||||||
|
def is_ready(self, service_name):
|
||||||
|
"""
|
||||||
|
Determine if a registered service is ready, by checking its 'required_data'.
|
||||||
|
|
||||||
|
A 'required_data' item can be any mapping type, and is considered ready
|
||||||
|
if `bool(item)` evaluates as True.
|
||||||
|
"""
|
||||||
|
service = self.get_service(service_name)
|
||||||
|
reqs = service.get('required_data', [])
|
||||||
|
return all(bool(req) for req in reqs)
|
||||||
|
|
||||||
|
def _load_ready_file(self):
|
||||||
|
if self._ready is not None:
|
||||||
|
return
|
||||||
|
if os.path.exists(self._ready_file):
|
||||||
|
with open(self._ready_file) as fp:
|
||||||
|
self._ready = set(json.load(fp))
|
||||||
|
else:
|
||||||
|
self._ready = set()
|
||||||
|
|
||||||
|
def _save_ready_file(self):
|
||||||
|
if self._ready is None:
|
||||||
|
return
|
||||||
|
with open(self._ready_file, 'w') as fp:
|
||||||
|
json.dump(list(self._ready), fp)
|
||||||
|
|
||||||
|
def save_ready(self, service_name):
|
||||||
|
"""
|
||||||
|
Save an indicator that the given service is now data_ready.
|
||||||
|
"""
|
||||||
|
self._load_ready_file()
|
||||||
|
self._ready.add(service_name)
|
||||||
|
self._save_ready_file()
|
||||||
|
|
||||||
|
def save_lost(self, service_name):
|
||||||
|
"""
|
||||||
|
Save an indicator that the given service is no longer data_ready.
|
||||||
|
"""
|
||||||
|
self._load_ready_file()
|
||||||
|
self._ready.discard(service_name)
|
||||||
|
self._save_ready_file()
|
||||||
|
|
||||||
|
def was_ready(self, service_name):
|
||||||
|
"""
|
||||||
|
Determine if the given service was previously data_ready.
|
||||||
|
"""
|
||||||
|
self._load_ready_file()
|
||||||
|
return service_name in self._ready
|
||||||
|
|
||||||
|
|
||||||
|
class ManagerCallback(object):
|
||||||
|
"""
|
||||||
|
Special case of a callback that takes the `ServiceManager` instance
|
||||||
|
in addition to the service name.
|
||||||
|
|
||||||
|
Subclasses should implement `__call__` which should accept three parameters:
|
||||||
|
|
||||||
|
* `manager` The `ServiceManager` instance
|
||||||
|
* `service_name` The name of the service it's being triggered for
|
||||||
|
* `event_name` The name of the event that this callback is handling
|
||||||
|
"""
|
||||||
|
def __call__(self, manager, service_name, event_name):
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
|
||||||
|
class PortManagerCallback(ManagerCallback):
|
||||||
|
"""
|
||||||
|
Callback class that will open or close ports, for use as either
|
||||||
|
a start or stop action.
|
||||||
|
"""
|
||||||
|
def __call__(self, manager, service_name, event_name):
|
||||||
|
service = manager.get_service(service_name)
|
||||||
|
new_ports = service.get('ports', [])
|
||||||
|
port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
|
||||||
|
if os.path.exists(port_file):
|
||||||
|
with open(port_file) as fp:
|
||||||
|
old_ports = fp.read().split(',')
|
||||||
|
for old_port in old_ports:
|
||||||
|
if bool(old_port) and not self.ports_contains(old_port, new_ports):
|
||||||
|
hookenv.close_port(old_port)
|
||||||
|
with open(port_file, 'w') as fp:
|
||||||
|
fp.write(','.join(str(port) for port in new_ports))
|
||||||
|
for port in new_ports:
|
||||||
|
# A port is either a number or 'ICMP'
|
||||||
|
protocol = 'TCP'
|
||||||
|
if str(port).upper() == 'ICMP':
|
||||||
|
protocol = 'ICMP'
|
||||||
|
if event_name == 'start':
|
||||||
|
hookenv.open_port(port, protocol)
|
||||||
|
elif event_name == 'stop':
|
||||||
|
hookenv.close_port(port, protocol)
|
||||||
|
|
||||||
|
def ports_contains(self, port, ports):
|
||||||
|
if not bool(port):
|
||||||
|
return False
|
||||||
|
if str(port).upper() != 'ICMP':
|
||||||
|
port = int(port)
|
||||||
|
return port in ports
|
||||||
|
|
||||||
|
|
||||||
|
def service_stop(service_name):
|
||||||
|
"""
|
||||||
|
Wrapper around host.service_stop to prevent spurious "unknown service"
|
||||||
|
messages in the logs.
|
||||||
|
"""
|
||||||
|
if host.service_running(service_name):
|
||||||
|
host.service_stop(service_name)
|
||||||
|
|
||||||
|
|
||||||
|
def service_restart(service_name):
|
||||||
|
"""
|
||||||
|
Wrapper around host.service_restart to prevent spurious "unknown service"
|
||||||
|
messages in the logs.
|
||||||
|
"""
|
||||||
|
if host.service_available(service_name):
|
||||||
|
if host.service_running(service_name):
|
||||||
|
host.service_restart(service_name)
|
||||||
|
else:
|
||||||
|
host.service_start(service_name)
|
||||||
|
|
||||||
|
|
||||||
|
# Convenience aliases
|
||||||
|
open_ports = close_ports = manage_ports = PortManagerCallback()
|
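A minimal sketch of wiring ServiceManager into a charm's hooks, assuming the usual charm layout ($CHARM_DIR with a templates/myservice.conf) and a system service called 'myservice'; none of these names come from this charm.

from charmhelpers.core import hookenv
from charmhelpers.core import services

class AdminPassword(dict):
    """Tiny required_data context: truthy once the operator sets a password."""
    def __init__(self):
        password = hookenv.config().get('admin-password')
        if password:
            self['admin_password'] = password

manager = services.ServiceManager([
    {
        'service': 'myservice',
        'ports': [8080],
        'required_data': [AdminPassword()],
        'data_ready': [
            # Re-renders the config file whenever all required data is present;
            # the default 'start' handler then restarts the service and opens
            # the listed ports.
            services.template(source='myservice.conf',
                              target='/etc/myservice.conf'),
        ],
    },
])
manager.manage()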
290  tests/charmhelpers/core/services/helpers.py  Normal file
@@ -0,0 +1,290 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
from charmhelpers.core import host
|
||||||
|
from charmhelpers.core import templating
|
||||||
|
|
||||||
|
from charmhelpers.core.services.base import ManagerCallback
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['RelationContext', 'TemplateCallback',
|
||||||
|
'render_template', 'template']
|
||||||
|
|
||||||
|
|
||||||
|
class RelationContext(dict):
|
||||||
|
"""
|
||||||
|
Base class for a context generator that gets relation data from juju.
|
||||||
|
|
||||||
|
Subclasses must provide the attributes `name`, which is the name of the
|
||||||
|
interface of interest, `interface`, which is the type of the interface of
|
||||||
|
interest, and `required_keys`, which is the set of keys required for the
|
||||||
|
relation to be considered complete. The data for all interfaces matching
|
||||||
|
the `name` attribute that are complete will be used to populate the dictionary
|
||||||
|
values (see `get_data`, below).
|
||||||
|
|
||||||
|
The generated context will be namespaced under the relation :attr:`name`,
|
||||||
|
to prevent potential naming conflicts.
|
||||||
|
|
||||||
|
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||||
|
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||||
|
"""
|
||||||
|
name = None
|
||||||
|
interface = None
|
||||||
|
|
||||||
|
def __init__(self, name=None, additional_required_keys=None):
|
||||||
|
if not hasattr(self, 'required_keys'):
|
||||||
|
self.required_keys = []
|
||||||
|
|
||||||
|
if name is not None:
|
||||||
|
self.name = name
|
||||||
|
if additional_required_keys:
|
||||||
|
self.required_keys.extend(additional_required_keys)
|
||||||
|
self.get_data()
|
||||||
|
|
||||||
|
def __bool__(self):
|
||||||
|
"""
|
||||||
|
Returns True if all of the required_keys are available.
|
||||||
|
"""
|
||||||
|
return self.is_ready()
|
||||||
|
|
||||||
|
__nonzero__ = __bool__
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return super(RelationContext, self).__repr__()
|
||||||
|
|
||||||
|
def is_ready(self):
|
||||||
|
"""
|
||||||
|
Returns True if all of the `required_keys` are available from any units.
|
||||||
|
"""
|
||||||
|
ready = len(self.get(self.name, [])) > 0
|
||||||
|
if not ready:
|
||||||
|
hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
|
||||||
|
return ready
|
||||||
|
|
||||||
|
def _is_ready(self, unit_data):
|
||||||
|
"""
|
||||||
|
Helper method that tests a set of relation data and returns True if
|
||||||
|
all of the `required_keys` are present.
|
||||||
|
"""
|
||||||
|
return set(unit_data.keys()).issuperset(set(self.required_keys))
|
||||||
|
|
||||||
|
def get_data(self):
|
||||||
|
"""
|
||||||
|
Retrieve the relation data for each unit involved in a relation and,
|
||||||
|
if complete, store it in a list under `self[self.name]`. This
|
||||||
|
is automatically called when the RelationContext is instantiated.
|
||||||
|
|
||||||
|
The units are sorted lexicographically first by the service ID, then by
|
||||||
|
the unit ID. Thus, if an interface has two other services, 'db:1'
|
||||||
|
and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
|
||||||
|
and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
|
||||||
|
set of data, the relation data for the units will be stored in the
|
||||||
|
order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
|
||||||
|
|
||||||
|
If you only care about a single unit on the relation, you can just
|
||||||
|
access it as `{{ interface[0]['key'] }}`. However, if you can at all
|
||||||
|
support multiple units on a relation, you should iterate over the list,
|
||||||
|
like::
|
||||||
|
|
||||||
|
{% for unit in interface -%}
|
||||||
|
{{ unit['key'] }}{% if not loop.last %},{% endif %}
|
||||||
|
{%- endfor %}
|
||||||
|
|
||||||
|
Note that since all sets of relation data from all related services and
|
||||||
|
units are in a single list, if you need to know which service or unit a
|
||||||
|
set of data came from, you'll need to extend this class to preserve
|
||||||
|
that information.
|
||||||
|
"""
|
||||||
|
if not hookenv.relation_ids(self.name):
|
||||||
|
return
|
||||||
|
|
||||||
|
ns = self.setdefault(self.name, [])
|
||||||
|
for rid in sorted(hookenv.relation_ids(self.name)):
|
||||||
|
for unit in sorted(hookenv.related_units(rid)):
|
||||||
|
reldata = hookenv.relation_get(rid=rid, unit=unit)
|
||||||
|
if self._is_ready(reldata):
|
||||||
|
ns.append(reldata)
|
||||||
|
|
||||||
|
def provide_data(self):
|
||||||
|
"""
|
||||||
|
Return data to be relation_set for this interface.
|
||||||
|
"""
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
class MysqlRelation(RelationContext):
|
||||||
|
"""
|
||||||
|
Relation context for the `mysql` interface.
|
||||||
|
|
||||||
|
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||||
|
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||||
|
"""
|
||||||
|
name = 'db'
|
||||||
|
interface = 'mysql'
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
self.required_keys = ['host', 'user', 'password', 'database']
|
||||||
|
RelationContext.__init__(self, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class HttpRelation(RelationContext):
|
||||||
|
"""
|
||||||
|
Relation context for the `http` interface.
|
||||||
|
|
||||||
|
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||||
|
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||||
|
"""
|
||||||
|
name = 'website'
|
||||||
|
interface = 'http'
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
self.required_keys = ['host', 'port']
|
||||||
|
RelationContext.__init__(self, *args, **kwargs)
|
||||||
|
|
||||||
|
def provide_data(self):
|
||||||
|
return {
|
||||||
|
'host': hookenv.unit_get('private-address'),
|
||||||
|
'port': 80,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class RequiredConfig(dict):
|
||||||
|
"""
|
||||||
|
Data context that loads config options with one or more mandatory options.
|
||||||
|
|
||||||
|
Once the required options have been changed from their default values, all
|
||||||
|
config options will be available, namespaced under `config` to prevent
|
||||||
|
potential naming conflicts (for example, between a config option and a
|
||||||
|
relation property).
|
||||||
|
|
||||||
|
:param list *args: List of options that must be changed from their default values.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, *args):
|
||||||
|
self.required_options = args
|
||||||
|
self['config'] = hookenv.config()
|
||||||
|
with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
|
||||||
|
self.config = yaml.load(fp).get('options', {})
|
||||||
|
|
||||||
|
def __bool__(self):
|
||||||
|
for option in self.required_options:
|
||||||
|
if option not in self['config']:
|
||||||
|
return False
|
||||||
|
current_value = self['config'][option]
|
||||||
|
default_value = self.config[option].get('default')
|
||||||
|
if current_value == default_value:
|
||||||
|
return False
|
||||||
|
if current_value in (None, '') and default_value in (None, ''):
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
def __nonzero__(self):
|
||||||
|
return self.__bool__()
|
||||||
|
|
||||||
|
|
||||||
|
class StoredContext(dict):
|
||||||
|
"""
|
||||||
|
A data context that always returns the data that it was first created with.
|
||||||
|
|
||||||
|
This is useful to do a one-time generation of things like passwords, that
|
||||||
|
will thereafter use the same value that was originally generated, instead
|
||||||
|
of generating a new value each time it is run.
|
||||||
|
"""
|
||||||
|
def __init__(self, file_name, config_data):
|
||||||
|
"""
|
||||||
|
If the file exists, populate `self` with the data from the file.
|
||||||
|
Otherwise, populate with the given data and persist it to the file.
|
||||||
|
"""
|
||||||
|
if os.path.exists(file_name):
|
||||||
|
self.update(self.read_context(file_name))
|
||||||
|
else:
|
||||||
|
self.store_context(file_name, config_data)
|
||||||
|
self.update(config_data)
|
||||||
|
|
||||||
|
def store_context(self, file_name, config_data):
|
||||||
|
if not os.path.isabs(file_name):
|
||||||
|
file_name = os.path.join(hookenv.charm_dir(), file_name)
|
||||||
|
with open(file_name, 'w') as file_stream:
|
||||||
|
os.fchmod(file_stream.fileno(), 0o600)
|
||||||
|
yaml.dump(config_data, file_stream)
|
||||||
|
|
||||||
|
def read_context(self, file_name):
|
||||||
|
if not os.path.isabs(file_name):
|
||||||
|
file_name = os.path.join(hookenv.charm_dir(), file_name)
|
||||||
|
with open(file_name, 'r') as file_stream:
|
||||||
|
data = yaml.load(file_stream)
|
||||||
|
if not data:
|
||||||
|
raise OSError("%s is empty" % file_name)
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
class TemplateCallback(ManagerCallback):
|
||||||
|
"""
|
||||||
|
Callback class that will render a Jinja2 template, for use as a ready
|
||||||
|
action.
|
||||||
|
|
||||||
|
:param str source: The template source file, relative to
|
||||||
|
`$CHARM_DIR/templates`
|
||||||
|
|
||||||
|
:param str target: The target to write the rendered template to (or None)
|
||||||
|
:param str owner: The owner of the rendered file
|
||||||
|
:param str group: The group of the rendered file
|
||||||
|
:param int perms: The permissions of the rendered file
|
||||||
|
:param partial on_change_action: functools partial to be executed when
|
||||||
|
rendered file changes
|
||||||
|
:param jinja2 loader template_loader: A jinja2 template loader
|
||||||
|
|
||||||
|
:return str: The rendered template
|
||||||
|
"""
|
||||||
|
def __init__(self, source, target,
|
||||||
|
owner='root', group='root', perms=0o444,
|
||||||
|
on_change_action=None, template_loader=None):
|
||||||
|
self.source = source
|
||||||
|
self.target = target
|
||||||
|
self.owner = owner
|
||||||
|
self.group = group
|
||||||
|
self.perms = perms
|
||||||
|
self.on_change_action = on_change_action
|
||||||
|
self.template_loader = template_loader
|
||||||
|
|
||||||
|
def __call__(self, manager, service_name, event_name):
|
||||||
|
pre_checksum = ''
|
||||||
|
if self.on_change_action and os.path.isfile(self.target):
|
||||||
|
pre_checksum = host.file_hash(self.target)
|
||||||
|
service = manager.get_service(service_name)
|
||||||
|
context = {'ctx': {}}
|
||||||
|
for ctx in service.get('required_data', []):
|
||||||
|
context.update(ctx)
|
||||||
|
context['ctx'].update(ctx)
|
||||||
|
|
||||||
|
result = templating.render(self.source, self.target, context,
|
||||||
|
self.owner, self.group, self.perms,
|
||||||
|
template_loader=self.template_loader)
|
||||||
|
if self.on_change_action:
|
||||||
|
if pre_checksum == host.file_hash(self.target):
|
||||||
|
hookenv.log(
|
||||||
|
'No change detected: {}'.format(self.target),
|
||||||
|
hookenv.DEBUG)
|
||||||
|
else:
|
||||||
|
self.on_change_action()
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# Convenience aliases for templates
|
||||||
|
render_template = template = TemplateCallback
|
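Sketch of a custom RelationContext subclass, assuming a hypothetical relation named 'amqp' backed by a 'rabbitmq' interface; the key names are illustrative, not part of this charm.

from charmhelpers.core.services.helpers import RelationContext

class AmqpRelation(RelationContext):
    name = 'amqp'
    interface = 'rabbitmq'

    def __init__(self, *args, **kwargs):
        self.required_keys = ['hostname', 'password']
        RelationContext.__init__(self, *args, **kwargs)

# Inside a hook, bool(AmqpRelation()) is True only once every required key is
# present on at least one remote unit, and the collected unit data is then
# available under context['amqp'] for templating.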
129  tests/charmhelpers/core/strutils.py  Normal file
@@ -0,0 +1,129 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import six
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
def bool_from_string(value):
|
||||||
|
"""Interpret string value as boolean.
|
||||||
|
|
||||||
|
Returns True if value translates to True otherwise False.
|
||||||
|
"""
|
||||||
|
if isinstance(value, six.string_types):
|
||||||
|
value = six.text_type(value)
|
||||||
|
else:
|
||||||
|
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
|
||||||
|
raise ValueError(msg)
|
||||||
|
|
||||||
|
value = value.strip().lower()
|
||||||
|
|
||||||
|
if value in ['y', 'yes', 'true', 't', 'on']:
|
||||||
|
return True
|
||||||
|
elif value in ['n', 'no', 'false', 'f', 'off']:
|
||||||
|
return False
|
||||||
|
|
||||||
|
msg = "Unable to interpret string value '%s' as boolean" % (value)
|
||||||
|
raise ValueError(msg)
|
||||||
|
|
||||||
|
|
||||||
|
def bytes_from_string(value):
|
||||||
|
"""Interpret human readable string value as bytes.
|
||||||
|
|
||||||
|
Returns int
|
||||||
|
"""
|
||||||
|
BYTE_POWER = {
|
||||||
|
'K': 1,
|
||||||
|
'KB': 1,
|
||||||
|
'M': 2,
|
||||||
|
'MB': 2,
|
||||||
|
'G': 3,
|
||||||
|
'GB': 3,
|
||||||
|
'T': 4,
|
||||||
|
'TB': 4,
|
||||||
|
'P': 5,
|
||||||
|
'PB': 5,
|
||||||
|
}
|
||||||
|
if isinstance(value, six.string_types):
|
||||||
|
value = six.text_type(value)
|
||||||
|
else:
|
||||||
|
msg = "Unable to interpret non-string value '%s' as bytes" % (value)
|
||||||
|
raise ValueError(msg)
|
||||||
|
matches = re.match("([0-9]+)([a-zA-Z]+)", value)
|
||||||
|
if matches:
|
||||||
|
size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
|
||||||
|
else:
|
||||||
|
# Assume that value passed in is bytes
|
||||||
|
try:
|
||||||
|
size = int(value)
|
||||||
|
except ValueError:
|
||||||
|
msg = "Unable to interpret string value '%s' as bytes" % (value)
|
||||||
|
raise ValueError(msg)
|
||||||
|
return size
|
||||||
|
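Worked example of the conversions above: the suffix indexes BYTE_POWER, so '1G' is 1 * 1024 ** 3 and '512MB' is 512 * 1024 ** 2, while a bare number is taken as bytes already. Runnable anywhere charmhelpers and six are importable.

from charmhelpers.core.strutils import bool_from_string, bytes_from_string

assert bytes_from_string('1G') == 1073741824
assert bytes_from_string('512MB') == 536870912
assert bytes_from_string('2048') == 2048      # no suffix: already bytes
assert bool_from_string('Yes') is True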
|
||||||
|
|
||||||
|
class BasicStringComparator(object):
|
||||||
|
"""Provides a class that will compare strings from an iterator type object.
|
||||||
|
Used to provide > and < comparisons on strings that may not necessarily be
|
||||||
|
alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
|
||||||
|
z-wrap.
|
||||||
|
"""
|
||||||
|
|
||||||
|
_list = None
|
||||||
|
|
||||||
|
def __init__(self, item):
|
||||||
|
if self._list is None:
|
||||||
|
raise Exception("Must define the _list in the class definition!")
|
||||||
|
try:
|
||||||
|
self.index = self._list.index(item)
|
||||||
|
except Exception:
|
||||||
|
raise KeyError("Item '{}' is not in list '{}'"
|
||||||
|
.format(item, self._list))
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
assert isinstance(other, str) or isinstance(other, self.__class__)
|
||||||
|
return self.index == self._list.index(other)
|
||||||
|
|
||||||
|
def __ne__(self, other):
|
||||||
|
return not self.__eq__(other)
|
||||||
|
|
||||||
|
def __lt__(self, other):
|
||||||
|
assert isinstance(other, str) or isinstance(other, self.__class__)
|
||||||
|
return self.index < self._list.index(other)
|
||||||
|
|
||||||
|
def __ge__(self, other):
|
||||||
|
return not self.__lt__(other)
|
||||||
|
|
||||||
|
def __gt__(self, other):
|
||||||
|
assert isinstance(other, str) or isinstance(other, self.__class__)
|
||||||
|
return self.index > self._list.index(other)
|
||||||
|
|
||||||
|
def __le__(self, other):
|
||||||
|
return not self.__gt__(other)
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
"""Always give back the item at the index so it can be used in
|
||||||
|
comparisons like:
|
||||||
|
|
||||||
|
s_mitaka = CompareOpenStack('mitaka')
|
||||||
|
s_newton = CompareOpenStack('newton')
|
||||||
|
|
||||||
|
assert s_newton > s_mitaka
|
||||||
|
|
||||||
|
@returns: <string>
|
||||||
|
"""
|
||||||
|
return self._list[self.index]
|
54  tests/charmhelpers/core/sysctl.py  Normal file
@@ -0,0 +1,54 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import yaml

from subprocess import check_call

from charmhelpers.core.hookenv import (
    log,
    DEBUG,
    ERROR,
)

__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'


def create(sysctl_dict, sysctl_file):
    """Creates a sysctl.conf file from a YAML associative array

    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
    :type sysctl_dict: str
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :returns: None
    """
    try:
        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
    except yaml.YAMLError:
        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
            level=ERROR)
        return

    with open(sysctl_file, "w") as fd:
        for key, value in sysctl_dict_parsed.items():
            fd.write("{}={}\n".format(key, value))

    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
        level=DEBUG)

    check_call(["sysctl", "-p", sysctl_file])
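Sketch of driving sysctl.create directly. It needs root for the final "sysctl -p" call and juju-log for the log() calls, so it is really meant to run from a hook; the keys and target file name are only an example.

import yaml
from charmhelpers.core import sysctl

settings = {'net.ipv4.ip_forward': 1, 'vm.swappiness': 10}
# create() takes the YAML *string*, parses it back, writes key=value lines to
# the target file and then applies them with "sysctl -p <file>".
sysctl.create(yaml.dump(settings), '/etc/sysctl.d/50-example.conf')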
|
93  tests/charmhelpers/core/templating.py  Normal file
@@ -0,0 +1,93 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from charmhelpers.core import host
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
|
||||||
|
|
||||||
|
def render(source, target, context, owner='root', group='root',
|
||||||
|
perms=0o444, templates_dir=None, encoding='UTF-8',
|
||||||
|
template_loader=None, config_template=None):
|
||||||
|
"""
|
||||||
|
Render a template.
|
||||||
|
|
||||||
|
The `source` path, if not absolute, is relative to the `templates_dir`.
|
||||||
|
|
||||||
|
The `target` path should be absolute. It can also be `None`, in which
|
||||||
|
case no file will be written.
|
||||||
|
|
||||||
|
The context should be a dict containing the values to be replaced in the
|
||||||
|
template.
|
||||||
|
|
||||||
|
config_template may be provided to render from a provided template instead
|
||||||
|
of loading from a file.
|
||||||
|
|
||||||
|
The `owner`, `group`, and `perms` options will be passed to `write_file`.
|
||||||
|
|
||||||
|
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
|
||||||
|
|
||||||
|
The rendered template will be written to the file as well as being returned
|
||||||
|
as a string.
|
||||||
|
|
||||||
|
Note: Using this requires python-jinja2 or python3-jinja2; if it is not
|
||||||
|
installed, calling this will attempt to use charmhelpers.fetch.apt_install
|
||||||
|
to install it.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
from jinja2 import FileSystemLoader, Environment, exceptions
|
||||||
|
except ImportError:
|
||||||
|
try:
|
||||||
|
from charmhelpers.fetch import apt_install
|
||||||
|
except ImportError:
|
||||||
|
hookenv.log('Could not import jinja2, and could not import '
|
||||||
|
'charmhelpers.fetch to install it',
|
||||||
|
level=hookenv.ERROR)
|
||||||
|
raise
|
||||||
|
if sys.version_info.major == 2:
|
||||||
|
apt_install('python-jinja2', fatal=True)
|
||||||
|
else:
|
||||||
|
apt_install('python3-jinja2', fatal=True)
|
||||||
|
from jinja2 import FileSystemLoader, Environment, exceptions
|
||||||
|
|
||||||
|
if template_loader:
|
||||||
|
template_env = Environment(loader=template_loader)
|
||||||
|
else:
|
||||||
|
if templates_dir is None:
|
||||||
|
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
|
||||||
|
template_env = Environment(loader=FileSystemLoader(templates_dir))
|
||||||
|
|
||||||
|
# load from a string if provided explicitly
|
||||||
|
if config_template is not None:
|
||||||
|
template = template_env.from_string(config_template)
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
source = source
|
||||||
|
template = template_env.get_template(source)
|
||||||
|
except exceptions.TemplateNotFound as e:
|
||||||
|
hookenv.log('Could not load template %s from %s.' %
|
||||||
|
(source, templates_dir),
|
||||||
|
level=hookenv.ERROR)
|
||||||
|
raise e
|
||||||
|
content = template.render(context)
|
||||||
|
if target is not None:
|
||||||
|
target_dir = os.path.dirname(target)
|
||||||
|
if not os.path.exists(target_dir):
|
||||||
|
# This is a terrible default directory permission, as the file
|
||||||
|
# or its siblings will often contain secrets.
|
||||||
|
host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
|
||||||
|
host.write_file(target, content.encode(encoding), owner, group, perms)
|
||||||
|
return content
|
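Sketch of rendering a charm template with the function above, assuming the charm ships templates/haproxy.cfg and runs inside a hook (charm_dir() and write_file() both rely on the unit environment); the names and context values are illustrative.

from charmhelpers.core.templating import render

render(source='haproxy.cfg',
       target='/etc/haproxy/haproxy.cfg',
       context={'listen_port': 8080, 'backends': ['10.0.0.2', '10.0.0.3']},
       owner='root', group='root', perms=0o644)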
520  tests/charmhelpers/core/unitdata.py  Normal file
@@ -0,0 +1,520 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Kapil Thangavelu <kapil.foss@gmail.com>
|
||||||
|
#
|
||||||
|
"""
|
||||||
|
Intro
|
||||||
|
-----
|
||||||
|
|
||||||
|
A simple way to store state in units. This provides a key value
|
||||||
|
storage with support for versioned, transactional operation,
|
||||||
|
and can calculate deltas from previous values to simplify unit logic
|
||||||
|
when processing changes.
|
||||||
|
|
||||||
|
|
||||||
|
Hook Integration
|
||||||
|
----------------
|
||||||
|
|
||||||
|
There are several extant frameworks for hook execution, including
|
||||||
|
|
||||||
|
- charmhelpers.core.hookenv.Hooks
|
||||||
|
- charmhelpers.core.services.ServiceManager
|
||||||
|
|
||||||
|
The storage classes are framework agnostic, one simple integration is
|
||||||
|
via the HookData contextmanager. It will record the current hook
|
||||||
|
execution environment (including relation data, config data, etc.),
|
||||||
|
setup a transaction and allow easy access to the changes from
|
||||||
|
previously seen values. One consequence of the integration is the
|
||||||
|
reservation of particular keys ('rels', 'unit', 'env', 'config',
|
||||||
|
'charm_revisions') for their respective values.
|
||||||
|
|
||||||
|
Here's a fully worked integration example using hookenv.Hooks::
|
||||||
|
|
||||||
|
from charmhelper.core import hookenv, unitdata
|
||||||
|
|
||||||
|
hook_data = unitdata.HookData()
|
||||||
|
db = unitdata.kv()
|
||||||
|
hooks = hookenv.Hooks()
|
||||||
|
|
||||||
|
@hooks.hook
|
||||||
|
def config_changed():
|
||||||
|
# Print all changes to configuration from previously seen
|
||||||
|
# values.
|
||||||
|
for changed, (prev, cur) in hook_data.conf.items():
|
||||||
|
print('config changed', changed,
|
||||||
|
'previous value', prev,
|
||||||
|
'current value', cur)
|
||||||
|
|
||||||
|
# Get some unit specific bookkeeping
|
||||||
|
if not db.get('pkg_key'):
|
||||||
|
key = urllib.urlopen('https://example.com/pkg_key').read()
|
||||||
|
db.set('pkg_key', key)
|
||||||
|
|
||||||
|
# Directly access all charm config as a mapping.
|
||||||
|
conf = db.getrange('config', True)
|
||||||
|
|
||||||
|
# Directly access all relation data as a mapping
|
||||||
|
rels = db.getrange('rels', True)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
with hook_data():
|
||||||
|
hook.execute()
|
||||||
|
|
||||||
|
|
||||||
|
A more basic integration is via the hook_scope context manager which simply
|
||||||
|
manages transaction scope (and records hook name, and timestamp)::
|
||||||
|
|
||||||
|
>>> from unitdata import kv
|
||||||
|
>>> db = kv()
|
||||||
|
>>> with db.hook_scope('install'):
|
||||||
|
... # do work, in transactional scope.
|
||||||
|
... db.set('x', 1)
|
||||||
|
>>> db.get('x')
|
||||||
|
1
|
||||||
|
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
|
||||||
|
Values are automatically json de/serialized to preserve basic typing
|
||||||
|
and complex data struct capabilities (dicts, lists, ints, booleans, etc).
|
||||||
|
|
||||||
|
Individual values can be manipulated via get/set::
|
||||||
|
|
||||||
|
>>> kv.set('y', True)
|
||||||
|
>>> kv.get('y')
|
||||||
|
True
|
||||||
|
|
||||||
|
# We can set complex values (dicts, lists) as a single key.
|
||||||
|
>>> kv.set('config', {'a': 1, 'b': True'})
|
||||||
|
|
||||||
|
# Also supports returning dictionaries as a record which
|
||||||
|
# provides attribute access.
|
||||||
|
>>> config = kv.get('config', record=True)
|
||||||
|
>>> config.b
|
||||||
|
True
|
||||||
|
|
||||||
|
|
||||||
|
Groups of keys can be manipulated with update/getrange::
|
||||||
|
|
||||||
|
>>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
|
||||||
|
>>> kv.getrange('gui.', strip=True)
|
||||||
|
{'z': 1, 'y': 2}
|
||||||
|
|
||||||
|
When updating values, it's very helpful to understand which values
|
||||||
|
have actually changed and how have they changed. The storage
|
||||||
|
provides a delta method to provide for this::
|
||||||
|
|
||||||
|
>>> data = {'debug': True, 'option': 2}
|
||||||
|
>>> delta = kv.delta(data, 'config.')
|
||||||
|
>>> delta.debug.previous
|
||||||
|
None
|
||||||
|
>>> delta.debug.current
|
||||||
|
True
|
||||||
|
>>> delta
|
||||||
|
{'debug': (None, True), 'option': (None, 2)}
|
||||||
|
|
||||||
|
Note the delta method does not persist the actual change, it needs to
|
||||||
|
be explicitly saved via 'update' method::
|
||||||
|
|
||||||
|
>>> kv.update(data, 'config.')
|
||||||
|
|
||||||
|
Values modified in the context of a hook scope retain historical values
|
||||||
|
associated to the hookname.
|
||||||
|
|
||||||
|
>>> with db.hook_scope('config-changed'):
|
||||||
|
... db.set('x', 42)
|
||||||
|
>>> db.gethistory('x')
|
||||||
|
[(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
|
||||||
|
(2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import collections
|
||||||
|
import contextlib
|
||||||
|
import datetime
|
||||||
|
import itertools
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import pprint
|
||||||
|
import sqlite3
|
||||||
|
import sys
|
||||||
|
|
||||||
|
__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
|
||||||
|
|
||||||
|
|
||||||
|
class Storage(object):
|
||||||
|
"""Simple key value database for local unit state within charms.
|
||||||
|
|
||||||
|
Modifications are not persisted unless :meth:`flush` is called.
|
||||||
|
|
||||||
|
To support dicts, lists, integer, floats, and booleans values
|
||||||
|
are automatically json encoded/decoded.
|
||||||
|
"""
|
||||||
|
def __init__(self, path=None):
|
||||||
|
self.db_path = path
|
||||||
|
if path is None:
|
||||||
|
if 'UNIT_STATE_DB' in os.environ:
|
||||||
|
self.db_path = os.environ['UNIT_STATE_DB']
|
||||||
|
else:
|
||||||
|
self.db_path = os.path.join(
|
||||||
|
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
|
||||||
|
with open(self.db_path, 'a') as f:
|
||||||
|
os.fchmod(f.fileno(), 0o600)
|
||||||
|
self.conn = sqlite3.connect('%s' % self.db_path)
|
||||||
|
self.cursor = self.conn.cursor()
|
||||||
|
self.revision = None
|
||||||
|
self._closed = False
|
||||||
|
self._init()
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
if self._closed:
|
||||||
|
return
|
||||||
|
self.flush(False)
|
||||||
|
self.cursor.close()
|
||||||
|
self.conn.close()
|
||||||
|
self._closed = True
|
||||||
|
|
||||||
|
def get(self, key, default=None, record=False):
|
||||||
|
self.cursor.execute('select data from kv where key=?', [key])
|
||||||
|
result = self.cursor.fetchone()
|
||||||
|
if not result:
|
||||||
|
return default
|
||||||
|
if record:
|
||||||
|
return Record(json.loads(result[0]))
|
||||||
|
return json.loads(result[0])
|
||||||
|
|
||||||
|
def getrange(self, key_prefix, strip=False):
|
||||||
|
"""
|
||||||
|
Get a range of keys starting with a common prefix as a mapping of
|
||||||
|
keys to values.
|
||||||
|
|
||||||
|
:param str key_prefix: Common prefix among all keys
|
||||||
|
:param bool strip: Optionally strip the common prefix from the key
|
||||||
|
names in the returned dict
|
||||||
|
:return dict: A (possibly empty) dict of key-value mappings
|
||||||
|
"""
|
||||||
|
self.cursor.execute("select key, data from kv where key like ?",
|
||||||
|
['%s%%' % key_prefix])
|
||||||
|
result = self.cursor.fetchall()
|
||||||
|
|
||||||
|
if not result:
|
||||||
|
return {}
|
||||||
|
if not strip:
|
||||||
|
key_prefix = ''
|
||||||
|
return dict([
|
||||||
|
(k[len(key_prefix):], json.loads(v)) for k, v in result])
|
||||||
|
|
||||||
|
def update(self, mapping, prefix=""):
|
||||||
|
"""
|
||||||
|
Set the values of multiple keys at once.
|
||||||
|
|
||||||
|
:param dict mapping: Mapping of keys to values
|
||||||
|
:param str prefix: Optional prefix to apply to all keys in `mapping`
|
||||||
|
before setting
|
||||||
|
"""
|
||||||
|
for k, v in mapping.items():
|
||||||
|
self.set("%s%s" % (prefix, k), v)
|
||||||
|
|
||||||
|
def unset(self, key):
|
||||||
|
"""
|
||||||
|
Remove a key from the database entirely.
|
||||||
|
"""
|
||||||
|
self.cursor.execute('delete from kv where key=?', [key])
|
||||||
|
if self.revision and self.cursor.rowcount:
|
||||||
|
self.cursor.execute(
|
||||||
|
'insert into kv_revisions values (?, ?, ?)',
|
||||||
|
[key, self.revision, json.dumps('DELETED')])
|
||||||
|
|
||||||
|
def unsetrange(self, keys=None, prefix=""):
|
||||||
|
"""
|
||||||
|
Remove a range of keys starting with a common prefix, from the database
|
||||||
|
entirely.
|
||||||
|
|
||||||
|
:param list keys: List of keys to remove.
|
||||||
|
:param str prefix: Optional prefix to apply to all keys in ``keys``
|
||||||
|
before removing.
|
||||||
|
"""
|
||||||
|
if keys is not None:
|
||||||
|
keys = ['%s%s' % (prefix, key) for key in keys]
|
||||||
|
self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
|
||||||
|
if self.revision and self.cursor.rowcount:
|
||||||
|
self.cursor.execute(
                    'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
                    list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
        else:
            self.cursor.execute('delete from kv where key like ?',
                                ['%s%%' % prefix])
            if self.revision and self.cursor.rowcount:
                self.cursor.execute(
                    'insert into kv_revisions values (?, ?, ?)',
                    ['%s%%' % prefix, self.revision, json.dumps('DELETED')])

    def set(self, key, value):
        """
        Set a value in the database.

        :param str key: Key to set the value for
        :param value: Any JSON-serializable value to be set
        """
        serialized = json.dumps(value)

        self.cursor.execute('select data from kv where key=?', [key])
        exists = self.cursor.fetchone()

        # Skip mutations to the same value
        if exists:
            if exists[0] == serialized:
                return value

        if not exists:
            self.cursor.execute(
                'insert into kv (key, data) values (?, ?)',
                (key, serialized))
        else:
            self.cursor.execute('''
            update kv
            set data = ?
            where key = ?''', [serialized, key])

        # Save
        if not self.revision:
            return value

        self.cursor.execute(
            'select 1 from kv_revisions where key=? and revision=?',
            [key, self.revision])
        exists = self.cursor.fetchone()

        if not exists:
            self.cursor.execute(
                '''insert into kv_revisions (
                   revision, key, data) values (?, ?, ?)''',
                (self.revision, key, serialized))
        else:
            self.cursor.execute(
                '''
                update kv_revisions
                set data = ?
                where key = ?
                and revision = ?''',
                [serialized, key, self.revision])

        return value

    def delta(self, mapping, prefix):
        """
        Return a delta containing values that have changed.
        """
        previous = self.getrange(prefix, strip=True)
        if not previous:
            pk = set()
        else:
            pk = set(previous.keys())
        ck = set(mapping.keys())
        delta = DeltaSet()

        # added
        for k in ck.difference(pk):
            delta[k] = Delta(None, mapping[k])

        # removed
        for k in pk.difference(ck):
            delta[k] = Delta(previous[k], None)

        # changed
        for k in pk.intersection(ck):
            c = mapping[k]
            p = previous[k]
            if c != p:
                delta[k] = Delta(p, c)

        return delta

    @contextlib.contextmanager
    def hook_scope(self, name=""):
        """Scope all future interactions to the current hook execution
        revision."""
        assert not self.revision
        self.cursor.execute(
            'insert into hooks (hook, date) values (?, ?)',
            (name or sys.argv[0],
             datetime.datetime.utcnow().isoformat()))
        self.revision = self.cursor.lastrowid
        try:
            yield self.revision
            self.revision = None
        except Exception:
            self.flush(False)
            self.revision = None
            raise
        else:
            self.flush()

    def flush(self, save=True):
        if save:
            self.conn.commit()
        elif self._closed:
            return
        else:
            self.conn.rollback()

    def _init(self):
        self.cursor.execute('''
            create table if not exists kv (
                key text,
                data text,
                primary key (key)
                )''')
        self.cursor.execute('''
            create table if not exists kv_revisions (
                key text,
                revision integer,
                data text,
                primary key (key, revision)
                )''')
        self.cursor.execute('''
            create table if not exists hooks (
                version integer primary key autoincrement,
                hook text,
                date text
                )''')
        self.conn.commit()

    def gethistory(self, key, deserialize=False):
        self.cursor.execute(
            '''
            select kv.revision, kv.key, kv.data, h.hook, h.date
            from kv_revisions kv,
                 hooks h
            where kv.key=?
              and kv.revision = h.version
            ''', [key])
        if deserialize is False:
            return self.cursor.fetchall()
        return map(_parse_history, self.cursor.fetchall())

    def debug(self, fh=sys.stderr):
        self.cursor.execute('select * from kv')
        pprint.pprint(self.cursor.fetchall(), stream=fh)
        self.cursor.execute('select * from kv_revisions')
        pprint.pprint(self.cursor.fetchall(), stream=fh)


def _parse_history(d):
    return (d[0], d[1], json.loads(d[2]), d[3],
            datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))


class HookData(object):
    """Simple integration for existing hook exec frameworks.

    Records all unit information, and stores deltas for processing
    by the hook.

    Sample::

       from charmhelpers.core import hookenv, unitdata

       changes = unitdata.HookData()
       db = unitdata.kv()
       hooks = hookenv.Hooks()

       @hooks.hook
       def config_changed():
           # View all changes to configuration
           for changed, (prev, cur) in changes.conf.items():
               print('config changed', changed,
                     'previous value', prev,
                     'current value', cur)

           # Get some unit specific bookkeeping
           if not db.get('pkg_key'):
               key = urllib.urlopen('https://example.com/pkg_key').read()
               db.set('pkg_key', key)

       if __name__ == '__main__':
           with changes():
               hooks.execute()

    """
    def __init__(self):
        self.kv = kv()
        self.conf = None
        self.rels = None

    @contextlib.contextmanager
    def __call__(self):
        from charmhelpers.core import hookenv
        hook_name = hookenv.hook_name()

        with self.kv.hook_scope(hook_name):
            self._record_charm_version(hookenv.charm_dir())
            delta_config, delta_relation = self._record_hook(hookenv)
            yield self.kv, delta_config, delta_relation

    def _record_charm_version(self, charm_dir):
        # Record revisions. Charm revisions are meaningless to charm
        # authors as they don't control the revision, so logic dependent
        # on revision is not particularly useful; however, it is useful
        # for debugging analysis.
        charm_rev = open(
            os.path.join(charm_dir, 'revision')).read().strip()
        charm_rev = charm_rev or '0'
        revs = self.kv.get('charm_revisions', [])
        if charm_rev not in revs:
            revs.append(charm_rev.strip() or '0')
            self.kv.set('charm_revisions', revs)

    def _record_hook(self, hookenv):
        data = hookenv.execution_environment()
        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
        self.kv.set('env', dict(data['env']))
        self.kv.set('unit', data['unit'])
        self.kv.set('relid', data.get('relid'))
        return conf_delta, rels_delta


class Record(dict):

    __slots__ = ()

    def __getattr__(self, k):
        if k in self:
            return self[k]
        raise AttributeError(k)


class DeltaSet(Record):

    __slots__ = ()


Delta = collections.namedtuple('Delta', ['previous', 'current'])


_KV = None


def kv():
    global _KV
    if _KV is None:
        _KV = Storage()
    return _KV
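For context, the kv() singleton above is what charm code uses to persist state between hook executions. A minimal usage sketch, illustrative only (the hook name and keys are hypothetical, not part of this commit):

from charmhelpers.core import unitdata

db = unitdata.kv()
with db.hook_scope('config-changed'):        # one revision per hook run
    db.set('last-series', 'xenial')          # any JSON-serializable value
    last = db.get('last-series')             # -> 'xenial'

    # Compare current values against what was stored under 'config.*'
    changes = db.delta({'debug': True}, 'config.')
    if 'debug' in changes:
        prev, cur = changes.debug            # Delta(previous, current)
        print('debug changed: %s -> %s' % (prev, cur))
# hook_scope() flushes (commits) on clean exit and rolls back on exception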
25
tests/charmhelpers/osplatform.py
Normal file
@ -0,0 +1,25 @@
import platform


def get_platform():
    """Return the current OS platform.

    For example: if current os platform is Ubuntu then a string "ubuntu"
    will be returned (which is the name of the module).
    This string is used to decide which platform module should be imported.
    """
    # linux_distribution is deprecated and will be removed in Python 3.7
    # Warnings *not* disabled, as we certainly need to fix this.
    tuple_platform = platform.linux_distribution()
    current_platform = tuple_platform[0]
    if "Ubuntu" in current_platform:
        return "ubuntu"
    elif "CentOS" in current_platform:
        return "centos"
    elif "debian" in current_platform:
        # Stock Python does not detect Ubuntu and instead returns debian.
        # Or at least it does in some build environments like Travis CI
        return "ubuntu"
    else:
        raise RuntimeError("This module is not supported on {}."
                           .format(current_platform))
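get_platform() is normally used to pick a platform-specific implementation module at import time. A minimal sketch of that pattern, illustrative only (the host_factory module path is an assumption for the example):

import importlib

from charmhelpers.osplatform import get_platform

_platform = get_platform()                      # 'ubuntu' or 'centos'
_module = 'charmhelpers.core.host_factory.%s' % _platform
host_impl = importlib.import_module(_module)    # platform-specific helpers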
23
tests/dev-basic-bionic-queens
Executable file
@ -0,0 +1,23 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Amulet tests on a basic Glance deployment on bionic-queens."""

from basic_deployment import GlanceBasicDeployment

if __name__ == '__main__':
    deployment = GlanceBasicDeployment(series='bionic')
    deployment.run_tests()
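All of these test scripts import GlanceBasicDeployment from tests/basic_deployment.py, which is not part of this diff. A rough, hypothetical sketch of the shape such a class takes on top of the charmhelpers amulet helpers (service, relation and config values below are illustrative only):

# Hypothetical sketch of tests/basic_deployment.py; not part of this commit.
from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment,
)


class GlanceBasicDeployment(OpenStackAmuletDeployment):
    """Amulet deployment of glance plus the services it relates to."""

    def __init__(self, series=None, openstack=None, source=None,
                 stable=False):
        super(GlanceBasicDeployment, self).__init__(
            series, openstack, source, stable)
        self._add_services()
        self._add_relations()
        self._configure_services()
        self._deploy()

    def _add_services(self):
        this_service = {'name': 'glance'}
        other_services = [
            {'name': 'percona-cluster'},
            {'name': 'keystone'},
            {'name': 'rabbitmq-server'},
        ]
        super(GlanceBasicDeployment, self)._add_services(
            this_service, other_services)

    def _add_relations(self):
        relations = {
            'glance:identity-service': 'keystone:identity-service',
            'glance:shared-db': 'percona-cluster:shared-db',
            'keystone:shared-db': 'percona-cluster:shared-db',
            'glance:amqp': 'rabbitmq-server:amqp',
        }
        super(GlanceBasicDeployment, self)._add_relations(relations)

    def _configure_services(self):
        configs = {'keystone': {'admin-password': 'openstack'}}
        super(GlanceBasicDeployment, self)._configure_services(configs)

    def test_100_services_running(self):
        # run_tests() invokes every method whose name starts with 'test_'.
        pass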
23
tests/gate-basic-artful-pike
Executable file
@ -0,0 +1,23 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Amulet tests on a basic Glance deployment on artful-pike."""

from basic_deployment import GlanceBasicDeployment

if __name__ == '__main__':
    deployment = GlanceBasicDeployment(series='artful')
    deployment.run_tests()
23
tests/gate-basic-trusty-icehouse
Executable file
@ -0,0 +1,23 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Amulet tests on a basic Glance deployment on trusty-icehouse."""

from basic_deployment import GlanceBasicDeployment

if __name__ == '__main__':
    deployment = GlanceBasicDeployment(series='trusty')
    deployment.run_tests()
25
tests/gate-basic-trusty-mitaka
Executable file
@ -0,0 +1,25 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Amulet tests on a basic glance deployment on trusty-mitaka."""

from basic_deployment import GlanceBasicDeployment

if __name__ == '__main__':
    deployment = GlanceBasicDeployment(series='trusty',
                                       openstack='cloud:trusty-mitaka',
                                       source='cloud:trusty-updates/mitaka')
    deployment.run_tests()
23
tests/gate-basic-xenial-mitaka
Executable file
@ -0,0 +1,23 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Amulet tests on a basic Glance deployment on xenial-mitaka."""

from basic_deployment import GlanceBasicDeployment

if __name__ == '__main__':
    deployment = GlanceBasicDeployment(series='xenial')
    deployment.run_tests()
25
tests/gate-basic-xenial-ocata
Executable file
@ -0,0 +1,25 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Amulet tests on a basic glance deployment on xenial-ocata."""

from basic_deployment import GlanceBasicDeployment

if __name__ == '__main__':
    deployment = GlanceBasicDeployment(series='xenial',
                                       openstack='cloud:xenial-ocata',
                                       source='cloud:xenial-updates/ocata')
    deployment.run_tests()
25
tests/gate-basic-xenial-pike
Executable file
@ -0,0 +1,25 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Amulet tests on a basic glance deployment on xenial-pike."""

from basic_deployment import GlanceBasicDeployment

if __name__ == '__main__':
    deployment = GlanceBasicDeployment(series='xenial',
                                       openstack='cloud:xenial-pike',
                                       source='cloud:xenial-updates/pike')
    deployment.run_tests()
25
tests/gate-basic-xenial-queens
Executable file
@ -0,0 +1,25 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Amulet tests on a basic glance deployment on xenial-queens."""

from basic_deployment import GlanceBasicDeployment

if __name__ == '__main__':
    deployment = GlanceBasicDeployment(series='xenial',
                                       openstack='cloud:xenial-queens',
                                       source='cloud:xenial-updates/queens')
    deployment.run_tests()
18
tests/tests.yaml
Normal file
@ -0,0 +1,18 @@
# Bootstrap the model if necessary.
bootstrap: True
# Re-use bootstrap node.
reset: True
# Use tox/requirements to drive the venv instead of bundletester's venv feature.
virtualenv: False
# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet.
makefile: []
# Do not specify juju PPA sources. Juju is presumed to be pre-installed
# and configured in all test runner environments.
#sources:
# Do not specify or rely on system packages.
#packages:
# Do not specify python packages here. Use test-requirements.txt
# and tox instead. ie. The venv is constructed before bundletester
# is invoked.
#python-packages:
reset_timeout: 600
20
tox.ini
@ -1,3 +1,6 @@
+# Classic charm: ./tox.ini
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos.
 [tox]
 envlist = pep8,py27
 skipsdist = True
@ -5,20 +8,29 @@ skipsdist = True
 [testenv]
 setenv = VIRTUAL_ENV={envdir}
          PYTHONHASHSEED=0
+         CHARM_DIR={envdir}
+         AMULET_SETUP_TIMEOUT=5400
 install_command =
-  pip install --allow-unverified python-apt {opts} {packages}
+  pip install {opts} {packages}
 commands = ostestr {posargs}
+whitelist_externals = juju
+passenv = HOME TERM AMULET_* CS_API_*

 [testenv:py27]
 basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt

+[testenv:py35]
+basepython = python3.5
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
 [testenv:pep8]
 basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
-commands = flake8 {posargs} hooks unit_tests
+commands = flake8 {posargs} hooks unit_tests tests actions lib
            charm-proof

 [testenv:venv]
@ -48,7 +60,7 @@ basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 commands =
-    bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy
+    bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy

 [testenv:func27-dfs]
 # Charm Functional Test
@ -70,4 +82,4 @@ commands =

 [flake8]
 ignore = E402,E226
-exclude = hooks/charmhelpers
+exclude = */charmhelpers