Add integration tests
This patch set adds integration tests to Deckhand where "integration" means the interaction between Deckhand, Barbican and Keystone. OSH is used to deploy Keystone and Barbican and Docker to deploy PostgreSQL and Deckhand. Unlike functional testing in Deckhand, all integration tests use the default in-code policy defaults and an admin token supplied by keystone to validate authN and authZ. The test scenarios consist of Deckhand secret lifecycle management as well as document rendering with secrets retrieved from Barbican. Change-Id: Ib5ae1b345b2a4bd579671ec4ae9a232c2e3887dc
This commit is contained in:
parent
d32c7a2c8d
commit
f30484a14c
33
deckhand/tests/integration/README.rst
Normal file
33
deckhand/tests/integration/README.rst
Normal file
@ -0,0 +1,33 @@
|
||||
Integration Tests
|
||||
=================
|
||||
|
||||
What
|
||||
----
|
||||
|
||||
These tests validate integration scenarios between Deckhand, Keystone
|
||||
and Barbican. These scenarios include validating Deckhand's secret
|
||||
lifecycle management as well as substitution of encrypted secrets,
|
||||
which are stored in Barbican and retrieved by Deckhand during document
|
||||
rendering.
|
||||
|
||||
How
|
||||
---
|
||||
|
||||
Deckhand uses `gabbi`_ to drive its integration tests. The entry point for
|
||||
these tests is ``integration-tests.sh`` under the ``tools`` directory.
|
||||
|
||||
The integration environment is deployed using `OpenStack-Helm`_ which
|
||||
uses Helm to orchestrate deployment of Keystone, Barbican and other
|
||||
prerequisite services.
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
These tests can be executed via ``./tools/integration-tests.sh <test-regex>``
|
||||
from the command line, where ``<test-regex>`` is optional and if omitted all
|
||||
available tests are run. ``sudo`` permissions are required. It is recommended
|
||||
that these tests be executed inside a VM as a lot of data is pulled in (which
|
||||
requires thorough clean up) during the deployment phase.
|
||||
|
||||
.. _gabbi: https://gabbi.readthedocs.io/en/latest/gabbi.html
|
||||
.. _OpenStack-Helm: https://github.com/openstack/openstack-helm
|
0
deckhand/tests/integration/__init__.py
Normal file
0
deckhand/tests/integration/__init__.py
Normal file
49
deckhand/tests/integration/gabbits/document-crud-secret.yaml
Normal file
49
deckhand/tests/integration/gabbits/document-crud-secret.yaml
Normal file
@ -0,0 +1,49 @@
|
||||
# Tests success paths for secret management:
#
# 1. Tests that creating a secret passphrase results in the Barbican secret
#    ref being returned.
# 2. Tests that the same happens when querying revision documents.

defaults:
  request_headers:
    content-type: application/x-yaml
    # Admin token generated by the integration test harness (see
    # tools/integration-tests.sh).
    X-Auth-Token: $ENVIRON['TEST_AUTH_TOKEN']
  response_headers:
    content-type: application/x-yaml
  verbose: true

tests:
  - name: purge
    desc: Begin testing from known state.
    DELETE: /api/v1.0/revisions
    status: 204
    response_headers: null

  - name: create_encrypted_passphrase
    desc: Create passphrase with storagePolicy=encrypted
    PUT: /api/v1.0/buckets/secret/documents
    status: 200
    data: |-
      ---
      schema: deckhand/Passphrase/v1
      metadata:
        schema: metadata/Document/v1
        name: my-passphrase
        storagePolicy: encrypted
      data: not-a-real-password
      ...
    response_multidoc_jsonpaths:
      $.`len`: 1
      # NOTE(fmontei): jsonpath-rw-ext uses a 1 character separator (rather than allowing a string)
      # leading to this nastiness:
      $.[0].data.`split(:, 0, 1)` + "://" + $.[0].data.`split(/, 2, 3)` + "/v1": $ENVIRON['TEST_BARBICAN_URL']

  - name: verify_revision_documents_returns_secret_ref
    desc: Verify that the documents for the created revision returns the secret ref.
    GET: /api/v1.0/revisions/$RESPONSE['$.[0].status.revision']/documents
    status: 200
    response_multidoc_jsonpaths:
      $.`len`: 1
      # NOTE(fmontei): jsonpath-rw-ext uses a 1 character separator (rather than allowing a string)
      # leading to this nastiness:
      $.[0].data.`split(:, 0, 1)` + "://" + $.[0].data.`split(/, 2, 3)` + "/v1": $ENVIRON['TEST_BARBICAN_URL']
|
@ -0,0 +1,53 @@
|
||||
# Tests success paths for rendering a secret document:
#
# 1. Verifies that rendering a document with storagePolicy: encrypted
#    results in the secret payload getting returned instead of the ref.

defaults:
  request_headers:
    content-type: application/x-yaml
    # Admin token generated by the integration test harness.
    X-Auth-Token: $ENVIRON['TEST_AUTH_TOKEN']
  response_headers:
    content-type: application/x-yaml
  verbose: true

tests:
  - name: purge
    desc: Begin testing from known state.
    DELETE: /api/v1.0/revisions
    status: 204
    response_headers: null

  - name: create_encrypted_passphrase
    desc: Create passphrase with storagePolicy=encrypted
    PUT: /api/v1.0/buckets/secret/documents
    status: 200
    data: |-
      ---
      schema: deckhand/LayeringPolicy/v1
      metadata:
        schema: metadata/Control/v1
        name: layering-policy
      data:
        layerOrder:
          - site
      ---
      schema: deckhand/Passphrase/v1
      metadata:
        schema: metadata/Document/v1
        name: my-passphrase
        storagePolicy: encrypted
        layeringDefinition:
          layer: site
      data: not-a-real-password
      ...

  - name: verify_rendered_documents_returns_secret_payload
    desc: Verify that the rendering the document returns the secret payload.
    GET: /api/v1.0/revisions/$RESPONSE['$.[0].status.revision']/rendered-documents
    status: 200
    query_parameters:
      metadata.name: my-passphrase
    response_multidoc_jsonpaths:
      $.`len`: 1
      # The rendered document exposes the decrypted payload, not the
      # Barbican secret ref.
      $.[0].data: not-a-real-password
|
@ -0,0 +1,92 @@
|
||||
# Tests success paths for secret substitution:
#
# 1. Tests that creating a secret passphrase alongside other documents
#    results in the Barbican secret ref being returned.
# 2. Tests that the secret payload is included in the destination
#    and source documents after document rendering.

defaults:
  request_headers:
    content-type: application/x-yaml
    # Admin token generated by the integration test harness.
    X-Auth-Token: $ENVIRON['TEST_AUTH_TOKEN']
  response_headers:
    content-type: application/x-yaml
  verbose: true

tests:
  - name: purge
    desc: Begin testing from known state.
    DELETE: /api/v1.0/revisions
    status: 204
    response_headers: null

  - name: create_documents_for_secret_substitution
    desc: Create documents with substitution source with storagePolicy=encrypted
    PUT: /api/v1.0/buckets/secret/documents
    status: 200
    data: |-
      ---
      schema: deckhand/LayeringPolicy/v1
      metadata:
        schema: metadata/Control/v1
        name: layering-policy
      data:
        layerOrder:
          - site
      ---
      schema: deckhand/Certificate/v1
      metadata:
        name: example-cert
        schema: metadata/Document/v1
        layeringDefinition:
          layer: site
        storagePolicy: encrypted
      data: CERTIFICATE DATA
      ---
      schema: armada/Chart/v1
      metadata:
        schema: metadata/Document/v1
        name: armada-chart-01
        layeringDefinition:
          layer: site
        substitutions:
          - dest:
              path: .chart.values.tls.certificate
            src:
              schema: deckhand/Certificate/v1
              name: example-cert
              path: .
      data: {}
      ...

  - name: verify_multiple_revision_documents_returns_secret_ref
    desc: Verify that secret ref was created for example-cert among multiple created documents.
    GET: /api/v1.0/revisions/$RESPONSE['$.[0].status.revision']/documents
    status: 200
    query_parameters:
      metadata.name: example-cert
    response_multidoc_jsonpaths:
      $.`len`: 1
      # NOTE(fmontei): jsonpath-rw-ext uses a 1 character separator (rather than allowing a string)
      # leading to this nastiness:
      $.[0].data.`split(:, 0, 1)` + "://" + $.[0].data.`split(/, 2, 3)` + "/v1": $ENVIRON['TEST_BARBICAN_URL']

  - name: verify_secret_payload_in_destination_document
    desc: Verify secret payload is injected in destination document as well as example-cert.
    GET: /api/v1.0/revisions/$RESPONSE['$.[0].status.revision']/rendered-documents
    status: 200
    query_parameters:
      metadata.name:
        - armada-chart-01
        - example-cert
      sort: metadata.name
    response_multidoc_jsonpaths:
      $.`len`: 2
      $.[0].metadata.name: armada-chart-01
      $.[0].data:
        chart:
          values:
            tls:
              certificate: CERTIFICATE DATA
      $.[1].metadata.name: example-cert
      $.[1].data: CERTIFICATE DATA
|
60
deckhand/tests/integration/test_gabbi.py
Normal file
60
deckhand/tests/integration/test_gabbi.py
Normal file
@ -0,0 +1,60 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import yaml
|
||||
|
||||
from gabbi import driver
|
||||
from gabbi.driver import test_pytest # noqa
|
||||
from gabbi.handlers import jsonhandler
|
||||
|
||||
TESTS_DIR = 'gabbits'
|
||||
|
||||
|
||||
# This is quite similar to the existing JSONHandler, so use it as the base
# class instead of `gabbi.handlers.base.ContentHandler`.
class MultidocJsonpaths(jsonhandler.JSONHandler):
    """Gabbi content handler for YAML multi-document response bodies.

    Parses each response as a *list* of YAML documents so that tests can
    assert against them with ``response_multidoc_jsonpaths`` entries.
    """

    test_key_suffix = 'multidoc_jsonpaths'

    @staticmethod
    def accepts(content_type):
        # Drop any media-type parameters (e.g. "; charset=utf-8") first.
        mime = content_type.split(';', 1)[0].strip()
        if mime.endswith('+yaml'):
            return True
        return mime.startswith(('application/yaml', 'application/x-yaml'))

    @staticmethod
    def dumps(data, pretty=False, test=None):
        # Serialize a list of documents back into a YAML stream.
        return yaml.safe_dump_all(data)

    @staticmethod
    def loads(string):
        # NOTE: The simple approach to handling dictionary versus list response
        # bodies is to always parse the response body as a list and index into
        # the first element using [0] throughout the tests.
        return list(yaml.safe_load_all(string))
|
||||
|
||||
|
||||
def pytest_generate_tests(metafunc):
    """Generate one pytest test per gabbi YAML scenario under ``gabbits``."""
    gabbit_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
    # NOTE(fmontei): While only `url` or `host` is needed, strangely both
    # are needed because we use `pytest-html` which throws an error without
    # `host`.
    driver.py_test_generator(
        gabbit_dir, url=os.environ['DECKHAND_TEST_URL'], host='localhost',
        # NOTE(fmontei): When there are multiple handlers listed that accept
        # the same content-type, the one that is earliest in the list will be
        # used. Thus, we cannot specify multiple content handlers for handling
        # list/dictionary responses from the server using different handlers.
        content_handlers=[MultidocJsonpaths], metafunc=metafunc)
|
180
tools/common-tests.sh
Normal file
180
tools/common-tests.sh
Normal file
@ -0,0 +1,180 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Print an eye-catching section banner to stderr, temporarily disabling
# xtrace so the banner is not drowned in command echoes.
function log_section {
    set +x
    echo 1>&2
    echo 1>&2
    # Quote "$*" so the banner text is not glob-expanded or re-split
    # (the original unquoted $* would expand e.g. a literal '*').
    echo "=== $* ===" 1>&2
    set -x
}
|
||||
|
||||
|
||||
# Run a throwaway PostgreSQL 9.5 container for Deckhand.
# Globals written: POSTGRES_ID (container id), POSTGRES_IP (bridge IP).
function deploy_postgre {
    set -xe

    POSTGRES_ID=$(
        sudo docker run \
            --detach \
            --publish :5432 \
            -e POSTGRES_DB=deckhand \
            -e POSTGRES_USER=deckhand \
            -e POSTGRES_PASSWORD=password \
            postgres:9.5
    )

    POSTGRES_IP=$(
        sudo docker inspect \
            --format='{{ .NetworkSettings.Networks.bridge.IPAddress }}' \
            "$POSTGRES_ID"
    )
}
|
||||
|
||||
|
||||
# Generate logging.conf and deckhand.conf in a fresh temp directory.
# Arguments: $1 - base URL the gabbi tests will hit (exported as
#                 DECKHAND_TEST_URL for test_gabbi.py).
# Globals read: POSTGRES_IP, DECKHAND_IMAGE.
# Globals written/exported: CONF_DIR, DECKHAND_TEST_URL, DATABASE_URL,
#                           DECKHAND_CONFIG_DIR.
function gen_config {
    set -xe

    log_section Creating config directory and test deckhand.conf

    CONF_DIR=$(mktemp -d -p $(pwd))
    sudo chmod 777 -R $CONF_DIR

    export DECKHAND_TEST_URL=$1
    export DATABASE_URL=postgresql+psycopg2://deckhand:password@$POSTGRES_IP:5432/deckhand
    # Used by Deckhand's initialization script to search for config files.
    export DECKHAND_CONFIG_DIR=$CONF_DIR

    cp etc/deckhand/logging.conf.sample $CONF_DIR/logging.conf

    # Create a logging config file to dump everything to stdout/stderr.
    cat <<EOCONF > $CONF_DIR/logging.conf
[loggers]
keys = root, deckhand, error

[handlers]
keys = null, stderr, stdout

[formatters]
keys = simple, context

[logger_deckhand]
level = DEBUG
handlers = stdout
qualname = deckhand

[logger_error]
level = ERROR
handlers = stderr

[logger_root]
level = WARNING
handlers = null

[handler_stderr]
class = StreamHandler
args = (sys.stderr,)
formatter = context

[handler_stdout]
class = StreamHandler
args = (sys.stdout,)
formatter = context

[handler_null]
class = logging.NullHandler
formatter = context
args = ()

[formatter_context]
class = oslo_log.formatters.ContextFormatter

[formatter_simple]
format=%(asctime)s.%(msecs)03d %(process)d %(levelname)s: %(message)s
EOCONF

    # Create a Deckhand config file with bare minimum options.
    cat <<EOCONF > $CONF_DIR/deckhand.conf
[DEFAULT]
debug = true
publish_errors = true
use_stderr = true
# NOTE: allow_anonymous_access allows these functional tests to get around
# Keystone authentication, but the context that is provided has zero privileges
# so we must also override the policy file for authorization to pass.
allow_anonymous_access = true

[oslo_policy]
policy_file = policy.yaml

[barbican]

[database]
connection = $DATABASE_URL

[keystone_authtoken]
# NOTE(fmontei): Values taken from clouds.yaml. Values only used for
# integration testing.
#
# clouds.yaml (snippet):
#
# username: 'admin'
# password: 'password'
# project_name: 'admin'
# project_domain_name: 'default'
# user_domain_name: 'default'
# auth_url: 'http://keystone.openstack.svc.cluster.local/v3'

username = admin
password = password
project_name = admin
project_domain_name = Default
user_domain_name = Default
auth_url = http://keystone.openstack.svc.cluster.local/v3
auth_type = password
EOCONF

    # Only set up logging if running Deckhand via uwsgi. The container already
    # has values for logging.
    if [ -z "$DECKHAND_IMAGE" ]; then
        # FIX(review): the original omitted -i, so sed merely printed to
        # stdout and log_config_append was never written into deckhand.conf.
        sed -i '1 a log_config_append = '"$CONF_DIR"'/logging.conf' $CONF_DIR/deckhand.conf
    fi

    echo $CONF_DIR/deckhand.conf 1>&2
    cat $CONF_DIR/deckhand.conf 1>&2

    echo $CONF_DIR/logging.conf 1>&2
    cat $CONF_DIR/logging.conf 1>&2

    log_section Starting server
}
|
||||
|
||||
|
||||
# Write deckhand-paste.ini into $CONF_DIR.
# Arguments: $1 - "true" to strip the authtoken middleware from the
#                 deckhand_api pipeline (bypass Keystone), "false" to copy
#                 the paste config verbatim.
function gen_paste {
    set -xe

    local disable_keystone=$1

    if $disable_keystone; then
        # FIX(review): use plain stdout redirection; the original `&>` also
        # sent sed's stderr (e.g. error text) into the paste file.
        sed 's/authtoken api/api/' etc/deckhand/deckhand-paste.ini > $CONF_DIR/deckhand-paste.ini
    else
        cp etc/deckhand/deckhand-paste.ini $CONF_DIR/deckhand-paste.ini
    fi
}
|
||||
|
||||
|
||||
# Generate $CONF_DIR/policy.yaml from the sample policy file, rewriting
# every "rule:..." target to "@" (always allowed) so tests are not blocked
# by authorization.
function gen_policy {
    set -xe

    log_section Creating policy file with liberal permissions

    local policy_file='etc/deckhand/policy.yaml.sample'
    local policy_pattern="deckhand\:"

    # Select the commented-out deckhand policy lines, strip the leading '#'
    # so they become active, then liberalize each rule to '@'.
    # (The redundant `touch` was removed: the redirection creates the file.)
    sed -n "/$policy_pattern/p" "$policy_file" \
        | sed 's/^../\"/' \
        | sed 's/rule\:[A-Za-z\_\-]*/@/' > "$CONF_DIR/policy.yaml"

    echo "$CONF_DIR/policy.yaml" 1>&2
    cat "$CONF_DIR/policy.yaml" 1>&2
}
|
@ -3,235 +3,91 @@
|
||||
# Script intended for running Deckhand functional tests via gabbi. Requires
|
||||
# Docker CE (at least) to run.
|
||||
|
||||
set -xe
|
||||
|
||||
# Meant for capturing output of Deckhand image. This requires that logging
|
||||
# in the image be set up to pipe everything out to stdout/stderr.
|
||||
STDOUT=$(mktemp)
|
||||
|
||||
# NOTE(fmontei): `DECKHAND_IMAGE` should only be specified if the desire is to
|
||||
# run Deckhand functional tests against a specific Deckhand image, which is
|
||||
# useful for CICD (as validating the image is vital). However, if the
|
||||
# `DECKHAND_IMAGE` is not specified, then this implies that the most current
|
||||
# version of the code should be used, which is in the repo itself.
|
||||
DECKHAND_IMAGE=${DECKHAND_IMAGE:-}
|
||||
ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
|
||||
function log_section {
|
||||
set +x
|
||||
echo 1>&2
|
||||
echo 1>&2
|
||||
echo === $* === 1>&2
|
||||
set -x
|
||||
}
|
||||
source $ROOTDIR/common-tests.sh
|
||||
|
||||
set -ex
|
||||
|
||||
function cleanup {
|
||||
sudo docker stop $POSTGRES_ID
|
||||
function cleanup_deckhand {
|
||||
set +e
|
||||
|
||||
if [ -n "$POSTGRES_ID" ]; then
|
||||
sudo docker stop $POSTGRES_ID
|
||||
fi
|
||||
if [ -n "$DECKHAND_ID" ]; then
|
||||
sudo docker stop $DECKHAND_ID
|
||||
fi
|
||||
rm -rf $CONF_DIR
|
||||
if [ -d "$CONF_DIR" ]; then
|
||||
rm -rf $CONF_DIR
|
||||
fi
|
||||
|
||||
if [ -z "$DECKHAND_IMAGE" ]; then
|
||||
# Kill all processes and child processes (for example, if workers > 1)
|
||||
# if using uwsgi only.
|
||||
PGID=$(ps -o comm -o pgid | grep uwsgi | grep -o [0-9]* | head -n 1)
|
||||
# Kill all processes and child processes (for example, if workers > 1)
|
||||
# if using uwsgi only.
|
||||
PGID=$(ps -o comm -o pgid | grep uwsgi | grep -o [0-9]* | head -n 1)
|
||||
if [ -n "$PGID" ]; then
|
||||
setsid kill -- -$PGID
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
trap cleanup EXIT
|
||||
trap cleanup_deckhand EXIT
|
||||
|
||||
|
||||
POSTGRES_ID=$(
|
||||
sudo docker run \
|
||||
--detach \
|
||||
--publish :5432 \
|
||||
-e POSTGRES_DB=deckhand \
|
||||
-e POSTGRES_USER=deckhand \
|
||||
-e POSTGRES_PASSWORD=password \
|
||||
postgres:9.5
|
||||
)
|
||||
function deploy_deckhand {
|
||||
gen_config "http://localhost:9000"
|
||||
gen_paste true
|
||||
gen_policy
|
||||
|
||||
POSTGRES_IP=$(
|
||||
sudo docker inspect \
|
||||
--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}' \
|
||||
$POSTGRES_ID
|
||||
)
|
||||
if [ -z "$DECKHAND_IMAGE" ]; then
|
||||
log_section "Running Deckhand via uwsgi"
|
||||
|
||||
alembic upgrade head
|
||||
# NOTE(fmontei): Deckhand's database is not configured to work with
|
||||
# multiprocessing. Currently there is a data race on acquiring shared
|
||||
# SQLAlchemy engine pooled connection strings when workers > 1. As a
|
||||
# workaround, we use multiple threads but only 1 worker. For more
|
||||
# information, see: https://github.com/att-comdev/deckhand/issues/20
|
||||
export DECKHAND_API_WORKERS=1
|
||||
export DECKHAND_API_THREADS=4
|
||||
source $ROOTDIR/../entrypoint.sh server &
|
||||
else
|
||||
log_section "Running Deckhand via Docker"
|
||||
sudo docker run \
|
||||
--rm \
|
||||
--net=host \
|
||||
-v $CONF_DIR:/etc/deckhand \
|
||||
$DECKHAND_IMAGE alembic upgrade head &> $STDOUT &
|
||||
sudo docker run \
|
||||
--rm \
|
||||
--net=host \
|
||||
-p 9000:9000 \
|
||||
-v $CONF_DIR:/etc/deckhand \
|
||||
$DECKHAND_IMAGE server &> $STDOUT &
|
||||
fi
|
||||
|
||||
CONF_DIR=$(mktemp -d -p $(pwd))
|
||||
sudo chmod 777 -R $CONF_DIR
|
||||
# Give the server a chance to come up. Better to poll a health check.
|
||||
sleep 5
|
||||
|
||||
function gen_config {
|
||||
log_section Creating config file
|
||||
|
||||
export DECKHAND_TEST_URL=http://localhost:9000
|
||||
export DATABASE_URL=postgresql+psycopg2://deckhand:password@$POSTGRES_IP:5432/deckhand
|
||||
# Used by Deckhand's initialization script to search for config files.
|
||||
export DECKHAND_CONFIG_DIR=$CONF_DIR
|
||||
|
||||
cp etc/deckhand/logging.conf.sample $CONF_DIR/logging.conf
|
||||
|
||||
# Create a logging config file to dump everything to stdout/stderr.
|
||||
cat <<EOCONF > $CONF_DIR/logging.conf
|
||||
[loggers]
|
||||
keys = root, deckhand, error
|
||||
|
||||
[handlers]
|
||||
keys = null, stderr, stdout
|
||||
|
||||
[formatters]
|
||||
keys = simple, context
|
||||
|
||||
[logger_deckhand]
|
||||
level = DEBUG
|
||||
handlers = stdout
|
||||
qualname = deckhand
|
||||
|
||||
[logger_error]
|
||||
level = ERROR
|
||||
handlers = stderr
|
||||
|
||||
[logger_root]
|
||||
level = WARNING
|
||||
handlers = null
|
||||
|
||||
[handler_stderr]
|
||||
class = StreamHandler
|
||||
args = (sys.stderr,)
|
||||
formatter = context
|
||||
|
||||
[handler_stdout]
|
||||
class = StreamHandler
|
||||
args = (sys.stdout,)
|
||||
formatter = context
|
||||
|
||||
[handler_null]
|
||||
class = logging.NullHandler
|
||||
formatter = context
|
||||
args = ()
|
||||
|
||||
[formatter_context]
|
||||
class = oslo_log.formatters.ContextFormatter
|
||||
|
||||
[formatter_simple]
|
||||
format=%(asctime)s.%(msecs)03d %(process)d %(levelname)s: %(message)s
|
||||
EOCONF
|
||||
|
||||
# Create a Deckhand config file with bare minimum options.
|
||||
cat <<EOCONF > $CONF_DIR/deckhand.conf
|
||||
[DEFAULT]
|
||||
debug = true
|
||||
publish_errors = true
|
||||
use_stderr = true
|
||||
# NOTE: allow_anonymous_access allows these functional tests to get around
|
||||
# Keystone authentication, but the context that is provided has zero privileges
|
||||
# so we must also override the policy file for authorization to pass.
|
||||
allow_anonymous_access = true
|
||||
|
||||
[oslo_policy]
|
||||
policy_file = policy.yaml
|
||||
|
||||
[barbican]
|
||||
|
||||
[database]
|
||||
connection = $DATABASE_URL
|
||||
|
||||
[keystone_authtoken]
|
||||
# Populate keystone_authtoken with values like the following should Keystone
|
||||
# integration be needed here.
|
||||
# project_domain_name = Default
|
||||
# project_name = admin
|
||||
# user_domain_name = Default
|
||||
# password = devstack
|
||||
# username = admin
|
||||
# auth_url = http://127.0.0.1/identity
|
||||
# auth_type = password
|
||||
EOCONF
|
||||
|
||||
# Only set up logging if running Deckhand via uwsgi. The container already has
|
||||
# values for logging.
|
||||
if [ -z "$DECKHAND_IMAGE" ]; then
|
||||
sed '1 a log_config_append = '"$CONF_DIR"'/logging.conf' $CONF_DIR/deckhand.conf
|
||||
fi
|
||||
|
||||
# Only set up logging if running Deckhand via uwsgi. The container already has
|
||||
# values for logging.
|
||||
if [ -z "$DECKHAND_IMAGE" ]; then
|
||||
sed '1 a log_config_append = '"$CONF_DIR"'/logging.conf' $CONF_DIR/deckhand.conf
|
||||
fi
|
||||
|
||||
echo $CONF_DIR/deckhand.conf 1>&2
|
||||
cat $CONF_DIR/deckhand.conf 1>&2
|
||||
|
||||
echo $CONF_DIR/logging.conf 1>&2
|
||||
cat $CONF_DIR/logging.conf 1>&2
|
||||
|
||||
log_section Starting server
|
||||
DECKHAND_ID=$(sudo docker ps | grep deckhand | awk '{print $1}')
|
||||
echo $DECKHAND_ID
|
||||
}
|
||||
|
||||
function gen_paste {
|
||||
log_section Creating paste config without [filter:authtoken]
|
||||
# NOTE(fmontei): Since this script does not currently support Keystone
|
||||
# integration, we remove ``filter:authtoken`` from the ``deckhand_api``
|
||||
# pipeline to avoid any kind of auth issues.
|
||||
sed 's/authtoken api/api/' etc/deckhand/deckhand-paste.ini &> $CONF_DIR/deckhand-paste.ini
|
||||
}
|
||||
|
||||
function gen_policy {
|
||||
log_section Creating policy file with liberal permissions
|
||||
|
||||
policy_file='etc/deckhand/policy.yaml.sample'
|
||||
policy_pattern="deckhand\:"
|
||||
|
||||
touch $CONF_DIR/policy.yaml
|
||||
|
||||
sed -n "/$policy_pattern/p" "$policy_file" \
|
||||
| sed 's/^../\"/' \
|
||||
| sed 's/rule\:[A-Za-z\_\-]*/@/' > $CONF_DIR/policy.yaml
|
||||
|
||||
echo $CONF_DIR/'policy.yaml' 1>&2
|
||||
cat $CONF_DIR/'policy.yaml' 1>&2
|
||||
}
|
||||
|
||||
gen_config
|
||||
gen_paste
|
||||
gen_policy
|
||||
|
||||
ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
|
||||
if [ -z "$DECKHAND_IMAGE" ]; then
|
||||
log_section "Running Deckhand via uwsgi"
|
||||
|
||||
alembic upgrade head
|
||||
# NOTE(fmontei): Deckhand's database is not configured to work with
|
||||
# multiprocessing. Currently there is a data race on acquiring shared
|
||||
# SQLAlchemy engine pooled connection strings when workers > 1. As a
|
||||
# workaround, we use multiple threads but only 1 worker. For more
|
||||
# information, see: https://github.com/att-comdev/deckhand/issues/20
|
||||
export DECKHAND_API_WORKERS=1
|
||||
export DECKHAND_API_THREADS=4
|
||||
source $ROOTDIR/../entrypoint.sh server &
|
||||
else
|
||||
log_section "Running Deckhand via Docker"
|
||||
sudo docker run \
|
||||
--rm \
|
||||
--net=host \
|
||||
-v $CONF_DIR:/etc/deckhand \
|
||||
$DECKHAND_IMAGE alembic upgrade head &> $STDOUT
|
||||
sudo docker run \
|
||||
--rm \
|
||||
--net=host \
|
||||
-p 9000:9000 \
|
||||
-v $CONF_DIR:/etc/deckhand \
|
||||
$DECKHAND_IMAGE &> $STDOUT &
|
||||
fi
|
||||
|
||||
# Give the server a chance to come up. Better to poll a health check.
|
||||
sleep 5
|
||||
|
||||
DECKHAND_ID=$(sudo docker ps | grep deckhand | awk '{print $1}')
|
||||
echo $DECKHAND_ID
|
||||
# Deploy Deckhand and PostgreSQL and run tests.
|
||||
deploy_postgre
|
||||
deploy_deckhand
|
||||
|
||||
log_section Running tests
|
||||
|
||||
|
203
tools/integration-tests.sh
Executable file
203
tools/integration-tests.sh
Executable file
@ -0,0 +1,203 @@
|
||||
#!/usr/bin/env bash

# Script intended for running Deckhand integration tests, where integration
# is defined as the interaction between Deckhand and Keystone and Barbican.
# Installation dependency is openstack-helm-infra.
#
# USAGE: ./tools/integration-tests.sh <test-regex>

# TODO(fmontei): Use Ansible for all this.
# NOTE(fmontei): May have to automate the following installation guide for CI:
# https://docs.openstack.org/openstack-helm/latest/install/developer/requirements-and-host-config.html#host-configuration

set -xe

# Deckhand image under test; override to validate a specific build.
DECKHAND_IMAGE=${DECKHAND_IMAGE:-quay.io/attcomdev/deckhand:latest}

CURRENT_DIR="$(pwd)"
# Checkout locations for the OpenStack-Helm repos; cloned below if absent.
: ${OSH_INFRA_PATH:="../openstack-helm-infra"}
: ${OSH_PATH:="../openstack-helm"}
||||
|
||||
# Best-effort teardown: stop the PostgreSQL and Deckhand containers and
# remove the temp config dir. Runs under `set +e` so a partially-deployed
# environment is still cleaned up. Invoked from the EXIT trap.
function cleanup_deckhand {
    set +e

    if [ -n "$POSTGRES_ID" ]; then
        sudo docker stop "$POSTGRES_ID"
    fi
    if [ -n "$DECKHAND_ID" ]; then
        sudo docker stop "$DECKHAND_ID"
    fi
    if [ -d "$CONF_DIR" ]; then
        # Quoted: an unset/odd CONF_DIR must never turn into `rm -rf` chaos.
        rm -rf "$CONF_DIR"
    fi
}
|
||||
|
||||
|
||||
# Ensure containers and temp config are removed however the script exits.
trap cleanup_deckhand EXIT
|
||||
|
||||
|
||||
# Install the OS packages and the pinned gabbi release needed to deploy the
# environment and drive the tests. Requires passwordless sudo.
function install_deps {
    set -xe

    local packages=(
        ca-certificates
        git
        make
        jq
        nmap
        curl
        uuid-runtime
        ipcalc
        python-pytest
        python-pip
    )

    sudo apt-get update
    sudo apt-get install --no-install-recommends -y "${packages[@]}"
    # NOTE(fmontei): Use this version because newer versions might
    # be slightly different in terms of test syntax in YAML files.
    sudo -H -E pip install gabbi==1.35.1
}
|
||||
|
||||
|
||||
# Deploy the Barbican chart into the `openstack` namespace via Helm and
# block until its pods are ready.
# NOTE(review): assumes cwd is the openstack-helm checkout — it is called
# from deploy_osh_keystone_barbican after `cd ${OSH_PATH}`.
function deploy_barbican {
    set -xe

    # Pull images and lint chart
    make pull-images barbican

    # Deploy command
    helm upgrade --install barbican ./barbican \
        --namespace=openstack

    # Wait for deploy
    ./tools/deployment/common/wait-for-pods.sh openstack

    # Validate deployment info
    helm status barbican
}
|
||||
|
||||
|
||||
# Clone (if needed) openstack-helm and openstack-helm-infra, prepare the
# host plus a local Kubernetes cluster, then deploy Keystone, Barbican and
# their backing services (ingress, NFS, MariaDB, RabbitMQ, Memcached).
function deploy_osh_keystone_barbican {
    set -xe

    if [ ! -d "$OSH_INFRA_PATH" ]; then
        git clone https://git.openstack.org/openstack/openstack-helm-infra.git ../openstack-helm-infra
    fi

    if [ ! -d "$OSH_PATH" ]; then
        git clone https://git.openstack.org/openstack/openstack-helm.git ../openstack-helm
    fi

    cd ${OSH_INFRA_PATH}
    make dev-deploy setup-host
    make dev-deploy k8s

    cd ${OSH_PATH}
    # Setup clients on the host and assemble the charts
    ./tools/deployment/developer/common/020-setup-client.sh
    # Deploy the ingress controller
    ./tools/deployment/developer/common/030-ingress.sh
    # Deploy NFS Provisioner
    ./tools/deployment/developer/nfs/040-nfs-provisioner.sh
    # Deploy MariaDB
    ./tools/deployment/developer/nfs/050-mariadb.sh
    # Deploy RabbitMQ
    ./tools/deployment/developer/nfs/060-rabbitmq.sh
    # Deploy Memcached
    ./tools/deployment/developer/nfs/070-memcached.sh
    # Deploy Keystone
    ./tools/deployment/developer/nfs/080-keystone.sh

    # Runs with cwd still inside $OSH_PATH, which deploy_barbican relies on.
    deploy_barbican
}
|
||||
|
||||
|
||||
# Register the Deckhand service and endpoints in Keystone, generate config
# (with Keystone auth enabled), export the admin token and Barbican URL used
# by the gabbi tests, then run the Deckhand container.
# Globals written/exported: TEST_AUTH_TOKEN, TEST_BARBICAN_URL, DECKHAND_ID.
function deploy_deckhand {
    set -xe

    # Cloud credentials resolved from clouds.yaml by python-openstackclient.
    export OS_CLOUD=openstack_helm

    cd ${CURRENT_DIR}

    # TODO(fmontei): Use Keystone bootstrap override instead.
    interfaces=("admin" "public" "internal")
    deckhand_endpoint="http://127.0.0.1:9000"

    # Idempotent: only create the service/endpoints if not already present.
    if [ -z "$( openstack service list --format value | grep deckhand )" ]; then
        openstack service create --enable --name deckhand deckhand
    fi

    for iface in ${interfaces[@]}; do
        if [ -z "$( openstack endpoint list --format value | grep deckhand | grep $iface )" ]; then
            openstack endpoint create --enable \
                --region RegionOne \
                deckhand $iface $deckhand_endpoint/api/v1.0
        fi
    done

    openstack service list | grep deckhand
    openstack endpoint list | grep deckhand

    # gen_config/gen_paste come from tools/common-tests.sh; `false` keeps the
    # authtoken middleware so real Keystone authN/authZ is exercised.
    gen_config $deckhand_endpoint
    gen_paste false

    # NOTE(fmontei): Generate an admin token instead of hacking a policy
    # file with no permissions to test authN as well as authZ.
    export TEST_AUTH_TOKEN=$( openstack token issue --format value -c id )
    export TEST_BARBICAN_URL=$( openstack endpoint list --format value | grep barbican | grep public | awk '{print $7}' )

    log_section "Running Deckhand via Docker"
    # NOTE(review): the migration is backgrounded, so it races the server
    # start below — confirm whether `alembic upgrade head` should block.
    sudo docker run \
        --rm \
        --net=host \
        -v $CONF_DIR:/etc/deckhand \
        $DECKHAND_IMAGE alembic upgrade head &
    sudo docker run \
        --rm \
        --net=host \
        -p 9000:9000 \
        -v $CONF_DIR:/etc/deckhand \
        $DECKHAND_IMAGE server &

    # Give the server a chance to come up. Better to poll a health check.
    sleep 5

    DECKHAND_ID=$(sudo docker ps | grep deckhand | awk '{print $1}')
    echo $DECKHAND_ID
}
|
||||
|
||||
|
||||
# Run the gabbi-driven integration tests via py.test.
# Arguments: $1 (optional) - py.test -k filter expression; all tests run
#                            when omitted.
# Globals written: TEST_STATUS. Exits non-zero if any test fails.
function run_tests {
    set +e

    local posargs="$*"
    if [ -n "$posargs" ]; then
        # FIX(review): quote the filter so -k expressions containing spaces
        # (e.g. "a and not b") are passed as a single argument.
        py.test -k "$1" -svx "${CURRENT_DIR}/deckhand/tests/integration/test_gabbi.py"
    else
        py.test -svx "${CURRENT_DIR}/deckhand/tests/integration/test_gabbi.py"
    fi
    TEST_STATUS=$?

    set -e

    if [ "x$TEST_STATUS" = "x0" ]; then
        log_section Done SUCCESS
    else
        log_section Done FAILURE
        exit $TEST_STATUS
    fi
}
|
||||
|
||||
|
||||
# Shared helpers: log_section, deploy_postgre, gen_config, gen_paste, gen_policy.
source ${CURRENT_DIR}/tools/common-tests.sh

# Install required packages.
install_deps

# Clone openstack-helm-infra and setup host and k8s.
deploy_osh_keystone_barbican

# Deploy PostgreSQL and Deckhand.
deploy_postgre
deploy_deckhand

# Any positional arguments are forwarded as a py.test -k filter.
run_tests "$@"
|
Loading…
Reference in New Issue
Block a user