Implement barbican in operator

Change-Id: Ide8c89f22ce45bc2366382c497da223c578a1556
This commit is contained in:
okozachenko 2020-08-05 17:25:17 +03:00
parent 0ff9d1cddb
commit 72175e9137
20 changed files with 1181 additions and 30 deletions


@ -1,6 +1,7 @@
---
secretName: devstack
configMap:
barbican: {}
ceilometer:
dbUri: "sqlite:///:memory:"
glance: {}


@ -4,6 +4,7 @@ metadata:
name: operator-config
data:
operator-config.yaml: |
barbican: {}
ceilometer:
dbUri: "sqlite:///:memory:"
horizon:

devstack/lib/barbican (new file, 620 lines)

@ -0,0 +1,620 @@
#!/usr/bin/env bash
# Install and start **Barbican** service
# To enable a minimal set of Barbican features, add the following to localrc:
# enable_service barbican-svc barbican-retry barbican-keystone-listener
#
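# As an illustrative sketch (this is the upstream plugin URL; the operator's
# gate wires the plugin in differently via its playbooks), a minimal local.conf
# might look like:
#   [[local|localrc]]
#   enable_plugin barbican https://opendev.org/openstack/barbican
#   enable_service barbican-svc barbican-retry barbican-keystone-listener
#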
# Dependencies:
# - functions
# - OS_AUTH_URL for auth in api
# - DEST set to the destination directory
# - SERVICE_PASSWORD, SERVICE_PROJECT_NAME for auth in api
# - STACK_USER service user
# stack.sh
# ---------
# install_barbican
# configure_barbican
# init_barbican
# start_barbican
# stop_barbican
# cleanup_barbican
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
# PyKMIP configuration
PYKMIP_SERVER_KEY=${PYKMIP_SERVER_KEY:-$INT_CA_DIR/private/pykmip-server.key}
PYKMIP_SERVER_CERT=${PYKMIP_SERVER_CERT:-$INT_CA_DIR/pykmip-server.crt}
PYKMIP_CLIENT_KEY=${PYKMIP_CLIENT_KEY:-$INT_CA_DIR/private/pykmip-client.key}
PYKMIP_CLIENT_CERT=${PYKMIP_CLIENT_CERT:-$INT_CA_DIR/pykmip-client.crt}
PYKMIP_CA_PATH=${PYKMIP_CA_PATH:-$INT_CA_DIR/ca-chain.pem}
# Functions
# ---------
# TODO(john-wood-w) These 'magic' functions are called by devstack to enable
# a given service (named by the part between 'is_' and '_enabled'). Currently the
# Zuul infra gate configuration (at https://github.com/openstack-infra/project-config/blob/master/jenkins/jobs/barbican.yaml)
# only enables the 'barbican' service, so the three functions below, for the
# services we wish to run, have to key off of that lone 'barbican' selection.
# Once the Zuul config is updated to add these services properly, these
# functions should be replaced by the single function commented out below.
# !!!! Special thanks to rm_work for figuring this out !!!!
function is_barbican-retry_enabled {
[[ ,${ENABLED_SERVICES} =~ ,"barbican" ]] && return 0
}
function is_barbican-svc_enabled {
[[ ,${ENABLED_SERVICES} =~ ,"barbican" ]] && return 0
}
function is_barbican-keystone-listener_enabled {
[[ ,${ENABLED_SERVICES} =~ ,"barbican" ]] && return 0
}
# TODO(john-wood-w) Replace the above functions with the one below once Zuul
# is updated as described above.
## Test if any Barbican services are enabled
## is_barbican_enabled
#function is_barbican_enabled {
# [[ ,${ENABLED_SERVICES} =~ ,"barbican-" ]] && return 0
# return 1
#}
# cleanup_barbican - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_barbican {
if is_service_enabled barbican-vault; then
# Kill the vault process, screen session and remove the generated files
# during installation.
local session_name="barbican_vault"
local vault_token_file="${BARBICAN_DIR}/vault_root_token_id"
existing_ses=$(screen -ls | grep ${session_name} | awk '{print $1}')
if [[ -n "${existing_ses}" ]]; then
screen -S ${existing_ses} -X quit
fi
sudo pkill -f -9 "vault server"
sudo rm -f ${vault_token_file} vault.log
fi
}
# configure_barbicanclient - Set config files, create data dirs, etc
function configure_barbicanclient {
setup_dev_lib "python-barbicanclient"
}
# configure_dogtag_plugin - Change config to use dogtag plugin
function configure_dogtag_plugin {
sudo openssl pkcs12 -in /root/.dogtag/pki-tomcat/ca_admin_cert.p12 -passin pass:PASSWORD -out $BARBICAN_CONF_DIR/kra_admin_cert.pem -nodes
sudo chown $USER $BARBICAN_CONF_DIR/kra_admin_cert.pem
iniset $BARBICAN_CONF dogtag_plugin dogtag_port 8373
iniset $BARBICAN_CONF dogtag_plugin pem_path "$BARBICAN_CONF_DIR/kra_admin_cert.pem"
iniset $BARBICAN_CONF dogtag_plugin dogtag_host localhost
iniset $BARBICAN_CONF dogtag_plugin nss_db_path '/etc/barbican/alias'
iniset $BARBICAN_CONF dogtag_plugin nss_db_path_ca '/etc/barbican/alias-ca'
iniset $BARBICAN_CONF dogtag_plugin nss_password 'password123'
iniset $BARBICAN_CONF dogtag_plugin simple_cmc_profile 'caOtherCert'
iniset $BARBICAN_CONF dogtag_plugin ca_expiration_time 1
iniset $BARBICAN_CONF dogtag_plugin plugin_working_dir '/etc/barbican/dogtag'
iniset $BARBICAN_CONF secretstore enabled_secretstore_plugins dogtag_crypto
iniset $BARBICAN_CONF certificate enabled_certificate_plugins dogtag
}
# configure_barbican - Set config files, create data dirs, etc
function configure_barbican {
setup_develop $BARBICAN_DIR
[ ! -d $BARBICAN_CONF_DIR ] && sudo mkdir -m 755 -p $BARBICAN_CONF_DIR
sudo chown $USER $BARBICAN_CONF_DIR
[ ! -d $BARBICAN_API_LOG_DIR ] && sudo mkdir -m 755 -p $BARBICAN_API_LOG_DIR
sudo chown $USER $BARBICAN_API_LOG_DIR
[ ! -d $BARBICAN_CONF_DIR ] && sudo mkdir -m 755 -p $BARBICAN_CONF_DIR
sudo chown $USER $BARBICAN_CONF_DIR
# Copy the barbican config files to the config dir
cp $BARBICAN_DIR/etc/barbican/barbican-api-paste.ini $BARBICAN_CONF_DIR
cp -R $BARBICAN_DIR/etc/barbican/vassals $BARBICAN_CONF_DIR
# Copy functional test config
cp $BARBICAN_DIR/etc/barbican/barbican-functional.conf $BARBICAN_CONF_DIR
# Enable DEBUG
iniset $BARBICAN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
# Set the host_href
iniset $BARBICAN_CONF DEFAULT host_href "$BARBICAN_HOST_HREF"
# Enable logging to stderr to have log also in the screen window
iniset $BARBICAN_CONF DEFAULT use_stderr True
# Format logging
if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
setup_colorized_logging $BARBICAN_CONF DEFAULT project user
fi
# Set the database connection url
BARBICAN_DATABASE_USER=$(get_data_from_secret barbican-mysql openstack USER)
BARBICAN_DATABASE_PASSWORD=$(get_data_from_secret barbican-mysql openstack PASSWORD)
BARBICAN_DATABASE_NAME=$(get_data_from_secret barbican-mysql openstack DATABASE)
iniset $BARBICAN_CONF DEFAULT sql_connection "mysql+pymysql://$BARBICAN_DATABASE_USER:$BARBICAN_DATABASE_PASSWORD@barbican-mysql-master/$BARBICAN_DATABASE_NAME?charset=utf8"
# Disable auto-migration when deploying Barbican
iniset $BARBICAN_CONF DEFAULT db_auto_create False
# Increase default request buffer size, keystone auth PKI tokens can be very long
iniset $BARBICAN_CONF_DIR/vassals/barbican-api.ini uwsgi buffer-size 65535
# Rabbit settings
if is_service_enabled rabbit; then
iniset $BARBICAN_CONF DEFAULT transport_url rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672
else
echo_summary "Barbican requires that the RabbitMQ service is enabled"
fi
write_uwsgi_config "$BARBICAN_UWSGI_CONF" "$BARBICAN_WSGI" "/key-manager"
## Set up keystone
# Turn on the middleware
iniset $BARBICAN_PASTE_CONF 'pipeline:barbican_api' pipeline 'barbican-api-keystone'
# Set the keystone parameters
configure_auth_token_middleware $BARBICAN_CONF barbican $BARBICAN_AUTH_CACHE_DIR
# NOTE(Alex): Point keystonemiddleware at the operator-managed memcached (mcrouter) service
iniset $BARBICAN_CONF keystone_authtoken memcached_servers "mcrouter-memcached-barbican:11211"
# Enable the keystone listener
iniset $BARBICAN_CONF keystone_notifications enable True
iniset $BARBICAN_CONF keystone_notifications control_exchange 'keystone'
}
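# For reference, a sketch of the barbican.conf produced by the options above
# (user, password and database come from the barbican-mysql secret; rabbit
# values come from the devstack environment):
#
#   [DEFAULT]
#   sql_connection = mysql+pymysql://<user>:<password>@barbican-mysql-master/<database>?charset=utf8
#   transport_url = rabbit://<rabbit_user>:<rabbit_password>@<rabbit_host>:5672
#   host_href = <BARBICAN_HOST_HREF>
#   db_auto_create = False
#
#   [keystone_authtoken]
#   memcached_servers = mcrouter-memcached-barbican:11211
#
#   [keystone_notifications]
#   enable = True
#   control_exchange = keystone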
# init_barbican - Create the config secret, auth cache dir and database
function init_barbican {
kubectl create secret generic barbican-config -n openstack \
--from-file=/etc/barbican/barbican.conf \
--from-file=/etc/barbican/barbican-api-paste.ini \
--from-file=/etc/barbican/barbican-functional.conf
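# The barbican-config secret created above is what the operator's daemonset
# template (templates/barbican/daemonset.yml.j2) mounts at /etc/barbican
# inside the API containers.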
# Create cache dir
sudo mkdir -p $BARBICAN_AUTH_CACHE_DIR
sudo chown $STACK_USER $BARBICAN_AUTH_CACHE_DIR
rm -f $BARBICAN_AUTH_CACHE_DIR/*
recreate_database barbican utf8
}
# install_barbican - Collect source and prepare
function install_barbican {
echo noop
}
# install_barbicanclient - Collect source and prepare
function install_barbicanclient {
if use_library_from_git "python-barbicanclient"; then
git_clone_by_name "python-barbicanclient"
setup_dev_lib "python-barbicanclient"
fi
}
# start_barbican - Restart the Barbican daemonset and start the auxiliary processes
function start_barbican {
# Start the Barbican service up.
kubernetes_rollout_restart daemonset/barbican
kubernetes_rollout_status daemonset/barbican
proxy_pass_to_kubernetes /key-manager barbican barbican-wsgi-api
# Pause while the barbican-svc populates the database, otherwise the retry
# service below might try to do this at the same time, leading to race
# conditions.
sleep 10
# Start the retry scheduler server up.
run_process barbican-retry "$BARBICAN_BIN_DIR/barbican-retry --config-file=$BARBICAN_CONF_DIR/barbican.conf"
# Start the barbican-keystone-listener
run_process barbican-keystone-listener "$BARBICAN_BIN_DIR/barbican-keystone-listener --config-file=$BARBICAN_CONF_DIR/barbican.conf"
}
# stop_barbican - Stop running processes
function stop_barbican {
# This will eventually be refactored to work like
# Solum and Manila (script to kick off a wsgiref server)
# For now, this will stop uWSGI rather than have it hang
killall -9 uwsgi
# stop_process cleans up the PID file, but because it uses pkill the Barbican
# uWSGI emperor process does not actually stop (hence the killall above)
stop_process barbican-svc
stop_process barbican-retry
stop_process barbican-keystone-listener
}
function get_id {
echo `"$@" | awk '/ id / { print $4 }'`
}
function create_barbican_accounts {
#
# Setup Default Admin User
#
SERVICE_PROJECT=$(openstack project list | awk "/ $SERVICE_PROJECT_NAME / { print \$2 }")
ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
BARBICAN_USER=$(openstack user create \
--password "$SERVICE_PASSWORD" \
--project $SERVICE_PROJECT \
--email "barbican@example.com" \
barbican \
| grep " id " | get_field 2)
openstack role add --project $SERVICE_PROJECT \
--user $BARBICAN_USER \
$ADMIN_ROLE
#
# Setup Default service-admin User
#
SERVICE_ADMIN=$(get_id openstack user create \
--password "$SERVICE_PASSWORD" \
--email "service-admin@example.com" \
"service-admin")
SERVICE_ADMIN_ROLE=$(get_id openstack role create \
"key-manager:service-admin")
openstack role add \
--user "$SERVICE_ADMIN" \
--project "$SERVICE_PROJECT" \
"$SERVICE_ADMIN_ROLE"
#
# Setup RBAC User Projects and Roles
#
PASSWORD="barbican"
PROJECT_A_ID=$(get_id openstack project create "project_a")
PROJECT_B_ID=$(get_id openstack project create "project_b")
ROLE_ADMIN_ID=$(get_id openstack role show admin)
ROLE_CREATOR_ID=$(get_id openstack role create "creator")
ROLE_OBSERVER_ID=$(get_id openstack role create "observer")
ROLE_AUDIT_ID=$(get_id openstack role create "audit")
#
# Setup RBAC Admin of Project A
#
USER_ID=$(get_id openstack user create \
--password "$PASSWORD" \
--email "admin_a@example.net" \
"project_a_admin")
openstack role add \
--user "$USER_ID" \
--project "$PROJECT_A_ID" \
"$ROLE_ADMIN_ID"
#
# Setup RBAC Creator of Project A
#
USER_ID=$(get_id openstack user create \
--password "$PASSWORD" \
--email "creator_a@example.net" \
"project_a_creator")
openstack role add \
--user "$USER_ID" \
--project "$PROJECT_A_ID" \
"$ROLE_CREATOR_ID"
# Adding second creator user in project_a
USER_ID=$(openstack user create \
--password "$PASSWORD" \
--email "creator2_a@example.net" \
"project_a_creator_2" -f value -c id)
openstack role add \
--user "$USER_ID" \
--project "$PROJECT_A_ID" \
"$ROLE_CREATOR_ID"
#
# Setup RBAC Observer of Project A
#
USER_ID=$(get_id openstack user create \
--password "$PASSWORD" \
--email "observer_a@example.net" \
"project_a_observer")
openstack role add \
--user "$USER_ID" \
--project "$PROJECT_A_ID" \
"$ROLE_OBSERVER_ID"
#
# Setup RBAC Auditor of Project A
#
USER_ID=$(get_id openstack user create \
--password "$PASSWORD" \
--email "auditor_a@example.net" \
"project_a_auditor")
openstack role add \
--user "$USER_ID" \
--project "$PROJECT_A_ID" \
"$ROLE_AUDIT_ID"
#
# Setup RBAC Admin of Project B
#
USER_ID=$(get_id openstack user create \
--password "$PASSWORD" \
--email "admin_b@example.net" \
"project_b_admin")
openstack role add \
--user "$USER_ID" \
--project "$PROJECT_B_ID" \
"$ROLE_ADMIN_ID"
#
# Setup RBAC Creator of Project B
#
USER_ID=$(get_id openstack user create \
--password "$PASSWORD" \
--email "creator_b@example.net" \
"project_b_creator")
openstack role add \
--user "$USER_ID" \
--project "$PROJECT_B_ID" \
"$ROLE_CREATOR_ID"
#
# Setup RBAC Observer of Project B
#
USER_ID=$(get_id openstack user create \
--password "$PASSWORD" \
--email "observer_b@example.net" \
"project_b_observer")
openstack role add \
--user "$USER_ID" \
--project "$PROJECT_B_ID" \
"$ROLE_OBSERVER_ID"
#
# Setup RBAC auditor of Project B
#
USER_ID=$(get_id openstack user create \
--password "$PASSWORD" \
--email "auditor_b@example.net" \
"project_b_auditor")
openstack role add \
--user "$USER_ID" \
--project "$PROJECT_B_ID" \
"$ROLE_AUDIT_ID"
}
# PyKMIP functions
# ----------------
# install_pykmip - install the PyKMIP python module
# create keys and certificate for server
function install_pykmip {
pip_install 'pykmip'
if is_service_enabled pykmip-server; then
[ ! -d ${PYKMIP_CONF_DIR} ] && sudo mkdir -p ${PYKMIP_CONF_DIR}
sudo chown ${USER} ${PYKMIP_CONF_DIR}
[ ! -d ${PYKMIP_LOG_DIR} ] && sudo mkdir -p ${PYKMIP_LOG_DIR}
sudo chown ${USER} ${PYKMIP_LOG_DIR}
init_CA
if [ ! -e ${PYKMIP_SERVER_KEY} ]; then
make_cert ${INT_CA_DIR} 'pykmip-server' 'pykmip-server'
chmod 400 ${PYKMIP_SERVER_KEY}
fi
if [ ! -e ${PYKMIP_CLIENT_KEY} ]; then
make_cert ${INT_CA_DIR} 'pykmip-client' 'pykmip-client'
chmod 400 ${PYKMIP_CLIENT_KEY}
fi
if [ ! -e ${PYKMIP_CONF} ]; then
cat > ${PYKMIP_CONF} <<EOF
[server]
hostname=127.0.0.1
port=5696
certificate_path=${PYKMIP_SERVER_CERT}
key_path=${PYKMIP_SERVER_KEY}
ca_path=${PYKMIP_CA_PATH}
auth_suite=TLS1.2
EOF
fi
fi
}
# configure_pykmip - enable KMIP plugin and configure
function configure_pykmip {
iniset $BARBICAN_CONF secretstore enabled_secretstore_plugins kmip_plugin
iniset $BARBICAN_CONF kmip_plugin username demo
iniset $BARBICAN_CONF kmip_plugin password secretpassword
iniset $BARBICAN_CONF kmip_plugin keyfile ${PYKMIP_CLIENT_KEY}
iniset $BARBICAN_CONF kmip_plugin certfile ${PYKMIP_CLIENT_CERT}
iniset $BARBICAN_CONF kmip_plugin ca_certs ${PYKMIP_CA_PATH}
}
# start_pykmip - start the PyKMIP server
function start_pykmip {
run_process pykmip-server "$BARBICAN_BIN_DIR/pykmip-server -f ${PYKMIP_CONF} -l ${PYKMIP_LOG_DIR}/pykmip-devstack.log"
}
# Dogtag functions
# ----------------
function install_389_directory_server {
# Make sure that 127.0.0.1 resolves to localhost.localdomain (fqdn)
sudo sed -i 's/127.0.0.1[ \t]*localhost localhost.localdomain/127.0.0.1\tlocalhost.localdomain localhost/' /etc/hosts
sudo mkdir -p /etc/389-ds
dscreate create-template ds.tmp
sed -e 's/;root_password = .*/root_password = PASSWORD/g' \
-e 's/;full_machine_name = .*/full_machine_name = localhost.localdomain/g' \
-e 's/;instance_name =.*/instance_name = pki-tomcat/g' \
ds.tmp > ds.inf
rm ds.tmp
sudo mv ds.inf /etc/389-ds/ds.inf
sudo dscreate from-file /etc/389-ds/ds.inf
}
function install_dogtag_ca {
sudo mkdir -p /etc/dogtag
cat > .tmp.ca.cfg <<EOF
[CA]
pki_admin_email=caadmin@example.com
pki_admin_name=caadmin
pki_admin_nickname=caadmin
pki_admin_password=PASSWORD
pki_admin_uid=caadmin
pki_backup_password=PASSWORD
pki_client_database_password=PASSWORD
pki_client_database_purge=False
pki_client_pkcs12_password=PASSWORD
pki_clone_pkcs12_password=PASSWORD
pki_ds_base_dn=dc=ca,dc=example,dc=com
pki_ds_database=ca
pki_ds_password=PASSWORD
pki_hostname=localhost
pki_security_domain_name=EXAMPLE
pki_token_password=PASSWORD
pki_https_port=8373
pki_http_port=8370
pki_ajp_port=8379
pki_tomcat_server_port=8375
EOF
sudo mv .tmp.ca.cfg /etc/dogtag/ca.cfg
sudo pkispawn -v -f /etc/dogtag/ca.cfg -s CA
}
function wait_for_ca {
while true; do
# If the sleep command is executed "as-is", the subprocess that it
# executes will trigger the "exit_trap" and will cause this script to
# fail. To avoid this, we run the sleep command inside this sub-shell,
# so the signal will not be caught in this process.
ca_running=$(sleep 2 && curl -s -k https://localhost:8373/ca/admin/ca/getStatus | grep -c running)
if [[ $ca_running == 1 ]]; then
break
fi
done
}
function install_dogtag_kra {
sudo mkdir -p /etc/dogtag
# Even though we are using localhost.localdomain, the server certificate by
# default will get the real host name for the server. So we need to
# properly configure the KRA to try to communicate with the real host name
# instead of the localhost.
cat > .tmp.kra.cfg <<EOF
[KRA]
pki_admin_cert_file=/root/.dogtag/pki-tomcat/ca_admin.cert
pki_admin_email=kraadmin@example.com
pki_admin_name=kraadmin
pki_admin_nickname=kraadmin
pki_admin_password=PASSWORD
pki_admin_uid=kraadmin
pki_backup_password=PASSWORD
pki_client_database_password=PASSWORD
pki_client_database_purge=False
pki_client_pkcs12_password=PASSWORD
pki_clone_pkcs12_password=PASSWORD
pki_ds_base_dn=dc=kra,dc=example,dc=com
pki_ds_database=kra
pki_ds_password=PASSWORD
pki_hostname=localhost
pki_security_domain_name=EXAMPLE
pki_security_domain_user=caadmin
pki_security_domain_password=PASSWORD
pki_token_password=PASSWORD
pki_https_port=8373
pki_http_port=8370
pki_ajp_port=8379
pki_tomcat_server_port=8375
pki_security_domain_hostname=localhost
pki_security_domain_https_port=8373
EOF
sudo mv .tmp.kra.cfg /etc/dogtag/kra.cfg
sudo pkispawn -v -f /etc/dogtag/kra.cfg -s KRA
}
function install_dogtag_plugin_dependencies {
install_package nss-devel 389-ds-base dogtag-pki
}
function install_dogtag_components {
install_dogtag_plugin_dependencies
install_389_directory_server
install_dogtag_ca
wait_for_ca
install_dogtag_kra
}
# Vault functions
# ----------------
function install_vault {
# Install vault if needed
if [[ ! -x "$(command -v vault)" ]]; then
wget https://releases.hashicorp.com/vault/1.3.0/vault_1.3.0_linux_amd64.zip
unzip vault_1.3.0_linux_amd64.zip
sudo mv vault /usr/bin
fi
install_package screen
TOKEN_ID_FILE="${BARBICAN_DIR}/vault_root_token_id"
local session_name="barbican_vault"
# Clean up first before starting new screen session
existing_ses=$(screen -ls | grep ${session_name} | awk '{print $1}')
if [[ -n "${existing_ses}" ]]; then
screen -S ${existing_ses} -X quit
fi
rm -f ${TOKEN_ID_FILE} vault.log
screen -dmS ${session_name}
screen -S ${session_name} -p bash -X stuff 'vault server -dev 2>&1 >vault.log\n'
# Poll vault.log for the root token id, bounded by a retry counter
touch $TOKEN_ID_FILE
COUNTER=0
while [ ! -s $TOKEN_ID_FILE ] && [ "$COUNTER" -lt "20" ]
do
sleep 2
awk '/Root Token:/ {print $3}' vault.log > $TOKEN_ID_FILE
COUNTER=$((COUNTER + 1))
done
if [ ! -s $TOKEN_ID_FILE ]; then
echo "Wah! Need to throw an error code here!"
fi
export VAULT_ADDR="http://127.0.0.1:8200"
# Enable kv version 1
vault secrets disable secret/
vault secrets enable -version=1 -path=secret -description "kv version 1" kv
#debug code follows:
vault status
vault kv put secret/hello foo=world
vault kv get secret/hello
vault kv delete secret/hello
}
function configure_vault_plugin {
root_token_id=`cat ${BARBICAN_DIR}/vault_root_token_id`
iniset $BARBICAN_CONF secretstore enabled_secretstore_plugins vault_plugin
iniset $BARBICAN_CONF vault_plugin root_token_id $root_token_id
iniset $BARBICAN_CONF vault_plugin vault_url "http://127.0.0.1:8200"
iniset $BARBICAN_CONF vault_plugin use_ssl "false"
}
# Restore xtrace
$XTRACE

devstack/plugin-barbican.sh (new file, 115 lines)

@ -0,0 +1,115 @@
#!/bin/bash
#
# Copyright 2020 VEXXHOST, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Configure the needed tempest options
function configure_barbican_tempest() {
iniset $TEMPEST_CONFIG service_available barbican True
roles="$(iniget $TEMPEST_CONFIG auth tempest_roles)"
if [[ -z $roles ]]; then
roles="creator"
else
roles="$roles,creator"
fi
iniset $TEMPEST_CONFIG auth tempest_roles $roles
}
# check for service enabled
if is_service_enabled barbican; then
if [[ "$1" == "source" || "`type -t install_barbican`" != 'function' ]]; then
# Initial source
source $BARBICAN_DIR/devstack/lib/barbican
fi
if [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing Barbican"
stack_install_service barbican
install_barbicanclient
if is_service_enabled barbican-pykmip; then
echo_summary "Installing PyKMIP"
install_pykmip
fi
if is_service_enabled barbican-dogtag; then
echo_summary "Installing Dogtag"
install_dogtag_components
fi
if is_service_enabled barbican-vault; then
echo_summary "Installing Vault"
install_vault
fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
echo_summary "Configuring Barbican"
configure_barbican
if is_service_enabled barbican-pykmip; then
echo_summary "Configuring KMIP plugin"
configure_pykmip
fi
if is_service_enabled barbican-dogtag; then
echo_summary "Configuring Dogtag plugin"
configure_dogtag_plugin
fi
if is_service_enabled barbican-vault; then
echo_summary "Configuring Vault plugin"
configure_vault_plugin
fi
# Configure Cinder, Nova and Glance to use Barbican
configure_core_services
if is_service_enabled key; then
create_barbican_accounts
fi
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
echo_summary "Initializing Barbican"
init_barbican
start_barbican
if is_service_enabled pykmip-server; then
echo_summary "Starting PyKMIP server"
start_pykmip
fi
elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
if is_service_enabled tempest; then
echo_summary "Configuring Tempest options for Barbican"
configure_barbican_tempest
fi
fi
if [[ "$1" == "unstack" ]]; then
stop_barbican
fi
if [[ "$1" == "clean" ]]; then
cleanup_barbican
fi
fi
# Set the correct config options in Nova, Cinder and Glance
function configure_core_services {
if is_service_enabled n-cpu; then
iniset $NOVA_CONF key_manager backend 'barbican'
fi
if is_service_enabled c-vol; then
iniset $CINDER_CONF key_manager backend 'barbican'
fi
if is_service_enabled g-api; then
iniset $GLANCE_API_CONF key_manager backend 'barbican'
iniset $GLANCE_API_CONF barbican auth_endpoint $KEYSTONE_AUTH_URI_V3
fi
}


@ -17,6 +17,7 @@
define_plugin openstack-operator
source $DEST/openstack-operator/devstack/lib/common
source $DEST/openstack-operator/devstack/lib/barbican
source $DEST/openstack-operator/devstack/lib/glance
source $DEST/openstack-operator/devstack/lib/horizon
source $DEST/openstack-operator/devstack/lib/keystone


@ -0,0 +1,21 @@
# Copyright (c) 2020 VEXXHOST, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM vexxhost/python-builder AS builder
FROM vexxhost/python-base AS barbican-api
COPY barbican-wsgi-api /usr/local/bin/barbican-wsgi-api
EXPOSE 9311
ENV UWSGI_HTTP_SOCKET=:9311 UWSGI_WSGI_FILE=/usr/local/bin/barbican-wsgi-api
CMD ["/usr/local/bin/uwsgi", "--ini", "/etc/uwsgi/uwsgi.ini"]


@ -0,0 +1,33 @@
#!/usr/local/bin/python
# Copyright (c) 2020 VEXXHOST, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
import sentry_sdk
from barbican.api.app import get_api_wsgi_script
from sentry_sdk.integrations import wsgi
VERSION = pkg_resources.get_distribution("barbican").version
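# No DSN is passed here; sentry_sdk.init() falls back to the SENTRY_DSN
# environment variable, which the daemonset template sets from spec.sentryDSN.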
sentry_sdk.init(
release="barbican@%s" % VERSION,
traces_sample_rate=0.1
)
application = get_api_wsgi_script()
application = wsgi.SentryWsgiMiddleware(application)


@ -0,0 +1,2 @@
gcc [compile]
libc-dev [compile]



@ -0,0 +1 @@
--constraint https://releases.openstack.org/constraints/upper/ussuri


@ -0,0 +1,6 @@
uWSGI
PyKMIP
PyMySQL
python-memcached
sentry-sdk
git+https://opendev.org/openstack/barbican@stable/ussuri


@ -0,0 +1,63 @@
# Copyright 2020 VEXXHOST, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""barbican Operator
This module maintains the operator for Mcrouter, it takes care of creating
the appropriate deployments, Mcrouter, pod monitors and Prometheus rules.
"""
from openstack_operator import database
from openstack_operator import identity
from openstack_operator import utils
def create_or_resume(name, spec, **_):
"""Create and re-sync a barbican instance
"""
# deploy mysql for barbican
if "mysql" not in spec:
database.ensure_mysql_cluster("barbican", {})
else:
database.ensure_mysql_cluster("barbican", spec["mysql"])
# deploy memcached
utils.create_or_update('barbican/memcached.yml.j2', spec=spec)
# deploy barbican api
utils.create_or_update('barbican/daemonset.yml.j2',
name=name, spec=spec)
utils.create_or_update('barbican/service.yml.j2',
name=name, spec=spec)
url = None
if "ingress" in spec:
utils.create_or_update('barbican/ingress.yml.j2',
name=name, spec=spec)
url = spec["ingress"]["host"]
identity.ensure_service(name="barbican", service_type="key-manager",
url=url, desc="Barbican Service")
def update(name, spec, **_):
"""Update a barbican
This function updates the deployment for barbican whenever changes happen
within its spec.
"""
if "ingress" in spec:
utils.create_or_update('barbican/ingress.yml.j2',
name=name, spec=spec)
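
For context, a minimal sketch of the barbican section of operator-config that
this module consumes; every key shown is optional and all values are
illustrative placeholders:

    barbican:
      sentryDSN: "https://<key>@sentry.example.com/1"
      ingress:
        host: key-manager.example.com
      mysql: {}
      nodeSelector: {}
      tolerations: []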


@ -27,6 +27,7 @@ import kopf
import sentry_sdk
from sentry_sdk.integrations import aiohttp
from openstack_operator import barbican
from openstack_operator import ceilometer
from openstack_operator import chronyd
from openstack_operator import glance
@ -110,6 +111,9 @@ def deploy(name, namespace, new, **_):
if "magnum" in config:
spec = set_service_config(config, "magnum")
magnum.create_or_resume("magnum", spec)
if "barbican" in config:
spec = config["barbican"]
barbican.create_or_resume("barbican", spec)
if "ceilometer" in config:
spec = config["ceilometer"]
ceilometer.create_or_resume(spec)


@ -0,0 +1,101 @@
---
# Copyright 2020 VEXXHOST, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: barbican
namespace: openstack
labels:
{{ labels("barbican") | indent(4) }}
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
{{ labels("barbican") | indent(6) }}
template:
metadata:
labels:
{{ labels("barbican") | indent(8) }}
annotations:
checksum/config: "{{ config_hash }}"
spec:
automountServiceAccountToken: false
initContainers:
- name: db-upgrade
image: vexxhost/barbican-api:latest
imagePullPolicy: Always
command:
- barbican-manage
- db
- upgrade
- -v
- head
volumeMounts:
- mountPath: /etc/barbican
name: config
containers:
- name: barbican
image: vexxhost/barbican-api:latest
imagePullPolicy: Always
env:
{% if 'sentryDSN' in spec %}
- name: SENTRY_DSN
value: {{ spec.sentryDSN }}
{% endif %}
{% for v in env %}
- name: "{{ v.name }}"
value: "{{ v.value }}"
{% endfor %}
ports:
- name: barbican
protocol: TCP
containerPort: 9311
livenessProbe:
tcpSocket:
port: barbican
readinessProbe:
tcpSocket:
port: barbican
securityContext:
runAsUser: 1001
volumeMounts:
- name: config
mountPath: /etc/barbican
- name: uwsgi-config
mountPath: /etc/uwsgi
volumes:
- name: config
secret:
secretName: barbican-config
- name: uwsgi-config
configMap:
defaultMode: 420
name: uwsgi-default
{% if 'nodeSelector' in spec %}
nodeSelector:
{{ spec.nodeSelector | to_yaml | indent(8) }}
{% endif %}
{% if 'tolerations' in spec %}
tolerations:
{{ spec.tolerations | to_yaml | indent(8) }}
{% endif %}
{% if 'hostAliases' in spec %}
hostAliases:
{{ spec.hostAliases | to_yaml | indent(8) }}
{% endif %}


@ -0,0 +1,57 @@
---
# Copyright 2020 VEXXHOST, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: barbican
namespace: openstack
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
certmanager.k8s.io/cluster-issuer: "letsencrypt-prod"
labels:
{{ labels("barbican") | indent(4) }}
spec:
{% if spec.ingress.host is defined %}
rules:
- host: {{ spec.ingress.host }}
http:
paths:
- path: /
backend:
serviceName: barbican
servicePort: 80
tls:
- hosts:
- {{ spec.ingress.host }}
secretName: barbican-tls
{% else %}
rules:
{% for v in spec.ingress %}
- host: {{ v.host }}
http:
paths:
- path: /
backend:
serviceName: barbican
servicePort: 80
{% endfor %}
tls:
- hosts:
{% for v in spec.ingress %}
- {{ v.host }}
{% endfor %}
secretName: barbican-tls
{% endif %}
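
The template above accepts either a single ingress object or a list of them; a
sketch of both accepted spec forms (hostnames are placeholders):

    # single host
    ingress:
      host: key-manager.example.com

    # or a list of hosts
    ingress:
      - host: key-manager.example.com
      - host: key-manager.internal.example.com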


@ -0,0 +1,32 @@
---
# Copyright 2020 VEXXHOST, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: infrastructure.vexxhost.cloud/v1alpha1
kind: Memcached
metadata:
name: barbican
namespace: openstack
labels:
{{ labels("barbican") | indent(4) }}
spec:
megabytes: 128
{% if 'nodeSelector' in spec %}
nodeSelector:
{{ spec.nodeSelector | to_yaml | indent(4) }}
{% endif %}
{% if 'tolerations' in spec %}
tolerations:
{{ spec.tolerations | to_yaml | indent(4) }}
{% endif %}


@ -0,0 +1,30 @@
---
# Copyright 2020 VEXXHOST, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
name: barbican
namespace: openstack
labels:
{{ labels("barbican") | indent(4) }}
spec:
ports:
- name: barbican
port: 80
protocol: TCP
targetPort: barbican
selector:
{{ labels("barbican") | indent(4) }}


@ -60,6 +60,7 @@
- name: Copy Zuul repo into devstack working directory
become: true
command: rsync -av src/opendev.org/vexxhost/openstack-operator /opt/stack
- name: Copy heat repo into devstack working directory
git:
repo: https://github.com/openstack/heat
@ -81,6 +82,18 @@
become: true
command: rsync -av src/opendev.org/vexxhost/openstack-operator/devstack/plugin-magnum.sh /opt/stack/magnum/devstack/plugin.sh
- name: Copy barbican repo into devstack working directory
git:
repo: https://github.com/openstack/barbican
dest: /opt/stack/barbican
become: true
- name: Override barbican lib functions
become: true
command: rsync -av src/opendev.org/vexxhost/openstack-operator/devstack/lib/barbican /opt/stack/barbican/devstack/lib/barbican
- name: Override barbican plugin.sh
become: true
command: rsync -av src/opendev.org/vexxhost/openstack-operator/devstack/plugin-barbican.sh /opt/stack/barbican/devstack/plugin.sh
# Changes that run through devstack-tempest are likely to have an impact on
# the devstack part of the job, so we keep devstack in the main play to
# avoid zuul retrying on legitimate failures.

zuul.d/barbican-jobs.yaml (new file, 42 lines)

@ -0,0 +1,42 @@
- job:
name: openstack-operator:images:build:barbican
parent: vexxhost-build-docker-image
provides: openstack-operator:image:barbican
nodeset: &id001
nodes:
- name: ubuntu-bionic
label: ubuntu-bionic-vexxhost
vars: &id002
docker_images:
- context: images/barbican
repository: vexxhost/barbican-api
target: barbican-api
dependencies:
- openstack-operator:images:build:openstack-operator
files: &id003
- ^images/barbican/.*
- job:
name: openstack-operator:images:upload:barbican
parent: vexxhost-upload-docker-image
provides: openstack-operator:image:barbican
nodeset: *id001
vars: *id002
dependencies:
- openstack-operator:images:upload:openstack-operator
files: *id003
- job:
name: openstack-operator:images:promote:barbican
parent: vexxhost-promote-docker-image
nodeset: *id001
vars: *id002
files: *id003
- project:
check:
jobs:
- openstack-operator:images:build:barbican
gate:
jobs:
- openstack-operator:images:upload:barbican
promote:
jobs:
- openstack-operator:images:promote:barbican


@ -7,6 +7,8 @@
run: playbooks/functional/run.yaml
post-run: playbooks/functional/post.yaml
required-projects:
- openstack/barbican
- openstack/barbican-tempest-plugin
- openstack/devstack-plugin-ceph
- openstack/heat
- openstack/heat-tempest-plugin
@ -29,19 +31,21 @@
heat: https://github.com/openstack/heat
magnum: https://github.com/openstack/magnum
devstack-plugin-ceph: https://github.com/openstack/devstack-plugin-ceph
barbican: https://github.com/openstack/barbican
devstack_source_dirs:
- src/opendev.org/openstack
- src/opendev.org/vexxhost
tox_envlist: all
tempest_test_regex: (\[.*\bsmoke\b.*\]|(^heat_tempest_plugin.tests.api)|(^tempest_horizon.tests.scenario))
tempest_black_regex: ^tempest.scenario.test_network_basic_ops
tempest_test_regex: (\[.*\bsmoke\b.*\]|(^heat_tempest_plugin.tests.api)|(^tempest_horizon.tests.scenario)|(^barbican_tempest_plugin.tests.api)|(^barbican_tempest_plugin.tests.scenario))
tempest_black_regex: (^tempest.scenario.test_network_basic_ops|barbican_tempest_plugin.tests.scenario.(test_image_signing.ImageSigningTest.test_signed_image_upload_boot_failure|test_volume_encryption.VolumeEncryptionTest.test_encrypted_cinder_volumes_cryptsetup))
tempest_plugins:
- barbican-tempest-plugin
- heat-tempest-plugin
- magnum-tempest-plugin
- tempest-horizon
devstack_localrc:
TEMPEST_PLUGINS: /opt/stack/heat-tempest-plugin /opt/stack/magnum-tempest-plugin
/opt/stack/tempest-horizon
TEMPEST_PLUGINS: /opt/stack/barbican-tempest-plugin /opt/stack/heat-tempest-plugin
/opt/stack/magnum-tempest-plugin /opt/stack/tempest-horizon
docker_use_buildset_registry: true
minikube_dns_resolvers: [1.1.1.1, 8.8.8.8]
ensure_kubernetes_minikube_addons: [metrics-server]
@ -51,53 +55,57 @@
jobs:
- openstack-operator:functional:
dependencies:
- name: openstack-operator:images:build:heat
soft: true
- name: openstack-operator:images:build:ceilometer
soft: true
- name: openstack-operator:images:build:rabbitmq
soft: true
- name: openstack-operator:images:build:keystone
- name: openstack-operator:images:build:mcrouter-exporter
soft: true
- name: openstack-operator:images:build:horizon
soft: true
- name: openstack-operator:images:build:magnum
soft: true
- name: openstack-operator:images:build:glance
soft: true
- name: openstack-operator:images:build:rabbitmq
soft: true
- name: openstack-operator:images:build:ceilometer
soft: true
- name: openstack-operator:images:build:memcached-exporter
soft: true
- name: openstack-operator:images:build:memcached
soft: true
- name: openstack-operator:images:build:keystone
soft: true
- name: openstack-operator:images:build:mcrouter
soft: true
- openstack-operator:images:build:openstack-operator
- name: openstack-operator:images:build:glance
- name: openstack-operator:images:build:barbican
soft: true
- name: openstack-operator:images:build:mcrouter-exporter
soft: true
- name: openstack-operator:images:build:memcached-exporter
soft: true
- name: openstack-operator:images:build:magnum
- name: openstack-operator:images:build:heat
soft: true
gate:
jobs:
- openstack-operator:functional:
dependencies:
- name: openstack-operator:images:upload:heat
soft: true
- name: openstack-operator:images:upload:ceilometer
soft: true
- name: openstack-operator:images:upload:rabbitmq
soft: true
- name: openstack-operator:images:upload:keystone
- name: openstack-operator:images:upload:mcrouter-exporter
soft: true
- name: openstack-operator:images:upload:horizon
soft: true
- name: openstack-operator:images:upload:magnum
soft: true
- name: openstack-operator:images:upload:glance
soft: true
- name: openstack-operator:images:upload:rabbitmq
soft: true
- name: openstack-operator:images:upload:ceilometer
soft: true
- name: openstack-operator:images:upload:memcached-exporter
soft: true
- name: openstack-operator:images:upload:memcached
soft: true
- name: openstack-operator:images:upload:keystone
soft: true
- name: openstack-operator:images:upload:mcrouter
soft: true
- openstack-operator:images:upload:openstack-operator
- name: openstack-operator:images:upload:glance
- name: openstack-operator:images:upload:barbican
soft: true
- name: openstack-operator:images:upload:mcrouter-exporter
soft: true
- name: openstack-operator:images:upload:memcached-exporter
soft: true
- name: openstack-operator:images:upload:magnum
- name: openstack-operator:images:upload:heat
soft: true