Retire Solum: remove repo content

The Solum project is retiring:
- https://review.opendev.org/c/openstack/governance/+/919211

This commit removes the content of this project's repo.

Change-Id: Ide264835cf6bcd5596851b283f465f6bd5718840
Author: Ghanshyam Mann, 2024-05-10 12:38:01 -07:00 (committed by Ghanshyam)
parent f6640157df
commit 3860cebef1
585 changed files with 8 additions and 37522 deletions
.coveragerc, .gitreview, .stestr.conf, .zuul.yaml, CONTRIBUTING.rst, HACKING.rst, LICENSE, README.rst
contrib
devstack
doc
etc

@ -1,7 +0,0 @@
[run]
branch = True
source = solum
omit = solum/tests/*
[report]
ignore_errors = True

@ -1,4 +0,0 @@
[gerrit]
host=review.opendev.org
port=29418
project=openstack/solum.git

@ -1,3 +0,0 @@
[DEFAULT]
test_path=${OS_TEST_PATH:-./solum/tests}
top_dir=./

@ -1,97 +0,0 @@
- project:
queue: solum
templates:
- openstack-cover-jobs
- openstack-python3-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
- check-requirements
check:
jobs:
- solum-devstack
- solum-tempest-ipv6-only
- solum-tox-bandit-baseline:
voting: false
gate:
jobs:
- solum-devstack
- solum-tempest-ipv6-only
- job:
name: solum-devstack
parent: solum-tempest-base
- job:
name: solum-tempest-base
parent: devstack-tempest
irrelevant-files: &base_irrelevant_files
- ^(test-|)requirements.txt$
- ^setup.cfg$
- ^doc/.*$
- ^.*\.rst$
- ^releasenotes/.*$
- ^solum/tests/.*$
- ^contrib/.*$
- ^examples/.*$
- ^tools/.*$
timeout: 7800
required-projects: &base_required_projects
- openstack/devstack
- openstack/devstack-gate
- openstack/devstack-plugin-container
- openstack/python-solumclient
- openstack/python-zunclient
- openstack/solum
- openstack/solum-dashboard
- openstack/solum-tempest-plugin
- openstack/zun
- openstack/zun-tempest-plugin
vars: &base_vars
devstack_plugins:
solum: https://opendev.org/openstack/solum
zun: https://opendev.org/openstack/zun
devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container
devstack_services:
tempest: true
s-account: true
s-container: true
s-object: true
s-proxy: true
tls-proxy: false
devstack_localrc:
TEMPEST_PLUGINS: '/opt/stack/solum-tempest-plugin'
USE_PYTHON3: True
KEYSTONE_ADMIN_ENDPOINT: true
tempest_test_regex: application_deployment
tox_envlist: all
- job:
name: solum-tempest-ipv6-only
parent: devstack-tempest-ipv6
description: |
Solum devstack tempest tests job for IPv6-only deployment
irrelevant-files: *base_irrelevant_files
required-projects: *base_required_projects
timeout: 7800
vars: *base_vars
- job:
# Security testing for known issues
name: solum-tox-bandit-baseline
parent: openstack-tox
timeout: 2400
vars:
tox_envlist: bandit-baseline
required-projects:
- openstack/requirements
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^releasenotes/.*$
- ^setup.cfg$
- ^tools/.*$
- ^contrib/.*$
- ^solum/hacking/.*$
- ^solum/locale/.*$
- ^solum/tests/.*$
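For reference, the non-voting security job above wraps a tox environment; a local reproduction sketch (assuming a `bandit-baseline` env exists in the repo's tox.ini, as `tox_envlist` implies):
```
pip install tox
tox -e bandit-baseline
```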

@ -1,27 +0,0 @@
==========================
How to contribute to Solum
==========================
If you would like to contribute to Solum, please see our contributing wiki:
https://wiki.openstack.org/wiki/Solum/Contributing
We have the same CLA requirements as OpenStack, so you must follow the steps
in the "If you're a developer, start here" section of this page:
https://docs.openstack.org/infra/manual/developers.html
Once those steps have been completed, submit your changes for review via
the Gerrit tool, following the workflow documented at:
https://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:
https://bugs.launchpad.net/solum
For tips to help with running unit tests and functional tests on your code,
see:
https://wiki.openstack.org/wiki/Solum/Testing
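For reference, the Gerrit workflow linked above boils down to roughly this (a sketch, assuming git-review is installed; the branch name is hypothetical):
```
pip install git-review
git clone https://opendev.org/openstack/solum
cd solum
git checkout -b my-fix        # hypothetical topic branch
# ...edit and commit...
git review                    # pushes the change to review.opendev.org
```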

@ -1,12 +0,0 @@
========================
Solum Style Commandments
========================
Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/
See Also: CONTRIBUTING.rst
Solum Specific Commandments
---------------------------
- [M322] Method's default argument shouldn't be mutable.

LICENSE (175 lines removed)

@ -1,175 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

@ -1,57 +1,10 @@
========================
Team and repository tags
========================
This project is no longer maintained.
.. image:: https://governance.openstack.org/tc/badges/solum.svg
:target: https://governance.openstack.org/tc/reference/tags/index.html
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
.. Change things from this point on
=====
Solum
=====
Application Lifecycle Management
An OpenStack project designed to make cloud services easier to
consume and integrate into your application development process.
Overview
--------
Solum is natively designed for OpenStack clouds and leverages numerous
OpenStack projects, including Keystone, Swift, Glance, Heat, Nova, Trove, and more.
We value vendor neutrality, open design and collaboration, and leveraging existing
solutions where possible. One example is our use of Docker for deployment of
containers. Multiple language run-time environments will be supported with a
modular "language pack" solution so you can easily run applications written
in any language of your choice.
* Free software: Apache 2.0 License. See LICENSE file.
* Documentation: https://docs.openstack.org/solum/latest/
Development-setup with Solum
-----------------------------
See the solum-development-setup_ guide.
.. _solum-development-setup: https://wiki.openstack.org/wiki/Solum/solum-development-setup
Getting started with Solum
--------------------------
See the getting_started_ guide.
.. _getting_started: https://docs.openstack.org/solum/latest/user/index.html
Project Info
-------------
* Source Code: https://opendev.org/openstack/solum/
* Wiki: https://wiki.openstack.org/wiki/Solum
* Launchpad: https://launchpad.net/solum
* Blueprints: https://blueprints.launchpad.net/solum
* Bugs: https://bugs.launchpad.net/solum
* Code Reviews: https://review.opendev.org/#/q/project:openstack/solum
https://review.opendev.org/#/q/project:openstack/python-solumclient
* IRC: #solum at chat.oftc.net
* Release notes: https://docs.openstack.org/releasenotes/solum/
For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
OFTC.
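For anyone who needs the removed sources, the retirement notice above boils down to (a sketch):
```
git clone https://opendev.org/openstack/solum
cd solum
git checkout HEAD^1   # the tree as it was before this retirement commit
```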

@ -1,11 +0,0 @@
The contrib/devstack/ directory contains the files necessary to integrate Solum with devstack.
Install Docker, to be used by our build service: http://docs.docker.io/installation/
To install devstack, run the setup script provided in the Solum code tree::
export SOLUM_DIR=/path/to/solum/source/dir
sh ${SOLUM_DIR}/contrib/devstack/setup_devstack.sh
Note that this setup will produce virtual machines, not Docker containers.
For an example of the Docker setup, see: https://wiki.openstack.org/wiki/Solum/Docker

@ -1,18 +0,0 @@
# This directory contains common reusable code for lp-cedarish and lp-dockerfile.
# TODO(ravips): Reorganize language pack directory structure
# solum/contrib--> language-pack
# |
# --> common (code common to all lp formats)
# |
# --> formats (supported formats)
# |
# --> cedarish
# | |
# | --> common (code common to cedarish formats)
# | |
# | --> docker
# | |
# | --> vm
# |
# --> dockerfile

@ -1,46 +0,0 @@
#!/bin/bash
# Note: bash (not sh) is required for the [[ ]] tests used below.
location="%location%"
dep_unit="%du%"
publish_ports="%publish_ports%"
trial_count=3
# Try wget and docker_load
stage1_success=false
RETRIES=0
until [ $RETRIES -ge $trial_count ]; do
wget "$location" --output-document="$dep_unit"
docker load < $dep_unit
if [[ $? == 0 ]]; then
stage1_success=true
break
fi
RETRIES=$((RETRIES+1))
sleep 2
done
if [ "$stage1_success" = false ]; then
wc_notify --data-binary '{"status": "FAILURE", "reason": "wget and docker load failed."}'
fi
# Try docker run
docker_run_success=false
RETRIES=0
until [ $RETRIES -ge $trial_count ]; do
docker run $publish_ports -d $dep_unit
docker ps | grep $dep_unit
if [[ $? == 0 ]]; then
docker_run_success=true
break
fi
RETRIES=$((RETRIES+1))
sleep 2
done
if [ "$docker_run_success" = false ]; then
wc_notify --data-binary '{"status": "FAILURE", "reason": "docker run failed."}'
else
wc_notify --data-binary '{"status": "SUCCESS"}'
fi
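For context, `wc_notify` above is not a standalone binary; in a Heat template it is typically substituted with the wait condition handle's `curl_cli`. A rough stand-in for testing outside Heat (the URL is an assumption) might be:
```
# Hypothetical stand-in for Heat's wc_notify substitution
WC_URL="https://heat.example.com:8000/v1/waitcondition/example"   # assumption
wc_notify () {
    curl -i -X POST -H 'Content-Type: application/json' "$@" "$WC_URL"
}
```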

@ -1,225 +0,0 @@
#!/bin/bash
# Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Common functions for build-app and unittest-app
# Add a timestamp and append a JSON-formatted message to $LOG_FILE.
function TLOG () {
local MESSAGE="$*"
if [ ! -z "$MESSAGE" ]; then
local LOGFILE=${LOG_FILE:-/dev/null}
local TIMESTAMP=$(date --iso-8601=seconds)
JSONMESSAGE="{ \"@timestamp\": \"$TIMESTAMP\", \"project_id\": \"$PROJECT_ID\", \"commit_id\": \"$COMMIT_ID\", \"stage_id\": \"$BUILD_ID\", \"task\": \"$TASKNAME\", \"message\": \"$MESSAGE\"}"
echo $JSONMESSAGE >> $LOGFILE
fi
}
# Variant used to overwrite TLOG: additionally tags messages produced by the user's build.
function TLOG_FILTERED () {
local MESSAGE="$*"
if [ ! -z "$MESSAGE" ]; then
# This is deliberately not a local, as it's to persist between calls to TLOG.
_USER=${_USER:-false}
echo "$MESSAGE" | grep -i "drone build results" && _USER=false
local LOGFILE=${LOG_FILE:-/dev/null}
local TIMESTAMP=$(date --iso-8601=seconds)
JSONMESSAGE="{ \"@timestamp\": \"$TIMESTAMP\", \"project_id\": \"$PROJECT_ID\", \"commit_id\": \"$COMMIT_ID\", \"stage_id\": \"$BUILD_ID\", \"task\": \"$TASKNAME\", \"message\": \"$MESSAGE\", \"_user\": \"$_USER\" }"
echo $JSONMESSAGE >> $LOGFILE
echo "$MESSAGE" | grep -i "starting build" && _USER=true
export _USER
fi
}
# Build the logfile name, and ensure it exists.
function GET_LOGFILE () {
local LOG_DIR=${SOLUM_TASK_DIR:-/dev/null}
if [ "$LOG_DIR" != "/dev/null" ]; then
sudo mkdir -p "$LOG_DIR"
sudo chmod a+w "$LOG_DIR"
fi
local LOG_FILE=/dev/null
if [ "$LOG_DIR" != "/dev/null" ]; then
LOG_FILE="$LOG_DIR/$TASKNAME-$BUILD_ID.log"
touch $LOG_FILE
fi
echo $LOG_FILE
}
# Get time elapsed since $1.
function elapsed () {
local START=$1
local NOW=$(date +"%s")
expr $NOW - $START
}
# Profile and run a command, and return its exit code.
function PRUN () {
# If the first argument is "silent", then set a flag and shift.
local SILENT=false
if [ "$1" == "silent" ]; then
SILENT=true
shift
fi
local CMD="$*"
local LOGFILE=${LOG:-/dev/null}
if $SILENT; then
LOGFILE=/dev/null
fi
if ! $SILENT; then
TLOG Starting: $CMD
fi
local EXIT_STATUS
local START=$(date +"%s")
if $SILENT; then
$CMD 2>&1 >> /dev/null; test ${PIPESTATUS[0]} -eq 0
EXIT_STATUS=$?
else
$CMD 2>&1 > >(while read LINE; do TLOG $LINE; done)
EXIT_STATUS=$?
fi
local ELAPSED=$(elapsed $START)
local SUCCESS
[ $EXIT_STATUS -eq 0 ] && SUCCESS="Finished" || SUCCESS="FAILED"
if ! $SILENT; then
TLOG $SUCCESS: $CMD "[Elapsed: $ELAPSED sec] (EXIT_STATUS=$EXIT_STATUS)"
fi
return $EXIT_STATUS
}
# Register ssh private key with ssh-agent.
# The SSH_AUTH_SOCK env variable will be unique to this process,
# which restricts other apps from accessing the current ssh credentials.
function add_ssh_creds () {
local SSH_PRIVATE_KEY=$1
local APP_DIR=$2
if [ -n "$SSH_PRIVATE_KEY" ]; then
eval `ssh-agent -s`
SSH_PRIVATE_KEY_FILE=$APP_DIR/.creds
echo "$SSH_PRIVATE_KEY" > $SSH_PRIVATE_KEY_FILE
chmod 600 $SSH_PRIVATE_KEY_FILE
ssh-add $SSH_PRIVATE_KEY_FILE ; EXIT_STATUS=$?
rm -f $SSH_PRIVATE_KEY_FILE
# Seed known_hosts so later git commands don't prompt (this command itself exits non-zero)
ssh -o StrictHostKeyChecking=no git@github.com
return $EXIT_STATUS
else
return 0
fi
}
# De-register ssh private key with ssh-agent.
function remove_ssh_creds () {
local SSH_PRIVATE_KEY=$1
if [ -n "$SSH_PRIVATE_KEY" ]; then
ssh-agent -k
fi
}
function test_public_repo () {
local GIT_REPO=$1
if [[ -z $GIT_PRIVATE_KEY ]]; then
curl -If ${GIT_REPO%%.git} > /dev/null 2>&1
return $?
fi
return 0
}
function git_clone_with_retry () {
local GIT_REPO=$1
local DESTINATION=$2
shift; shift
local SINGLEBRANCH=$1
RETRIES=0
until [ $RETRIES -ge 5 ]; do
rm -rf $DESTINATION
PRUN git clone $SINGLEBRANCH $GIT_REPO $DESTINATION && return 0
RETRIES=$((RETRIES+1))
sleep 5
done
return 1
}
function git_clone_with_commit_sha_retry () {
local GIT_REPO=$1
local DESTINATION=$2
local COMMIT_SHA=$3
RETRIES=0
until [ $RETRIES -ge 5 ]; do
rm -rf $DESTINATION
PRUN git clone $GIT_REPO $DESTINATION && cd $DESTINATION && git checkout $COMMIT_SHA && cd - && return 0
RETRIES=$((RETRIES+1))
sleep 5
done
return 1
}
function glance_upload_with_retry () {
local DU_IMG_TAG=$1
shift
sudo docker save "$DU_IMG_TAG" | glance --os-image-api-version 2 --os-auth-token $OS_AUTH_TOKEN image-create --container-format=docker --disk-format=raw --name "$DU_IMG_TAG" >& /dev/null && return 0
sleep 2
sudo docker save "$DU_IMG_TAG" | glance --os-image-api-version 2 --os-auth-token $OS_AUTH_TOKEN image-create --container-format=docker --disk-format=raw --name "$DU_IMG_TAG" >& /dev/null
}
function docker_build_with_retry () {
local APP_NAME=$1
local DESTINATION=$2
shift; shift
PRUN sudo docker build --force-rm=true -t $APP_NAME $DESTINATION && return 0
sleep 2
PRUN sudo docker build --no-cache --force-rm=true -t $APP_NAME $DESTINATION
}
function docker_load_with_retry () {
local FILE=$1
shift
sudo docker load -i $FILE && return 0
sleep 1
sudo docker load -i $FILE
}
function docker_save_with_retry () {
local OUTPUT_FILE=$1
shift
local DOCKER_IMG=$1
sudo docker save --output $OUTPUT_FILE $DOCKER_IMG && return 0
sleep 1
sudo docker save --output $OUTPUT_FILE $DOCKER_IMG
}
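A minimal usage sketch for the helpers above (paths and IDs are assumptions):
```
#!/bin/bash
export PROJECT_ID=proj-1 COMMIT_ID=abc123 BUILD_ID=bld-1 TASKNAME=example
export SOLUM_TASK_DIR=/var/log/solum   # assumption; logging defaults to /dev/null otherwise
source ./common/utils                  # this file
LOG_FILE=$(GET_LOGFILE)
TLOG "hello from the build"            # appends a JSON line to $LOG_FILE
PRUN sleep 1                           # logged and timed; exit status preserved
PRUN silent true                       # runs without logging
```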

@ -1,6 +0,0 @@
Setup
-----
These scripts require installation of OpenStack diskimage-builder
https://opendev.org/openstack/diskimage-builder
The package is available on Ubuntu, Fedora, and Red Hat Enterprise Linux
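A sketch of one install route (pip; the distro packages noted above also work):
```
pip install diskimage-builder
disk-image-create -h   # sanity-check the tool is on PATH
```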

@ -1,3 +0,0 @@
#!/bin/bash
ELEMENTS_PATH=./elements disk-image-create --no-tmpfs -a amd64 vm fedora tomcat -o f19-tomcat7-openjdk1.7.qcow2

@ -1,4 +0,0 @@
#!/bin/bash
DIB_RELEASE="raring"
disk-image-create --no-tmpfs -a amd64 vm ubuntu -o u1304-py33.qcow2

@ -1,5 +0,0 @@
#!/bin/bash
# Install tomcat
install-packages tomcat7 maven

@ -1,178 +0,0 @@
Git Push POC
============
## About
A git push workflow example that combines with `contrib/lp-cedarish/vm-slug`.
These scripts represent a workflow that Solum could use internally to create and deploy applications. The `create-app` and `deploy-app` scripts would be Solum CLI commands; the rest would be internal Solum magic, represented as scripts purely for POC purposes.
These scripts should never be part of a production install of Solum and will likely be removed as their workflows are integrated into Solum itself, or discarded if alternative workflows are used.
These scripts may rely on external tools and applications stored on GitHub which may change without notice. You should vet these before running, and should probably also fork them and repoint the scripts at your forks.
See `contrib/lp-cedarish/vm-slug/README.md` for more details on the build environment.
## Requirements
### Devstack
If you want to run this in Vagrant you can use the following canned devstack
```
$ git clone https://github.com/rackerlabs/vagrant-solum-dev.git
$ cd vagrant-solum-dev
$ SOLUM=/path/to/code vagrant up devstack
```
## Using
### Install
Run this as the same user you installed devstack with, to get a free ride on the passwordless sudo bus. This will set up your git server, download a pre-baked cedarish VM, set up the build tooling, and push the cedarish VM to glance.
```
$ /solum/contrib/example-gitpush/prepare
```
### Create Application
This will add a repo to the solum git server and add it as a remote of your local git repo.
```
$ cd /tmp
$ git clone https://github.com/paulczar/example-nodejs-express.git nodejs
$ cd nodejs
$ /solum/contrib/example-gitpush/create-app
[master 9a0f18d] adding repo and key for nodejs
Committer: vagrant <vagrant@devstack>
1 file changed, 4 insertions(+)
Counting objects: 7, done.
Delta compression using up to 2 threads.
Compressing objects: 100% (3/3), done.
Writing objects: 100% (4/4), 366 bytes, done.
Total 4 (delta 1), reused 0 (delta 0)
remote: Initialized empty Git repository in /opt/git/repositories/nodejs.git/
To git@127.0.0.1:gitolite-admin
75db5b8..9a0f18d master -> master
```
### Push your data to the App
```
$ git push solum master
1 file changed, 1 insertion(+)
Counting objects: 5, done.
Delta compression using up to 2 threads.
Compressing objects: 100% (3/3), done.
Writing objects: 100% (3/3), 271 bytes, done.
Total 3 (delta 2), reused 0 (delta 0)
remote: SHA of head e046e00eb09b72bee130430159127a342c01cc94
-----> Node.js app detected
-----> Requested node range: 0.10.x
-----> Resolved node version: 0.10.25
-----> Downloading and installing node
-----> Restoring node_modules directory from cache
-----> Pruning cached dependencies not specified in package.json
npm WARN package.json example-nodejs-express@0.0.1 No repository field.
-----> Installing dependencies
npm WARN package.json example-nodejs-express@0.0.1 No repository field.
-----> Caching node_modules directory for future builds
-----> Cleaning up node-gyp and npm artifacts
-----> Building runtime environment
-----> Discovering process types
Procfile declares types -> web
-----> Compiled slug size is 5.6M
remote: 62441108436bfed9e21cf44db167022fe60c65caa99fb4ca82d45f9de64ce0b9
remote: APP BUILT!!
To git@127.0.0.1:nodejs
a2e48d4..e046e00 master -> master
```
### Deploy your app
This will create a security group, an ssh key in `/opt/solum/apps/<sha>/key.priv`, and launch a VM. The VM uses user-data stored in `/opt/solum/apps/<sha>/user-data.txt` to download, install, and run the application slug built during the git push.
```
$ /solum/contrib/example-gitpush/deploy-app
+----+--------+----------------------------------+
| Id | Name | Description |
+----+--------+----------------------------------+
| 4 | nodejs | allow ssh/web to nodejs instance |
+----+--------+----------------------------------+
+-------------+-----------+---------+-----------+--------------+
| IP Protocol | From Port | To Port | IP Range | Source Group |
+-------------+-----------+---------+-----------+--------------+
| tcp | 22 | 22 | 0.0.0.0/0 | |
+-------------+-----------+---------+-----------+--------------+
+-------------+-----------+---------+-----------+--------------+
| IP Protocol | From Port | To Port | IP Range | Source Group |
+-------------+-----------+---------+-----------+--------------+
| tcp | 5000 | 5000 | 0.0.0.0/0 | |
+-------------+-----------+---------+-----------+--------------+
+--------------------------------------+-------------------------------------------------+
| Property | Value |
+--------------------------------------+-------------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | nova |
| OS-EXT-STS:power_state | 0 |
| OS-EXT-STS:task_state | scheduling |
| OS-EXT-STS:vm_state | building |
| OS-SRV-USG:launched_at | - |
| OS-SRV-USG:terminated_at | - |
| accessIPv4 | |
| accessIPv6 | |
| adminPass | 5ovU9E2pnMZR |
| config_drive | |
| created | 2014-01-31T19:42:39Z |
| flavor | m1.small (2) |
| hostId | |
| id | 9b50c765-e811-44a7-bf0f-a5d52ad6dbb4 |
| image | cedarish (9654b39e-a340-4ddf-822b-ad67f9aa5f5c) |
| key_name | - |
| metadata | {} |
| name | nodejs01 |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| security_groups | nodejs |
| status | BUILD |
| tenant_id | 06a788275a6f458cb1ad6ab061face7f |
| updated | 2014-01-31T19:42:39Z |
| user_id | 693cbc7b4cf04c8a9e4cabfb49a37d62 |
+--------------------------------------+-------------------------------------------------+
```
If you're running this in devstack on a VM on your laptop like I do, this step can take some time. I usually kick off a `watch` job so I can see what is happening:
```
$ source ~/devstack/openrc
$ nova list
+--------------------------------------+----------+--------+------------+-------------+-----------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+----------+--------+------------+-------------+-----------------------+
| 1b380721-2030-4797-b829-bb4bb38cd633 | nodejs01 | ACTIVE | - | Running | private=192.168.78.21 |
+--------------------------------------+----------+--------+------------+-------------+-----------------------+
$ watch 'nova console-log --length=5 1b380721-2030-4797-b829-bb4bb38cd633'
```
When the output looks like this, it's done:
```
Every 2.0s: nova console-log --length=5 40f95d1b-eee3-4de6-8549-15eaff2cca95 Fri Jan 31 21:13:40 2014
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEt/agOdXF6PCQYOeC3JjCETxNx963vS7N64kWePccQ/g6iDoGWO1TrCahEo2H88RtVtqcGqr8NJggGf3FyLuWY= root@nodejs01
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDtG6nLUPZf4GC3+bc8SLOLZq4Jzm/untvTYfr0+0mAitnVT7O9ckUtLrIi4YiQLAmKJB5FCczOtEF4gek2gdW3kgMmTmMmfWBfXOsWMRDltGAy0zoQu6pcjWiXUg88H/5BMfQF2QExDf02Dt26yhBSOPf
MIdFc+VT+Qs77E/kp3ErP5y3oH6b3prpQSaBOucMnLkFp9nIpM3Jukcycuj2GMY4XlSuw0gvxPZNkZSNUvuNWClipxnPyKy8i/47YUR+JblRFN7CpdmwUGltNnx8YWHO9maH57uDrIusJ5c4sxUa1ZlNLxEWmT03QVSIhgtTbLcbCWz3VKpadnZXC9O// r
oot@nodejs01
-----END SSH HOST KEY KEYS-----
Cloud-init v. 0.7.3 finished at Fri, 31 Jan 2014 21:12:11 +0000. Datasource DataSourceConfigDriveNet [net,ver=2][source=/dev/sr0]. Up 285.18 seconds
```
You should then be able to hit the app:
```
$ curl 192.168.78.21:5000
Hello World
```
A lot of effort to go to for a hello world...

@ -1,38 +0,0 @@
#!/bin/bash
DIR=`dirname $0`
CODE_DIR=`pwd`
if [[ ! -f $CODE_DIR/.git/config ]]; then
echo 'you must run this from inside a git repo'
echo $CODE_DIR/.git/config
exit 1
fi
APP=`basename $CODE_DIR`
cd ~/gitolite-admin
# add repo to solum git server
cat << EOF >> ~/gitolite-admin/conf/gitolite.conf
repo $APP
RW+ = admin
option hook.post-receive = build
EOF
git commit -am "adding repo and key for $APP"
git push origin master
cd $CODE_DIR
# add upstream
grep '\[remote "solum"\]' $CODE_DIR/.git/config > /dev/null
if [ $? != 0 ]; then
cat << EOF >> $CODE_DIR/.git/config
[remote "solum"]
url = git@127.0.0.1:$APP
EOF
else
echo 'already has solum remote'
fi

@ -1,35 +0,0 @@
#!/bin/bash
DIR=`dirname $0`
CODE_DIR=`pwd`
APP=`basename $CODE_DIR`
SHA=$(head -n 1 .git/refs/heads/master)
if [[ ! -d /opt/solum/apps/$SHA ]]; then
echo 'no app found for your current ref'
exit 1
fi
[[ -f ./openrc ]] && . ./openrc
[[ -f ~/devstack/openrc ]] && . ~/devstack/openrc
nova list 2> /dev/null > /dev/null
if [ $? != 0 ]; then
echo 'cannot talk to nova. check your OpenStack credentials'
exit 1
fi
nova secgroup-create $APP "allow ssh/web to $APP instance"
nova secgroup-add-rule $APP tcp 22 22 0.0.0.0/0
nova secgroup-add-rule $APP tcp 5000 5000 0.0.0.0/0
if [[ `nova keypair-list | grep ${APP} | wc -l` == 0 ]]; then
nova keypair-add ${APP}_key > /opt/solum/apps/$SHA/key.priv
chmod 0600 /opt/solum/apps/$SHA/key.priv
fi
nova boot --flavor=2 --image=cedarish --security-groups=$APP \
--key-name=${APP}_key --user-data=/opt/solum/apps/$SHA/user-data.txt \
${APP}01

@ -1,22 +0,0 @@
#!/bin/bash
DIR=`dirname $0`
GIT_DIR=/opt/git
mkdir -p $GIT_DIR/admin_keys
mkdir -p $GIT_DIR/bin
git clone git://github.com/sitaramc/gitolite $GIT_DIR/source
cd /opt/git
source/install -to $GIT_DIR/bin
bin/gitolite setup -pk $GIT_DIR/admin_keys/admin.pub
# link hooks dir
ln -s ${DIR}/site-local ${GIT_DIR}/local
# enable hooks
sed -i "s/# 'repo-specific-hooks',/'repo-specific-hooks',/" $GIT_DIR/.gitolite.rc
sed -i 's/^.*\$ENV{HOME}\/local.*$/\tLOCAL_CODE\t\t => "\$ENV{HOME}\/local",/' $GIT_DIR/.gitolite.rc
$GIT_DIR/bin/gitolite setup --hooks-only

@ -1,39 +0,0 @@
#!/bin/bash
DIR=`dirname $0`
GIT_DIR=/opt/git
# check if git user already exists
id git
if [ $? != 0 ]; then
sudo useradd git -d /opt/git -m
fi
echo Prepare VM-Slug
/solum/contrib/lp-cedarish/vm-slug/prepare
echo Download Cedarish Image
/solum/contrib/lp-cedarish/vm-slug/download-cedarish
sudo mkdir -p $GIT_DIR
sudo mkdir -p $GIT_DIR/admin_keys
sudo chown -R git:git $GIT_DIR
[[ -f ~/.ssh/id_rsa ]] || ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
sudo cp ~/.ssh/id_rsa.pub $GIT_DIR/admin_keys/admin.pub
sudo su git -c "$DIR/install"
cd ~
# we do this to accept the ssh-key for git@localhost
# to avoid 'Yes' prompt from subsequent git commands.
ssh -o StrictHostKeyChecking=no git@127.0.0.1
git clone git@127.0.0.1:gitolite-admin ~/gitolite-admin
git config --global user.name "admin"
git config --global user.email you@example.com
sudo chmod 777 /var/run/docker.sock
sudo usermod -a -G docker,$USER git
sudo chmod 2775 /opt/solum/apps

@ -1,7 +0,0 @@
#!/bin/bash
SOLUM_HOST=127.0.0.1
SOLUM_PORT=9777
SOLUM_TRIGGER=7ad6f35961150bf83d0afdd913f8779417e538ea
curl -X POST http://$SOLUM_HOST:$SOLUM_PORT/v1/triggers/$SOLUM_TRIGGER

@ -1,14 +0,0 @@
FROM solum/guestagent
MAINTAINER Julien Vey
RUN apt-get update
RUN apt-get install -y wget
# Note: docker build cannot ADD paths outside the build context; this path
# assumes the image is built with a context that contains diskimage-builder.
ADD ../../../diskimage-builder/elements/drone/install.d/02-install-drone /tmp/install-drone.sh
RUN /tmp/install-drone.sh
EXPOSE 8080
ENTRYPOINT ["/usr/local/bin/droned"]

@ -1,6 +0,0 @@
Setup
-----
These scripts require installation of OpenStack diskimage-builder
https://github.com/openstack/diskimage-builder
The package is available on Ubuntu, Fedora, and Red Hat Enterprise Linux

@ -1,7 +0,0 @@
#!/bin/bash
set -e
../guestagent/build-ubuntu12_04-docker.sh ../guestagent
sudo docker build -t solum/drone:u1204 .
sudo docker tag solum/drone:u1204 solum/drone

@ -1,4 +0,0 @@
#!/bin/bash
export DIB_RELEASE=precise
ELEMENTS_PATH=../../../diskimage-builder/elements disk-image-create --no-tmpfs -a amd64 vm ubuntu guestagent drone -o u1204-build-drone.qcow2

@ -1,6 +0,0 @@
FROM ubuntu:12.04
MAINTAINER Pierre Padrixe
ADD ../../../diskimage-builder/elements/guestagent/install.d/02-install-guestagent /tmp/install-guestagent.sh
RUN /tmp/install-guestagent.sh

@ -1,11 +0,0 @@
#!/bin/bash
set -e
DOCKERFILE_PATH="."
if [[ ! -z "$1" ]]; then
DOCKERFILE_PATH=$1
fi
sudo docker build -t solum/guestagent:u1304 $DOCKERFILE_PATH
sudo docker tag solum/guestagent:u1304 solum/guestagent

@ -1,4 +0,0 @@
#!/bin/bash
export DIB_RELEASE=precise
ELEMENTS_PATH=../../../diskimage-builder/elements disk-image-create --no-tmpfs -a amd64 vm ubuntu guestagent -o u1204-build-guestagent.qcow2

@ -1,10 +0,0 @@
FROM solum/guestagent
MAINTAINER Pierre Padrixe
ADD ../../../diskimage-builder/elements/jenkins/install.d/02-install-jenkins /tmp/install-jenkins.sh
RUN /tmp/install-jenkins.sh
EXPOSE 8080
ENTRYPOINT ["java", "-jar", "/usr/share/jenkins/jenkins.war"]

@ -1,7 +0,0 @@
#!/bin/bash
set -e
../guestagent/build-ubuntu12_04-docker.sh ../guestagent
sudo docker build -t solum/jenkins:u1204 .
sudo docker tag solum/jenkins:u1204 solum/jenkins

@ -1,4 +0,0 @@
#!/bin/bash
export DIB_RELEASE=precise
ELEMENTS_PATH=../../../diskimage-builder/elements disk-image-create --no-tmpfs -a amd64 vm ubuntu guestagent jenkins -o u1204-build-jenkins.qcow2

@ -1,8 +0,0 @@
#!/bin/bash
# This script is specific to ubuntu, since gitolite has been tested only for it
# http://drone.readthedocs.org/en/latest/install.html#requirements
# Install drone
wget http://downloads.drone.io/latest/drone.deb
dpkg -i drone.deb

@ -1,19 +0,0 @@
#!/bin/bash
GIT_DIR=/home/git
mkdir -p $GIT_DIR/admin_keys
mkdir -p $GIT_DIR/bin
install-packages git openssh-server
locale-gen en_US.UTF-8
dpkg-reconfigure locales
mkdir /var/run/sshd
adduser --system --group --shell /bin/sh git
git clone git://github.com/sitaramc/gitolite $GIT_DIR/source
cd $GIT_DIR
source/install -to $GIT_DIR/bin

@ -1,5 +0,0 @@
#!/bin/bash
apt-get update
apt-get install -y python-pip git
pip install git+https://github.com/openstack/solum-infra-guestagent.git

@ -1,6 +0,0 @@
#!/bin/bash
apt-get update && apt-get install -y openjdk-7-jre-headless wget
wget -q -O - http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key | apt-key add -
echo deb http://pkg.jenkins-ci.org/debian binary/ > /etc/apt/sources.list.d/jenkins.list
apt-get update && apt-get install -y jenkins

@ -1,2 +0,0 @@
admin
admin.pub

@ -1,25 +0,0 @@
FROM ubuntu:12.04
MAINTAINER Julien Vey
RUN apt-get update
RUN apt-get -y install sudo openssh-server git
RUN locale-gen en_US.UTF-8
RUN dpkg-reconfigure locales
RUN mkdir /var/run/sshd
RUN adduser --system --group --shell /bin/sh git
RUN su git -c "mkdir /home/git/bin"
ADD admin.pub /home/git/admin.pub
RUN cd /home/git; su git -c "git clone git://github.com/sitaramc/gitolite"
RUN cd /home/git; su git -c "gitolite/install -ln"
RUN cd /home/git; su git -c "bin/gitolite setup -pk admin.pub"
ENTRYPOINT ["/usr/sbin/sshd", "-D"]
EXPOSE 22

@ -1,4 +0,0 @@
#!/bin/bash
[[ -f admin.pub ]] || ssh-keygen -t rsa -N "" -f admin
sudo docker build -t u1204-gitolite .

@ -1,4 +0,0 @@
#!/bin/bash
export DIB_RELEASE=precise
ELEMENTS_PATH=../../../diskimage-builder/elements disk-image-create --no-tmpfs -a amd64 vm ubuntu guestagent gitolite -o u1204-gitolite.qcow2

@ -1,29 +0,0 @@
Cedarish Language Pack Examples
-------------------------------
What even is this?
=============
Cedarish is a stack developed for Docker-style PaaS to deploy Heroku buildpacks. The original project can be found [here](https://github.com/progrium/cedarish).
This is an example implementation of a Cedarish Language Pack for Solum.
This has been implemented (as a POC/MVP) to build/run Heroku buildpacks on both regular VMs and the Docker driver on top of devstack.
Read the README files in the `docker` and `vm` dirs for details on building/running apps in various scenarios.
It was built and tested on top of the rackerlabs [solum dev environment](https://github.com/rackerlabs/vagrant-solum-dev).
Usage
=====
Follow the README in either `docker` or `vm` to install the appropriate tooling.
Example Apps
------------
Examples forked for stability from the [Opdemand](https://github.com/opdemand) project on GitHub.
Python - https://github.com/paulczar/example-python-django
Java - https://github.com/paulczar/example-java-jetty
NodeJS - https://github.com/paulczar/example-nodejs-express.git

@ -1,68 +0,0 @@
# Docker builder!
Build Heroku-style buildpacks with Docker.
# Requirements
## Devstack + Docker Driver
If you want to run this in Vagrant you can use the following canned devstack:
```
git clone https://github.com/rackerlabs/vagrant-solum-dev.git
cd vagrant-solum-dev
DOCKER=true SOLUM=/path/to/code vagrant up devstack
```
# Using Docker Builder
## Prepare Environment
This should prepare your system to use the docker builder by building a few containers and syncing down the Heroku buildpacks. It currently requires sudo access to work with Docker. Run it as the same user you installed devstack with, which has passwordless sudo.
```
/opt/stack/solum/contrib/lp-cedarish/docker/prepare
```
## Build an Application
The build script takes three positional arguments: the location of the git repo, the app name, and the tenant id. It currently requires sudo access to work with Docker. Run it as the same user you installed devstack with, which has passwordless sudo.
```
$ source ~/devstack/openrc
$ keystone tenant-get $OS_TENANT_NAME
# get the tenant_id and pass that into build-app
/opt/stack/solum/contrib/lp-cedarish/docker/build-app https://github.com/paulczar/example-nodejs-express.git nodejs $OS_TENANT_ID
```
This script will upload a docker image containing your built application to glance, to be run via nova.
## Deploy an Application
Due to a bug in the docker driver, the first VM you start will run `sh` instead of the embedded `CMD`, so we kick off a dummy boot first:
```
$ nova image-list
+--------------------------------------+---------------------------------+--------+--------+
| ID | Name | Status | Server |
+--------------------------------------+---------------------------------+--------+--------+
| 0df6ccbc-c43c-4e4d-9caa-3ddf5485b51b | docker-busybox:latest | ACTIVE | |
| 8d558f99-02a9-4b9e-98ac-9bb65009ecda | nodejs:latest | ACTIVE | |
+--------------------------------------+---------------------------------+--------+--------+
$ nova boot --flavor 1 --image docker-busybox:latest dummy
$ nova delete dummy
$ nova boot --flavor 1 --image nodejs:latest nodejs01
$ nova boot --flavor 1 --image nodejs:latest nodejs02
$ nova list
+--------------------------------------+----------+--------+------------+-------------+-----------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+----------+--------+------------+-------------+-----------------------+
| b4ac0b33-42da-4efa-8580-28b1627199b8 | nodejs01 | ACTIVE | - | Running | private=192.168.78.21 |
| 1697f9fa-9c2c-44b3-b13b-87f7bd3a96c6 | nodejs02 | ACTIVE | - | Running | private=192.168.78.22 |
+--------------------------------------+----------+--------+------------+-------------+-----------------------+
$ curl http://192.168.78.21:5000
Hello World
$ curl http://192.168.78.22:5000
Hello World
```

@ -1,365 +0,0 @@
#!/bin/bash
# Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Solum Build Script for Docker and lp-cedarish
SCRIPT_START_TIME=$(date +"%s")
IMAGE_STORAGE=${IMAGE_STORAGE:-null}
ASSEMBLY_ID=${ASSEMBLY_ID:-null}
PROJECT_ID=${PROJECT_ID:-null}
BUILD_ID=${BUILD_ID:-null}
TASKNAME=build
REUSE_IMAGES_IF_REPO_UNCHANGED=${REUSE_IMAGES_IF_REPO_UNCHANGED:="0"}
USER_PARAMS=${USER_PARAMS:-null}
SOLUM_PARAMS=${SOLUM_PARAMS:-null}
GIT_PRIVATE_KEY=${REPO_DEPLOY_KEYS:-''}
RUN_CMD=${RUN_CMD:-''}
DELETE_LOCAL_CACHE=${DELETE_LOCAL_CACHE:-null}
LP_ACCESS=${ACCESS:-null}
OS_AUTH_TOKEN=${OS_AUTH_TOKEN:-null}
OS_REGION_NAME=${OS_REGION_NAME:-null}
OS_STORAGE_URL=${OS_STORAGE_URL:-null}
TEMP_URL_SECRET=${TEMP_URL_SECRET:-null}
TEMP_URL_PROTOCOL=${TEMP_URL_PROTOCOL:-null}
TEMP_URL_TTL=${TEMP_URL_TTL:-null}
OPR_LP_DOWNLOAD_STRATEGY=${OPR_LP_DOWNLOAD_STRATEGY:-null}
OPER_AUTH_TOKEN=${OPER_AUTH_TOKEN:-null}
OPER_OS_STORAGE_URL=${OPER_OS_STORAGE_URL:-null}
# TLOG, PRUN, etc. defined in common/utils
HERE=$(dirname $0)
source $HERE/../../common/utils
LOG_FILE=$(GET_LOGFILE)
# Get the image_id of the image named $1
function app_glance_id () {
glance --os-image-api-version 2 --os-auth-token $OS_AUTH_TOKEN image-list --sort-key updated_at --sort-dir asc | grep $1 | grep -v "+--" | tail -1 | cut -d'|' -f2
}
function cleanup_on_exit () {
if [[ $DELETE_LOCAL_CACHE == "true" ]]; then
# Not Deleting languagepack image because we found it conflicts with docker load in
# performance tests, which might be due to a concurrency bug in docker load and rmi.
#if [[ -n $BASE_IMG ]]; then
# sudo docker rmi -f $BASE_IMG
#fi
# Delete DU image after uploading to backend
if [[ -n $DU_IMG_TAG ]]; then
sudo docker rmi $DU_IMG_TAG
fi
# Delete temp output files
if [[ -n $APP_DU_FILE ]] && [[ -e $APP_DU_FILE ]]; then
rm -f $APP_DU_FILE
fi
if [[ -n $OUTPUT ]] && [[ -e $OUTPUT ]]; then
rm -f $OUTPUT
fi
# Delete the cloned git repo
if [[ -n $APP_DIR ]]; then
rm -rf $APP_DIR
fi
if [[ -n $TMP_APP_DIR ]]; then
rm -rf $TMP_APP_DIR
fi
remove_ssh_creds "$GIT_PRIVATE_KEY"
fi
}
TLOG ===== Starting Build Script $0 $*
# Make sure tenant auth credentials were passed in.
if [[ -z "$OS_AUTH_TOKEN" ]]; then
TLOG OpenStack credentials not passed via ENV. && exit 1
fi
# Check command line arguments
if [[ $# -lt 6 ]]; then
TLOG Usage: $0 git_url commit_sha appname project_id img_external_ref lp_img_tag && exit 1
fi
PRUN silent sudo docker ps
[[ $? != 0 ]] && TLOG cannot talk to docker. && exit 1
GIT=$1
shift
COMMIT_ID=$1
shift
APP=$1
shift
TENANT=$1
shift
IMG_EXTERNAL_REF=$1
shift
LP_IMG_TAG=$1
if ! (test_public_repo $GIT); then
TLOG Could not reach $GIT with curl. Failing. && exit 1
fi
BASE_DIR=/dev/shm
GIT_CHECKSUM=$(git ls-remote $GIT | head -1 | awk '{print $1}')
TS=$(date +"%Y%m%dt%H%M%S%N")
APP_DIR="$BASE_DIR/apps/$TENANT/$ASSEMBLY_ID"
TMP_APP_DIR="/tmp/apps/$TENANT/$ASSEMBLY_ID"
mkdir -p $APP_DIR
mkdir -p $TMP_APP_DIR
add_ssh_creds "$GIT_PRIVATE_KEY" "$APP_DIR"
[[ $? != 0 ]] && TLOG FAILED to register ssh key with ssh-agent && exit 1
trap cleanup_on_exit EXIT
if [[ -d "$APP_DIR/build" ]]; then
cd $APP_DIR/build
OUT=$(git pull | grep -c 'Already up-to-date')
# Check to see if this is the same as last build, and don't rebuild if allowed to skip
if [[ "$OUT" != "0" ]]; then
if [[ "$REUSE_IMAGES_IF_REPO_UNCHANGED" -eq "1" ]]; then
image_id=$(app_glance_id $APP)
if [[ ${#image_id} == 36 ]]; then # uuid4 is 36 characters
TLOG Repo is unchanged. Reusing image $image_id.
TLOG created_image_id=$image_id
# Need stdout for solum-worker to parse the image_id
echo created_image_id=$image_id
TOTAL_TIME=$(elapsed $SCRIPT_START_TIME)
TLOG ===== Total elapsed time: $TOTAL_TIME sec
exit 0
fi
fi
fi
else
git_clone_with_commit_sha_retry $GIT $APP_DIR/build $COMMIT_ID
[[ $? != 0 ]] && TLOG Git clone failed. Check repo $GIT && exit 1
fi
cd $APP_DIR/build
if [[ "$COMMIT_ID" == "" ]]; then
COMMIT_ID=$(git log -1 --pretty=%H)
fi
# if $RUN_CMD contains a run script in repo ($APP_DIR/build)
# and not executable, make it executable
# docker ADD and COPY will inherit permissions
run_cmd_prefix="./"
if [[ -n $RUN_CMD ]] && [[ "$RUN_CMD" == $run_cmd_prefix* ]]; then
script_name=${RUN_CMD:2}
if [[ -e "$script_name" ]] && [[ ! -x "$script_name" ]]; then
chmod +x "$script_name"
fi
fi
# If languagepack is 'auto', build the application slug
if [[ $IMG_EXTERNAL_REF == "auto" ]]; then
TLOG "===>" Building App
BUILD_ID=$(git archive master | sudo docker run -i -a stdin \
-v /opt/solum/cache:/tmp/cache:rw \
-v /opt/solum/buildpacks:/tmp/buildpacks:rw \
solum/slugbuilder)
PRUN sudo docker logs --tail=all -f $BUILD_ID
cd $APP_DIR
PRUN sudo docker cp $BUILD_ID:/tmp/slug.tgz $APP_DIR/
if [[ ! -f "$APP_DIR/slug.tgz" ]]; then
TLOG Slug build failed see container: $BUILD_ID && exit 1
fi
sudo docker rm -f $BUILD_ID
else
# download base image (languagepack) if it is not 'auto'
TLOG downloading LP image from $IMAGE_STORAGE
if [[ $IMAGE_STORAGE == "glance" ]]; then
OUTPUT="$TMP_APP_DIR/$LP_IMG_TAG"
PRUN silent glance --os-image-api-version 2 --os-auth-token $OS_AUTH_TOKEN image-list
if [[ $? != 0 ]]; then
TLOG Cannot talk to Glance. Check your OpenStack credentials. && exit 1
fi
glance --os-image-api-version 2 --os-auth-token $OS_AUTH_TOKEN image-download --file $OUTPUT $IMG_EXTERNAL_REF
if [[ $? != 0 ]]; then
TLOG Failed to download image $IMG_EXTERNAL_REF from glance. && exit 1
fi
docker_load_with_retry $OUTPUT
if [[ $? != 0 ]]; then
OUTPUT_FILE_SIZE=$(stat -c%s "$OUTPUT")
TLOG Failed docker load, file size $OUTPUT_FILE_SIZE && exit 1
fi
BASE_IMG=$LP_IMG_TAG
elif [[ $IMAGE_STORAGE == "swift" ]]; then
OUTPUT="$TMP_APP_DIR/$LP_IMG_TAG"
if [[ $LP_ACCESS == "custom" ]]; then
python $HERE/swift-handler.py $OS_REGION_NAME $OS_AUTH_TOKEN $OS_STORAGE_URL download solum_lp $IMG_EXTERNAL_REF $OUTPUT \
> >(while read ALINE; do TLOG $ALINE; done)
elif [[ $LP_ACCESS == "operator" ]]; then
if [[ $OPR_LP_DOWNLOAD_STRATEGY == "swift-client" ]]; then
python $HERE/swift-handler.py $OS_REGION_NAME $OPER_AUTH_TOKEN $OPER_OS_STORAGE_URL download solum_lp $IMG_EXTERNAL_REF $OUTPUT \
> >(while read ALINE; do TLOG $ALINE; done)
elif [[ $OPR_LP_DOWNLOAD_STRATEGY == "wget" ]]; then
wget -q "$IMG_EXTERNAL_REF" --output-document=$OUTPUT
fi
fi
if [[ $? != 0 ]]; then
TLOG Failed to download image $IMG_EXTERNAL_REF from swift. && exit 1
fi
docker_load_with_retry $OUTPUT
if [[ $? != 0 ]]; then
OUTPUT_FILE_SIZE=$(stat -c%s "$OUTPUT")
TLOG Failed docker load, file size $OUTPUT_FILE_SIZE && exit 1
fi
BASE_IMG=$LP_IMG_TAG
elif [[ $IMAGE_STORAGE == "docker_registry" ]]; then
sudo docker pull $IMG_EXTERNAL_REF
if [[ $? != 0 ]]; then
TLOG Failed to download image $IMG_EXTERNAL_REF from docker registry. && exit 1
fi
BASE_IMG=$IMG_EXTERNAL_REF
else
TLOG Unsupported Image storage backend - $IMAGE_STORAGE && exit 1
fi
fi
DOCKER_RUN_CMD=$RUN_CMD
# copy params to the working dir
EXT=$(mktemp -u XXX | head -n 1)
mkdir $APP_DIR/build/params_$EXT
if [[ $USER_PARAMS != null ]]; then
cp $USER_PARAMS $APP_DIR/build/params_$EXT/user_params
DOCKER_RUN_CMD="[\"/bin/bash\", \"-c\", \"source params_$EXT/user_params && $RUN_CMD\"]"
fi
if [[ $SOLUM_PARAMS != null ]]; then
cp $SOLUM_PARAMS $APP_DIR/build/params_$EXT/solum_params
fi
if [[ $IMG_EXTERNAL_REF == "auto" ]]; then
# Build the application image by injecting slug into runner
TLOG Creating Dockerfile
cat << EOF > $APP_DIR/Dockerfile
# SOLUM APP BUILDER
FROM solum/slugrunner
ADD slug.tgz /app
COPY build/params_$EXT /root/params
ENTRYPOINT ["/runner/init"]
CMD ["start","web"]
EOF
else
TLOG Creating Dockerfile
cat << EOF > $APP_DIR/Dockerfile
FROM $BASE_IMG
COPY build /app
WORKDIR /solum/bin
RUN chmod +x build.sh
RUN ./build.sh
WORKDIR /app
CMD $DOCKER_RUN_CMD
EOF
fi
cd $APP_DIR
DU_IMG_TAG="$TENANT-$APP-$TS-$COMMIT_ID"
STORAGE_OBJ_NAME="$APP-$TS-$COMMIT_ID"
echo "build/.git" > .dockerignore
if [[ $IMAGE_STORAGE == "glance" ]]; then
docker_build_with_retry $DU_IMG_TAG .
[[ $? != 0 ]] && TLOG Docker build failed. && exit 1
glance_upload_with_retry $DU_IMG_TAG
image_id="$(app_glance_id $DU_IMG_TAG)"
TLOG ===== finished uploading DU to $IMAGE_STORAGE
elif [[ $IMAGE_STORAGE == "docker_registry" ]]; then
DOCKER_REGISTRY=${DOCKER_REGISTRY:-'10.0.2.15:5042'}
APP_NAME=$DOCKER_REGISTRY/$DU_IMG_TAG
docker_build_with_retry $APP_NAME .
[[ $? != 0 ]] && TLOG Docker build failed. && exit 1
sudo docker push $APP_NAME
[[ $? != 0 ]] && TLOG Docker push failed. && exit 1
# just to make worker/shell easier to process
image_id="${APP_NAME}"
sudo docker rmi -f $APP_NAME
TLOG ===== finished uploading DU to $IMAGE_STORAGE
elif [[ $IMAGE_STORAGE == "swift" ]]; then
docker_build_with_retry $DU_IMG_TAG .
if [[ $? != 0 ]]; then
TLOG Docker build failed. && exit 1
fi
APP_DU_FILE="$TMP_APP_DIR/$STORAGE_OBJ_NAME"
docker_save_with_retry $APP_DU_FILE $DU_IMG_TAG
if [[ $? != 0 ]]; then
TLOG Docker save failed. && exit 1
fi
#TODO(devkulkarni): Read the SECRET and TTL from config file
SECRET=secret
TTL=604800
python $HERE/swift-handler.py $OS_REGION_NAME $OS_AUTH_TOKEN $OS_STORAGE_URL upload solum_du $STORAGE_OBJ_NAME $APP_DU_FILE \
> >(while read ALINE; do TLOG $ALINE; done)
if [[ $? != 0 ]]; then
TLOG Swift upload failed. && exit 1
fi
ACCOUNT=$(echo $OS_STORAGE_URL | sed 's/\// /'g | awk '{print $4}')
TLOG "ACCOUNT=$ACCOUNT"
STORAGE_HOST=$(echo $OS_STORAGE_URL | sed 's/\// /'g | awk '{print $2}')
TLOG "STORAGE_HOST=$STORAGE_HOST"
curl -i -X POST -H X-Auth-Token:$OS_AUTH_TOKEN -H X-Account-Meta-Temp-URL-Key:$TEMP_URL_SECRET $TEMP_URL_PROTOCOL://$STORAGE_HOST/v1/$ACCOUNT
TLOG "HERE:$HERE"
TEMP_URL=$(python $HERE/get-temp-url.py $STORAGE_HOST solum_du $STORAGE_OBJ_NAME $ACCOUNT $TEMP_URL_SECRET $TEMP_URL_TTL $TEMP_URL_PROTOCOL)
TLOG "TEMP_URL:$TEMP_URL"
image_id="${TEMP_URL}"
TLOG ===== finished uploading DU to $IMAGE_STORAGE
else
TLOG Unsupported Image storage backend - $IMAGE_STORAGE && exit 1
fi
TOTAL_TIME=$(elapsed $SCRIPT_START_TIME)
TLOG ===== Total elapsed time: $TOTAL_TIME sec
TLOG created_image_id=$image_id
TLOG docker_image_name=$DU_IMG_TAG
# Need stdout for solum-worker to parse the image_id
echo created_image_id=$image_id
echo docker_image_name=$DU_IMG_TAG
exit 0
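A hypothetical manual invocation of this build script, matching the usage check above (all values are placeholders):
```
export OS_AUTH_TOKEN="gAAAA-example"   # assumption: a valid Keystone token
export IMAGE_STORAGE=glance DELETE_LOCAL_CACHE=true
export ASSEMBLY_ID=asm-1 PROJECT_ID=proj-1 BUILD_ID=bld-1
./build-app https://github.com/paulczar/example-nodejs-express.git \
    master nodejs proj-1 auto auto
```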

@ -1,190 +0,0 @@
#!/bin/bash
# Solum script for building custom languagepacks
SCRIPT_START_TIME=$(date +'%s')
TASKNAME=languagepack
ASSEMBLY_ID=${ASSEMBLY_ID:-null}
REUSE_IMAGES_IF_REPO_UNCHANGED=${REUSE_IMAGES_IF_REPO_UNCHANGED:="0"}
GIT_PRIVATE_KEY=${REPO_DEPLOY_KEYS:-''}
IMAGE_STORAGE=${IMAGE_STORAGE:-null}
DELETE_LOCAL_CACHE=${DELETE_LOCAL_CACHE:-null}
LP_ACCESS=${ACCESS:-null}
OS_AUTH_TOKEN=${OS_AUTH_TOKEN:-null}
OS_REGION_NAME=${OS_REGION_NAME:-null}
OS_STORAGE_URL=${OS_STORAGE_URL:-null}
TEMP_URL_SECRET=${TEMP_URL_SECRET:-null}
TEMP_URL_PROTOCOL=${TEMP_URL_PROTOCOL:-null}
TEMP_URL_TTL=${TEMP_URL_TTL:-null}
OPR_LP_DOWNLOAD_STRATEGY=${OPR_LP_DOWNLOAD_STRATEGY:-null}
# TLOG, PRUN, etc. defined in common/utils
HERE=$(dirname $0)
source $HERE/../../common/utils
LOG_FILE=$(GET_LOGFILE)
# Get the image uuid of the image named $1
function app_glance_id () {
glance --os-image-api-version 2 --os-auth-token $OS_AUTH_TOKEN image-list --sort-key updated_at --sort-dir asc | grep $1 | grep -v "+--" | tail -1 | cut -d'|' -f2
}
function cleanup_on_exit () {
if [[ $DELETE_LOCAL_CACHE == "true" ]]; then
# Not Deleting languagepack image because we found it conflicts with docker load in
# performance tests, which might be due to a concurrency bug in docker load and rmi.
#if [[ -n $LP_IMG_TAG ]]; then
# sudo docker rmi -f $LP_IMG_TAG
#fi
# Delete the cloned git repo
if [[ -n $TMP_LP_DIR ]]; then
rm -rf $TMP_LP_DIR
fi
# Delete temp output file
if [[ -n $LP_FILE ]] && [[ -e $LP_FILE ]]; then
rm -f $LP_FILE
fi
fi
}
TLOG ===== Starting Language Pack Build Script $0 $*
# Make sure tenant auth credentials were passed in.
if [[ -z $OS_AUTH_TOKEN ]]; then
TLOG OpenStack credentials not passed via ENV. && exit 1
fi
# Check command line arguments
if [[ $# -lt 3 ]]; then
TLOG Usage: $0 git_url lpname project_id && exit 1
fi
PRUN silent sudo docker ps
[[ $? != 0 ]] && TLOG cannot talk to docker. && exit 1
GIT=$1
shift
LPNAME=$1
shift
TENANT=$1
TS=$(date +"%Y%m%dt%H%M%S%N")
GIT_CHECKSUM=$(echo $GIT | md5sum | awk '{print $1;}')
TMP_LP_DIR="/tmp/lps/$TENANT/$ASSEMBLY_ID"
mkdir -p $TMP_LP_DIR
trap cleanup_on_exit EXIT
TLOG ===== Cloning repo
git_clone_with_retry $GIT $TMP_LP_DIR/build
[[ $? != 0 ]] && TLOG Git clone failed. Check repo $GIT && exit 1
if [[ -d "$TMP_LP_DIR/build" ]]; then
cd $TMP_LP_DIR/build
COMMIT_ID=$(git log -1 --pretty=%H)
LP_IMG_TAG="$TENANT-$LPNAME-$TS-$COMMIT_ID"
STORAGE_OBJ_NAME="$LPNAME-$TS-$COMMIT_ID"
echo ".git" > .dockerignore
TLOG Building LP
if [[ $IMAGE_STORAGE == "glance" ]]; then
PRUN silent glance --os-image-api-version 2 --os-auth-token $OS_AUTH_TOKEN image-list
if [[ $? != 0 ]]; then
TLOG Cannot talk to Glance. Check your OpenStack credentials. && exit 1
fi
docker_build_with_retry $LP_IMG_TAG .
[[ $? != 0 ]] && TLOG Docker build failed. && exit 1
TLOG ===== finished docker build
sudo docker save "$LP_IMG_TAG" | glance --os-image-api-version 2 --os-auth-token $OS_AUTH_TOKEN image-create --container-format=docker --disk-format=raw --name "$STORAGE_OBJ_NAME"
if [[ $? != 0 ]]; then
TLOG Failed to upload languagepack to glance. && exit 1
fi
TLOG ===== finished uploading LP to $IMAGE_STORAGE
image_id="$(app_glance_id $STORAGE_OBJ_NAME)"
# Tag the glance image as a solum language pack
TLOG ===== tagging glance image $image_id
glance --os-auth-token $OS_AUTH_TOKEN --os-image-api-version 2 image-tag-update $image_id 'solum::lp'
elif [[ $IMAGE_STORAGE == "swift" ]]; then
docker_build_with_retry $LP_IMG_TAG .
[[ $? != 0 ]] && TLOG Docker build failed. && exit 1
TLOG ===== finished docker build
LP_FILE="$TMP_LP_DIR/$STORAGE_OBJ_NAME"
docker_save_with_retry $LP_FILE $LP_IMG_TAG
[[ $? != 0 ]] && TLOG Docker save failed. && exit 1
python $HERE/swift-handler.py $OS_REGION_NAME $OS_AUTH_TOKEN $OS_STORAGE_URL upload solum_lp $STORAGE_OBJ_NAME $LP_FILE \
> >(while read ALINE; do TLOG $ALINE; done)
if [[ $? != 0 ]]; then
TLOG Failed to upload languagepack to swift. && exit 1
fi
if [[ $LP_ACCESS == "custom" ]]; then
image_id="${STORAGE_OBJ_NAME}"
elif [[ $LP_ACCESS == "operator" ]]; then
if [[ $OPR_LP_DOWNLOAD_STRATEGY == "swift-client" ]]; then
image_id="${STORAGE_OBJ_NAME}"
elif [[ $OPR_LP_DOWNLOAD_STRATEGY == "wget" ]]; then
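# Derive the Swift account and storage host from OS_STORAGE_URL, assuming a URL of the form <protocol>://<host>/v1/<account>.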
ACCOUNT=$(echo $OS_STORAGE_URL | sed 's/\// /'g | awk '{print $4}')
TLOG "ACCOUNT=$ACCOUNT"
STORAGE_HOST=$(echo $OS_STORAGE_URL | sed 's/\// /'g | awk '{print $2}')
TLOG "STORAGE_HOST=$STORAGE_HOST"
TLOG "STORAGE_URL:$OS_STORAGE_URL"
TLOG "REGION:$OS_REGION_NAME"
TLOG "AUTH_TOKEN:$OS_AUTH_TOKEN"
curl -i -X POST -H X-Auth-Token:$OS_AUTH_TOKEN -H X-Account-Meta-Temp-URL-Key:$TEMP_URL_SECRET $TEMP_URL_PROTOCOL://$STORAGE_HOST/v1/$ACCOUNT
TLOG "HERE:$HERE"
TEMP_URL=$(python $HERE/get-temp-url.py $STORAGE_HOST solum_lp $STORAGE_OBJ_NAME $ACCOUNT $TEMP_URL_SECRET $TEMP_URL_TTL $TEMP_URL_PROTOCOL)
TLOG "TEMP_URL:$TEMP_URL"
image_id="${TEMP_URL}"
fi
fi
TLOG ===== finished uploading LP to $IMAGE_STORAGE
elif [[ $IMAGE_STORAGE == "docker_registry" ]]; then
DOCKER_REGISTRY=${DOCKER_REGISTRY:-'10.0.2.15:5042'}
docker_build_with_retry $LP_IMG_TAG .
[[ $? != 0 ]] && TLOG Docker build failed. && exit 1
TLOG ===== finished docker build
APP_NAME="$DOCKER_REGISTRY/$LP_IMG_TAG"
TLOG "APP_NAME=$APP_NAME"
PRUN sudo docker tag $LP_IMG_TAG $APP_NAME
sudo docker push $APP_NAME
if [[ $? != 0 ]]; then
TLOG Failed to upload languagepack to docker registry. && exit 1
fi
TLOG ===== finished uploading LP to $IMAGE_STORAGE
# Just to make it easier for the solum-worker shell handler to process
image_id="${APP_NAME}"
sudo docker rmi -f $APP_NAME
else
TLOG Unsupported Image storage backend - $IMAGE_STORAGE && exit 1
fi
fi
TOTAL_TIME=$(elapsed $SCRIPT_START_TIME)
TLOG ===== Total elapsed time: $TOTAL_TIME sec
TLOG image_external_ref=$image_id
TLOG docker_image_name=$LP_IMG_TAG
# Need stdout for solum-worker to parse the image_id
echo image_external_ref=$image_id
echo docker_image_name=$LP_IMG_TAG
exit 0

@ -1,22 +0,0 @@
#!/bin/bash
sudo docker ps 2> /dev/null > /dev/null
[[ $? != 0 ]] && echo "cannot talk to docker." && exit 1
[[ -f ./openrc ]] && . ./openrc
[[ -f ~/devstack/openrc ]] && . ~/devstack/openrc
nova list 2> /dev/null > /dev/null
if [ $? != 0 ]; then
echo 'cannot talk to nova. check your OpenStack credentials'
exit 1
fi
echo 'Fixing bug where first nova boot of docker container'
echo 'Starts with "sh" rather than embedded "CMD"'
echo 'boot dummy instance'
nova boot --flavor 1 --image docker-busybox:latest dummy
echo 'sleep 60 sec'
sleep 60
echo 'delete dummy instance'
nova delete dummy

@ -1,49 +0,0 @@
# Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Get tempUrl for DU stored in swift."""
import hashlib
import hmac
import sys
import time
if len(sys.argv) < 8:
print('USAGE: python get-temp-url.py storage_host container'
' app_name account secret ttl protocol')
sys.exit(1)
storage_host = sys.argv[1]
container = sys.argv[2]
app_name = sys.argv[3]
account = sys.argv[4]
secret = sys.argv[5]
ttl = sys.argv[6]
protocol = sys.argv[7]
method = 'GET'
expires = int(time.time() + int(ttl))
base = protocol + "://"
base += storage_host
path = '/v1'
path += "/" + account
path += "/" + container
path += "/" + app_name
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(secret.encode('utf-8'), hmac_body.encode('utf-8'),
               hashlib.sha1).hexdigest()  # encode for Python 3 compatibility
print('%s%s?temp_url_sig=%s&temp_url_expires=%s' % (base, path, sig, expires))

@ -1,161 +0,0 @@
#!/bin/sh
set -e
#
# This script is meant for quick & easy install via:
# 'curl -sL https://get.docker.io/ | sh'
# or:
# 'wget -qO- https://get.docker.io/ | sh'
#
#
# Docker Maintainers:
# To update this script on https://get.docker.io,
# use hack/release.sh during a normal release,
# or the following one-liner for script hotfixes:
# s3cmd put --acl-public -P hack/install.sh s3://get.docker.io/index
#
url='https://get.docker.io/'
command_exists() {
command -v "$@" > /dev/null 2>&1
}
case "$(uname -m)" in
*64)
;;
*)
echo >&2 'Error: you are not using a 64bit platform.'
echo >&2 'Docker currently only supports 64bit platforms.'
exit 1
;;
esac
if command_exists docker || command_exists lxc-docker; then
echo >&2 'Warning: "docker" or "lxc-docker" command appears to already exist.'
echo >&2 'Please ensure that you do not already have docker installed.'
echo >&2 'You may press Ctrl+C now to abort this process and rectify this situation.'
( set -x; sleep 20 )
fi
sh_c='sh -c'
if [ "$(whoami 2>/dev/null || true)" != 'root' ]; then
if command_exists sudo; then
sh_c='sudo sh -c'
elif command_exists su; then
sh_c='su -c'
else
echo >&2 'Error: this installer needs the ability to run commands as root.'
echo >&2 'We are unable to find either "sudo" or "su" available to make this happen.'
exit 1
fi
fi
curl=''
if command_exists curl; then
curl='curl -sL'
elif command_exists wget; then
curl='wget -qO-'
elif command_exists busybox && busybox --list-modules | grep -q wget; then
curl='busybox wget -qO-'
fi
# perform some very rudimentary platform detection
lsb_dist=''
if command_exists lsb_release; then
lsb_dist=$(lsb_release -si)
fi
if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
lsb_dist=$(. /etc/lsb-release && echo "$DISTRIB_ID")
fi
if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
lsb_dist='Debian'
fi
case "$lsb_dist" in
Ubuntu|Debian)
export DEBIAN_FRONTEND=noninteractive
did_apt_get_update=
apt_get_update() {
if [ -z "$did_apt_get_update" ]; then
( set -x; $sh_c 'sleep 3; apt-get update' )
did_apt_get_update=1
fi
}
# TODO remove this section once device-mapper lands
if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
kern_extras="linux-image-extra-$(uname -r)"
apt_get_update
( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true
if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)'
echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!'
( set -x; sleep 10 )
fi
fi
if [ ! -e /usr/lib/apt/methods/https ]; then
apt_get_update
( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https' )
fi
if [ -z "$curl" ]; then
apt_get_update
( set -x; $sh_c 'sleep 3; apt-get install -y -q curl' )
curl='curl -sL'
fi
(
set -x
if [ "https://get.docker.io/" = "$url" ]; then
$sh_c "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9"
elif [ "https://test.docker.io/" = "$url" ]; then
$sh_c "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6"
else
$sh_c "$curl ${url}gpg | apt-key add -"
fi
$sh_c "echo deb ${url}ubuntu docker main > /etc/apt/sources.list.d/docker.list"
$sh_c 'sleep 3; apt-get update; apt-get install -y -q lxc-docker'
)
if command_exists docker && [ -e /var/run/docker.sock ]; then
(
set -x
$sh_c 'docker run busybox echo "Docker has been successfully installed!"'
) || true
fi
exit 0
;;
Gentoo)
if [ "$url" = "https://test.docker.io/" ]; then
echo >&2
echo >&2 ' You appear to be trying to install the latest nightly build in Gentoo.'
echo >&2 ' The portage tree should contain the latest stable release of Docker, but'
echo >&2 ' if you want something more recent, you can always use the live ebuild'
echo >&2 ' provided in the "docker" overlay available via layman. For more'
echo >&2 ' instructions, please see the following URL:'
echo >&2 ' https://github.com/tianon/docker-overlay#using-this-overlay'
echo >&2 ' After adding the "docker" overlay, you should be able to:'
echo >&2 ' emerge -av =app-emulation/docker-9999'
echo >&2
exit 1
fi
(
set -x
$sh_c 'sleep 3; emerge app-emulation/docker'
)
exit 0
;;
esac
echo >&2
echo >&2 ' Either your platform is not easily detectable, is not supported by this'
echo >&2 ' installer script (yet - PRs welcome!), or does not yet have a package for'
echo >&2 ' Docker. Please visit the following URL for more detailed installation'
echo >&2 ' instructions:'
echo >&2
echo >&2 ' http://docs.docker.io/en/latest/installation/'
echo >&2
exit 1

@ -1,52 +0,0 @@
#!/bin/bash
DOCKER_REGISTRY=${DOCKER_REGISTRY:-'127.0.0.1:5042'}
sudo mkdir -p /opt/solum
sudo chown -R ${USER}:${USER} /opt/solum
sudo mkdir -p /var/log/solum
sudo chown -R ${USER}:${USER} /var/log/solum
mkdir -p /opt/solum/apps
mkdir -p /opt/solum/cache
# Slugbuilder builds the cedarish slug.
# We should own/customize this for solum,
# but for the POC we can grab and build the container.
echo Creating slugbuilder container
docker pull solum/slugbuilder
docker tag solum/slugbuilder $DOCKER_REGISTRY/slugbuilder
# Slugtester runs unit tests against user code
echo Creating slugtester container
docker pull solum/slugtester
docker tag solum/slugtester $DOCKER_REGISTRY/slugtester
# Slugrunner runs the cedarish slug built
# by slugbuilder.
# We should own/customize this for solum;
# it is now available from trusted builds.
echo Creating slugrunner container
docker pull solum/slugrunner
docker tag solum/slugrunner $DOCKER_REGISTRY/slugrunner
if [ ! -d /opt/solum/buildpacks ]; then
echo Preparing Buildpacks
mkdir -p /opt/solum/buildpacks
pushd /opt/solum/buildpacks
# These buildpacks help with autodetection and app level dependency installs.
git clone --depth 1 https://github.com/heroku/heroku-buildpack-java.git
git clone --depth 1 https://github.com/heroku/heroku-buildpack-ruby.git
git clone --depth 1 https://github.com/heroku/heroku-buildpack-python.git
git clone --depth 1 https://github.com/gabrtv/heroku-buildpack-nodejs
git clone --depth 1 https://github.com/heroku/heroku-buildpack-play.git
git clone --depth 1 https://github.com/CHH/heroku-buildpack-php.git
git clone --depth 1 https://github.com/heroku/heroku-buildpack-clojure.git
git clone --depth 1 https://github.com/kr/heroku-buildpack-go.git
git clone --depth 1 https://github.com/heroku/heroku-buildpack-scala
git clone --depth 1 https://github.com/igrigorik/heroku-buildpack-dart.git
git clone --depth 1 https://github.com/miyagawa/heroku-buildpack-perl.git
git clone --depth 1 https://github.com/paulczar/heroku-buildpack-mono.git
popd
fi

@ -1,185 +0,0 @@
# Copyright 2015 - Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Swift Handler"""
import errno
import httplib
import os
import sys
import six
from swiftclient import client as swiftclient
from swiftclient import exceptions as swiftexp
CHUNKSIZE = 65536
TOTAL_RETRIES = 3
Gi = 1024 * 1024 * 1000
LARGE_OBJECT_SIZE = 5 * Gi
class InvalidObjectSizeError(Exception):
pass
class MaxRetryReached(Exception):
pass
def _get_swift_client(args):
client_args = {
'auth_version': '2.0',
'preauthtoken': args['auth_token'],
'preauthurl': args['storage_url'],
'os_options': {'region_name': args['region_name']},
}
# swiftclient connection will retry the request
# 5 times before failing
return swiftclient.Connection(**client_args)
def _get_file_size(file_obj):
# Analyze file-like object and attempt to determine its size.
if (hasattr(file_obj, 'seek') and hasattr(file_obj, 'tell') and
(six.PY2 or six.PY3 and file_obj.seekable())):
try:
curr = file_obj.tell()
file_obj.seek(0, os.SEEK_END)
size = file_obj.tell()
file_obj.seek(curr)
return size
except IOError as e:
if e.errno == errno.ESPIPE:
# Illegal seek. This means the file object
# is a pipe (e.g. the user is trying
# to pipe image data to the client,
# echo testdata | bin/glance add blah...), or
# that file object is empty, or that a file-like
# object which doesn't support 'seek/tell' has
# been supplied.
return 0
else:
print("Error getting file size")
raise
else:
return 0
def _get_object(container, name, connection_args, start_byte=None):
connection = _get_swift_client(connection_args)
headers = {}
if start_byte is not None:
bytes_range = 'bytes=%d-' % start_byte
headers = {'Range': bytes_range}
try:
resp_headers, resp_body = connection.get_object(
container=container, obj=name, resp_chunk_size=CHUNKSIZE,
headers=headers)
except swiftexp.ClientException as e:
if e.http_status == httplib.NOT_FOUND:
print("Swift could not find object %s." % name)
raise
return (resp_headers, resp_body)
def _retry_iter(resp_iter, length, container, name, connection_args):
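# Stream chunks to the caller; on a Swift client error, resume with a ranged GET starting at the last byte received.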
length = length if length else (resp_iter.len
if hasattr(resp_iter, 'len') else 0)
retries = 0
bytes_read = 0
while retries <= TOTAL_RETRIES:
try:
for chunk in resp_iter:
yield chunk
bytes_read += len(chunk)
except swiftexp.ClientException as e:
print("Swift exception %s" % e.__class__.__name__)
if bytes_read == length:
break
else:
if retries == TOTAL_RETRIES:
raise MaxRetryReached
else:
retries += 1
print("Retrying Swift download")
# NOTE(james_li): Need a new swift connection to do
# a range request for the same object
(_resp_headers, resp_iter) = _get_object(container, name,
connection_args,
start_byte=bytes_read)
def do_upload(path, container, name, connection_args):
connection = _get_swift_client(connection_args)
with open(path, 'rb') as local_file:
size = _get_file_size(local_file)
if size > 0 and size < LARGE_OBJECT_SIZE:
connection.put_container(container)
connection.put_object(container, name, local_file,
content_length=size)
else:
print("Cannot upload a file with the size exceeding 5GB")
raise InvalidObjectSizeError
def do_download(path, container, name, connection_args):
(resp_headers, resp_data) = _get_object(container, name, connection_args)
length = int(resp_headers.get('content-length', 0))
data_iter = _retry_iter(resp_data, length, container, name,
connection_args)
with open(path, 'wb') as local_file:
for chunk in data_iter:
local_file.write(chunk)
local_file.flush()
def main():
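# argv layout: 1=region, 2=auth token, 3=storage URL, 4=action, 5=container, 6=object name, 7=local path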
action_to_take = sys.argv[4]
path = str(sys.argv[7])
container = str(sys.argv[5])
obj_name = str(sys.argv[6])
connection_args = {'region_name': str(sys.argv[1]),
'auth_token': str(sys.argv[2]),
'storage_url': str(sys.argv[3])}
if action_to_take == 'download':
try:
do_download(path, container, obj_name, connection_args)
print("Finished swift download.")
sys.exit(0)
except Exception as e:
print("Error download object, got %s" % e.__class__.__name__)
sys.exit(1)
elif action_to_take == 'upload':
try:
do_upload(path, container, obj_name, connection_args)
print("Finished swift upload.")
sys.exit(0)
except Exception as e:
print("Error upload object, got %s" % e.__class__.__name__)
sys.exit(1)
else:
sys.exit(2)
if __name__ == '__main__':
sys.exit(main())

@ -1,260 +0,0 @@
#!/bin/bash
# Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Solum App Unit Test Script for Docker and lp-cedarish
SCRIPT_START_TIME=$(date +"%s")
PROJECT_ID=${PROJECT_ID:-null}
BUILD_ID=${BUILD_ID:-null}
TASKNAME=unittest
DOCKER_REGISTRY=${DOCKER_REGISTRY:-'127.0.0.1:5042'}
USER_PARAMS=${USER_PARAMS:-null}
SOLUM_PARAMS=${SOLUM_PARAMS:-null}
USE_DRONE=${_SYSTEM_USE_DRONE:-null}
GIT_PRIVATE_KEY=${REPO_DEPLOY_KEYS:-''}
ENTRYPOINT=${TEST_CMD:-:}
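# TEST_CMD defaults to the shell no-op builtin ":" so an app without a test command still builds cleanly.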
IMAGE_STORAGE=${IMAGE_STORAGE:-null}
DELETE_LOCAL_CACHE=${DELETE_LOCAL_CACHE:-null}
LP_ACCESS=${ACCESS:-null}
OS_AUTH_TOKEN=${OS_AUTH_TOKEN:-null}
OS_REGION_NAME=${OS_REGION_NAME:-null}
OS_STORAGE_URL=${OS_STORAGE_URL:-null}
OPR_LP_DOWNLOAD_STRATEGY=${OPR_LP_DOWNLOAD_STRATEGY:-null}
OPER_AUTH_TOKEN=${OPER_AUTH_TOKEN:-null}
OPER_OS_STORAGE_URL=${OPER_OS_STORAGE_URL:-null}
# TLOG, PRUN, ENSURE_LOGFILE, and elapsed defined in common/utils
HERE=$(dirname $0)
source $HERE/../../common/utils
function TLOG () {
TLOG_FILTERED $*
}
function cleanup_on_exit () {
if [[ $DELETE_LOCAL_CACHE == "true" ]]; then
# Not deleting the languagepack image because we found it conflicts with docker load in
# performance tests, which might be due to a concurrency bug in docker load and rmi.
#if [[ -n $BASE_IMG ]] && [[ $BASE_IMG != "solum/slugtester" ]]; then
# sudo docker rmi -f $BASE_IMG
#fi
# Delete local DU image after running user tests
if [[ -n $RANDOM_NAME ]]; then
sudo docker rmi $RANDOM_NAME
fi
# Delete the cloned git repo
if [[ -n $APP_DIR ]]; then
rm -rf $APP_DIR
fi
if [[ -n $TMP_DIR ]]; then
rm -rf $TMP_DIR
fi
# Delete temp output file
if [[ -n $OUTPUT ]] && [[ -e $OUTPUT ]]; then
rm -f $OUTPUT
fi
remove_ssh_creds "$GIT_PRIVATE_KEY"
fi
}
LOG_FILE=$(GET_LOGFILE)
TLOG ===== Starting Test Script $0 $*
# Check command line arguments
if [[ $# -lt 5 ]]; then
TLOG Usage: $0 git_url commit_sha tenant img_external_ref lp_img_tag && exit 1
fi
PRUN silent sudo docker ps
[[ $? != 0 ]] && TLOG Cannot talk to docker. && exit 1
GIT=$1
shift
COMMIT_SHA=$1
shift
TENANT=$1
shift
IMG_EXTERNAL_REF=$1
shift
LP_IMG_TAG=$1
if ! (test_public_repo $GIT); then
TLOG Could not reach $GIT with curl. Failing. && exit 1
fi
TLOG "Executing test command $ENTRYPOINT"
BASE_DIR=/dev/shm
RANDOM_NAME=$(mktemp -u XXXXXXXXXXXXXXXXXXXXXXX | tr '[:upper:]' '[:lower:]' | head -n 1)
APP_DIR=$BASE_DIR/solum/$RANDOM_NAME
TMP_DIR=/tmp/solum/$RANDOM_NAME
rm -rf $APP_DIR
rm -rf $TMP_DIR
mkdir -p $APP_DIR
mkdir -p $TMP_DIR
# Set base docker image to solum/slugtester for the 'auto' option
BASE_IMG="solum/slugtester"
trap cleanup_on_exit EXIT
# download base image if it is not 'Auto'
if [[ $IMG_EXTERNAL_REF != "auto" ]]; then
TLOG downloading LP image from $IMAGE_STORAGE
if [[ $IMAGE_STORAGE == "glance" ]]; then
OUTPUT="$TMP_DIR/$LP_IMG_TAG"
glance image-download --file $OUTPUT $IMG_EXTERNAL_REF
if [[ $? != 0 ]]; then
TLOG Failed to download image $IMG_EXTERNAL_REF from glance. && exit 1
fi
docker_load_with_retry $OUTPUT
BASE_IMG=$LP_IMG_TAG
elif [[ $IMAGE_STORAGE == "swift" ]]; then
OUTPUT="$TMP_DIR/$LP_IMG_TAG"
if [[ $LP_ACCESS == "custom" ]]; then
python $HERE/swift-handler.py $OS_REGION_NAME $OS_AUTH_TOKEN $OS_STORAGE_URL download solum_lp $IMG_EXTERNAL_REF $OUTPUT \
> >(while read ALINE; do TLOG $ALINE; done)
elif [[ $LP_ACCESS == "operator" ]]; then
if [[ $OPR_LP_DOWNLOAD_STRATEGY == "swift-client" ]]; then
python $HERE/swift-handler.py $OS_REGION_NAME $OPER_AUTH_TOKEN $OPER_OS_STORAGE_URL download solum_lp $IMG_EXTERNAL_REF $OUTPUT \
> >(while read ALINE; do TLOG $ALINE; done)
elif [[ $OPR_LP_DOWNLOAD_STRATEGY == "wget" ]]; then
wget -q "$IMG_EXTERNAL_REF" --output-document=$OUTPUT
fi
fi
if [[ $? != 0 ]]; then
TLOG Failed to download image $IMG_EXTERNAL_REF from swift. && exit 1
fi
docker_load_with_retry $OUTPUT
if [[ $? != 0 ]]; then
OUTPUT_FILE_SIZE=$(stat -c%s "$OUTPUT")
TLOG Docker load failed, file size $OUTPUT_FILE_SIZE && exit 1
fi
BASE_IMG=$LP_IMG_TAG
elif [[ $IMAGE_STORAGE == "docker_registry" ]]; then
sudo docker pull $IMG_EXTERNAL_REF
if [[ $? != 0 ]]; then
TLOG Failed to download image $IMG_EXTERNAL_REF from docker registry. && exit 1
fi
BASE_IMG=$IMG_EXTERNAL_REF
else
TLOG Unsupported Image storage backend - $IMAGE_STORAGE && exit 1
fi
fi
add_ssh_creds "$GIT_PRIVATE_KEY" "$APP_DIR"
[[ $? != 0 ]] && TLOG FAILED to register ssh key with ssh-agent && exit 1
if [[ $COMMIT_SHA ]]; then
git_clone_with_retry $GIT $APP_DIR/code
[[ $? != 0 ]] && TLOG Git clone failed. Check repo $GIT && exit 1
cd $APP_DIR/code
PRUN git checkout -B solum_testing $COMMIT_SHA
else
git_clone_with_retry $GIT $APP_DIR/code --single-branch
[[ $? != 0 ]] && TLOG Git clone failed. Check repo $GIT && exit 1
cd $APP_DIR/code
fi
# if $ENTRYPOINT contains a testing script in repo ($APP_DIR/code)
# and not executable, make it executable
# docker ADD and COPY will inherit permissions
test_cmd_prefix="./"
if [[ -n $ENTRYPOINT ]] && [[ "$ENTRYPOINT" == $test_cmd_prefix* ]]; then
script_name=${ENTRYPOINT:2}
if [[ -e "$script_name" ]] && [[ ! -x "$script_name" ]]; then
chmod +x "$script_name"
fi
fi
DRONE_ENTRYPOINT=$ENTRYPOINT
DOCKER_ENTRYPOINT=$ENTRYPOINT
# copy params to the working dir
EXT=$(mktemp -u XXX | head -n 1)
if [[ $USER_PARAMS != null ]]; then
cp $USER_PARAMS $APP_DIR/code/user_params.$EXT
DRONE_ENTRYPOINT="/bin/bash -c 'source user_params.$EXT && $ENTRYPOINT'"
DOCKER_ENTRYPOINT="[\"/bin/bash\", \"-c\", \"source user_params.$EXT && $ENTRYPOINT\"]"
fi
if [[ $SOLUM_PARAMS != null ]]; then
cp $SOLUM_PARAMS $APP_DIR/code/solum_params.$EXT
fi
echo "$GIT_PRIVATE_KEY" > $APP_DIR/code/id_rsa
# Test the application code
TLOG "===>" Testing App
if [[ $USE_DRONE != null && $(type drone) ]]; then
TLOG "===>" Using Drone
if [[ ! -e $APP_DIR/code/.drone.yml ]]; then
TLOG "===>" Creating .drone.yml
cat << EOF > $APP_DIR/code/.drone.yml
image: $BASE_IMG
script:
- $DRONE_ENTRYPOINT
EOF
else
TLOG "===>" .drone.yml found in source
fi
sudo /usr/local/bin/drone build $APP_DIR/code 2>&1 > >(while read LINE; do TLOG $LINE; done)
else
TLOG Creating Dockerfile
cat << EOF > $APP_DIR/Dockerfile
FROM $BASE_IMG
COPY code /code
COPY code/id_rsa /root/.ssh/id_rsa
RUN chmod 0600 /root/.ssh/id_rsa
RUN echo "Host *\n\tStrictHostKeyChecking no\n\tUserKnownHostsFile=/dev/null" > /root/.ssh/config
WORKDIR /code
RUN ${DOCKER_ENTRYPOINT}
EOF
cd $APP_DIR
echo "code/.git" > .dockerignore
sudo docker build --force-rm=true -t $RANDOM_NAME . 2>&1 > >(while read LINE; do TLOG $LINE; done)
fi
SUCCESS=$?
echo Docker finished with status $SUCCESS.
if [[ $SUCCESS == 0 ]]; then
TLOG ==== Status: SUCCESS
else
TLOG ==== Status: FAIL
fi
TOTAL_TIME=$(elapsed $SCRIPT_START_TIME)
TLOG ===== Total elapsed time: $TOTAL_TIME sec
# Return 0 if the tests went well, or 1 otherwise.
[[ $SUCCESS = 0 ]] && exit 0 || exit 1

@ -1,54 +0,0 @@
Build Heroku-style buildpacks with disk-image-builder
-----------------------------------------------------
## Requirements
### Devstack
If you want to run this in Vagrant you can use the following canned devstack:
```
git clone https://github.com/rackerlabs/vagrant-solum-dev.git
cd vagrant-solum-dev
SOLUM=/path/to/code vagrant up devstack
```
## Using VM Builder
### Prepare Environment
This should prepare your (devstack) system to build VMs. It will install a few system packages and the `disk-image-builder` project. Run this as the same user that installed devstack so that it has passwordless sudo access.
```
/opt/stack/solum/contrib/lp-cedarish/vm/prepare
```
### Build an Application
The build script takes two positional arguments: the location of the git repo and the app name. The user running this script must have passwordless sudo access (use the same user you used to install devstack).
Make sure you have an `openrc` file with your OpenStack credentials in `~/` or `~/devstack/openrc` before running this script.
```
/opt/stack/solum/contrib/lp-cedarish/vm/build-app https://github.com/paulczar/example-nodejs-express.git helloworld
```
The script should update glance with the resultant image as well as set up an SSH key and security group. It will provide you with the nova command required to boot the instance, as well as a basic `user-data.txt` to set any environment variables (to be passed to your app) and run the app on boot.
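For reference, the stock `user-data.txt` that these scripts copy alongside the image (its contents are reproduced in full later in this diff) simply selects the `web` process, switches to the baked-in `/app` directory, and starts the app with foreman:

```
#!/bin/bash
#export PORT=4001
export APP=web
cd /app
foreman start $APP
```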
### Deploy an Application
Your nova command to deploy your app should be something like this:
```
$ source ~/devstack/openrc
$ nova boot --flavor=2 --image=helloworld --security-groups=helloworld --key-name=helloworld_key --user-data=/opt/stack/solum/contrib/lp-cedarish/vm/user-data.txt helloworld01
$ nova list
+--------------------------------------+--------------+--------+------------+-------------+----------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+--------------+--------+------------+-------------+----------------------+
| 92318736-5301-46ce-88e8-5dbaadeb37d6 | helloworld01 | ACTIVE | - | Running | private=192.168.78.2 |
+--------------------------------------+--------------+--------+------------+-------------+----------------------+
$ curl 192.168.78.2:5000
Hello World
```

@ -1,89 +0,0 @@
#!/bin/bash
DIR=`dirname $0`
echo $0
if [[ -z $1 ]] || [[ -z $2 ]]; then
echo "Usage: build git_url appname"
exit 1
fi
GIT=$1
shift
APP=$1
shift
if [[ -z $OS_USERNAME ]]; then
echo 'OpenStack credentials not passed via ENV. Hunting for openrc.'
[[ -f ./openrc ]] && . ./openrc
[[ -f ~/devstack/openrc ]] && . ~/devstack/openrc
fi
glance image-list 2> /dev/null > /dev/null
if [ $? != 0 ]; then
echo 'cannot talk to glance. check your OpenStack credentials'
exit 1
fi
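# Generate a random 32-character build id (override the length via $rnd).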
ID=`< /dev/urandom tr -dc A-Z-a-z-0-9 | head -c${rnd:-32};echo;`
echo Build ID - $ID
APP_DIR=/opt/solum/apps/$APP
BUILD_DIR=/opt/solum/apps/$APP/build
IMAGE_DIR=/opt/solum/apps/$APP/image
mkdir -p $BUILD_DIR
mkdir -p $IMAGE_DIR
[[ -d $BUILD_DIR ]] && rm -rf $BUILD_DIR
mkdir -p $BUILD_DIR
cp -R $DIR/* $BUILD_DIR
echo "$APP git /app $GIT" > $BUILD_DIR/elements/cedarish/source-repository-app
echo "create image"
export PATH=$PATH:/opt/disk-image-builder/bin
export ELEMENTS_PATH=$BUILD_DIR/elements
disk-image-create --no-tmpfs -a amd64 vm ubuntu -o $IMAGE_DIR/$ID.qcow2 cedarish
if [[ ! -f $IMAGE_DIR/$ID.qcow2 ]]; then
echo something went wrong building image
exit 1
fi
echo image: $IMAGE_DIR/$ID.qcow2
glance image-list 2> /dev/null > /dev/null
if [[ $? == 0 ]]; then
echo it would appear I know how to talk to glance
echo therefore I will attempt to upload your image
glance image-create --name $APP --disk-format qcow2 --container-format bare --file $IMAGE_DIR/$ID.qcow2
else
echo I cannot talk to glance your image is here: $IMAGE_DIR/$ID.qcow2
echo Try this: glance image-create --name $APP --disk-format qcow2 --container-format bare --file $IMAGE_DIR/$ID.qcow2
exit 1
fi
echo I will try and create your security groups
nova secgroup-create $APP "allow ssh/web to instance"
nova secgroup-add-rule $APP tcp 22 22 0.0.0.0/0
nova secgroup-add-rule $APP tcp 5000 5000 0.0.0.0/0
nova keypair-add ${APP}_key > ${APP_DIR}/key.priv
chmod 0600 ${APP_DIR}/key.priv
cp ${DIR}/user-data.txt ${APP_DIR}/user-data.txt
echo modify ${APP_DIR}/user-data.txt to set any needed ENV variables
echo boot your app like this
echo nova boot --flavor=2 --image=${APP} --security-groups=${APP} \
--key-name=${APP}_key --user-data=${APP_DIR}/user-data.txt ${APP}01

@ -1,15 +0,0 @@
cedarish element
----------------
Based on the following projects:
* [Cedarish](https://github.com/progrium/cedarish)
* [Slugbuilder](https://github.com/flynn/slugbuilder)
* [Slugrunner](https://github.com/flynn/slugrunner)
Builds an OpenStack image suitable for running Heroku buildpacks.
Currently only Ubuntu is supported.
The scripts used here are examples of the functionality that would be performed by the solum build process (i.e. intended to be run by a system user) and are not representative of end-user tooling.

@ -1,4 +0,0 @@
cache-url
dib-run-parts
dkms
source-repositories

@ -1,47 +0,0 @@
#!/bin/bash
install-packages build-essential
install-packages autoconf
install-packages bind9-host
install-packages bison
install-packages curl
install-packages daemontools
install-packages dnsutils
install-packages ed
install-packages git
install-packages imagemagick
install-packages iputils-tracepath
install-packages libcurl4-openssl-dev
install-packages libevent-dev
install-packages libglib2.0-dev
install-packages libjpeg-dev
install-packages libjpeg62
install-packages libpng12-0
install-packages libpng12-dev
install-packages libmagickcore-dev
install-packages libmagickwand-dev
install-packages libmysqlclient-dev
install-packages libpq-dev
install-packages libsqlite3-dev
install-packages libssl-dev
install-packages libssl0.9.8
install-packages libxml2-dev
install-packages libxslt-dev
install-packages mercurial
install-packages netcat-openbsd
install-packages socat
install-packages syslinux
install-packages sqlite3
install-packages zlib1g-dev
install-packages openssh-client
install-packages openssh-server
# runtime reqs
install-packages ruby
install-packages ruby-dev
gem install foreman
install-packages openjdk-7-jdk
install-packages openjdk-7-jre-headless
install-packages python-dev
install-packages nodejs
ln -s /usr/bin/nodejs /usr/bin/node

@ -1,20 +0,0 @@
#!/bin/bash
echo $0
echo "Install some prereqs"
sudo apt-get -yqq update
sudo apt-get -yqq install qemu-utils git curl wget
echo "install disk image builder"
if [[ ! -d /opt/disk-image-builder ]]; then
sudo git clone https://github.com/openstack/diskimage-builder.git /opt/disk-image-builder
cd /opt/disk-image-builder
sudo pip install .
fi
sudo mkdir -p /opt/solum/apps
sudo chown -R ${USER}:${USER} /opt/solum

@ -1,8 +0,0 @@
#!/bin/bash
#export PORT=4001
export APP=web
cd /app
foreman start $APP

@ -1,3 +0,0 @@
# docker builder!
Build a chef docker image from examples/language-packs/chef.

@ -1,18 +0,0 @@
#!/bin/bash
DOCKER_REGISTRY=${DOCKER_REGISTRY:-'127.0.0.1:5042'}
sudo mkdir -p /opt/solum
sudo chown -R ${USER}:${USER} /opt/solum
sudo mkdir -p /var/log/solum
sudo chown -R ${USER}:${USER} /var/log/solum
mkdir -p /opt/solum/apps
HERE=$(dirname $0)
CHEF_DOCKERFILE=$HERE/../../../examples/language-packs/chef
echo Creating chef docker image
sudo docker build -t solum/chef $CHEF_DOCKERFILE
docker tag solum/chef $DOCKER_REGISTRY/chef

@ -1,156 +0,0 @@
#!/bin/bash
# Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Solum App Unit Test Script for Docker
SCRIPT_START_TIME=$(date +"%s")
PROJECT_ID=${PROJECT_ID:-null}
BUILD_ID=${BUILD_ID:-null}
TASKNAME=unittest
DOCKER_REGISTRY=${DOCKER_REGISTRY:-'127.0.0.1:5042'}
USER_PARAMS=${USER_PARAMS:-null}
SOLUM_PARAMS=${SOLUM_PARAMS:-null}
USE_DRONE=${_SYSTEM_USE_DRONE:-null}
GIT_PRIVATE_KEY=${REPO_DEPLOY_KEYS:-''}
# TLOG, PRUN, ENSURE_LOGFILE, and elapsed defined in common/utils
HERE=$(dirname $0)
source $HERE/../../common/utils
function TLOG () {
TLOG_FILTERED $*
}
LOG_FILE=$(GET_LOGFILE)
TLOG ===== Starting Test Script $0 $*
# Check command line arguments
if [ $# -lt 4 ]; then
TLOG Usage: $0 git_url commit_sha tenant unit_test_entry_point
exit 1
fi
PRUN silent sudo docker ps
[[ $? != 0 ]] && TLOG Cannot talk to docker. && exit 1
# Make sure the chef LP exists; build if it doesn't.
docker inspect solum/chef || docker build -t solum/chef $HERE/../../../examples/language-packs/chef/
GIT=$1
shift
COMMIT_SHA=$1
shift
TENANT=$1
shift
ENTRYPOINT="$@"
shift
TLOG "Executing test command $ENTRYPOINT"
BASE_DIR=/dev/shm
DIR_NAME=$(mktemp -u XXXXXXXXXXXXXXXXXXXXXXX | tr '[:upper:]' '[:lower:]' | head -n 1)
APP_DIR=$BASE_DIR/solum/$DIR_NAME
rm -rf $APP_DIR
PRUN mkdir -p $APP_DIR
add_ssh_creds "$GIT_PRIVATE_KEY" "$APP_DIR"
if ! (test_public_repo $GIT); then
TLOG Could not reach $GIT with curl. Failing.
exit 1
fi
if [[ $COMMIT_SHA ]]; then
git_clone_with_retry $GIT $APP_DIR/code
cd $APP_DIR/code
PRUN git checkout -B solum_testing $COMMIT_SHA
else
git_clone_with_retry $GIT $APP_DIR/code --single-branch
cd $APP_DIR/code
fi
DRONE_ENTRYPOINT=$ENTRYPOINT
DOCKER_ENTRYPOINT=$ENTRYPOINT
# copy params to the working dir
EXT=$(mktemp -u XXX | head -n 1)
if [[ $USER_PARAMS != null ]]; then
cp $USER_PARAMS $APP_DIR/code/user_params.$EXT
DRONE_ENTRYPOINT="/bin/bash -c 'source user_params.$EXT && $ENTRYPOINT'"
DOCKER_ENTRYPOINT="[\"/bin/bash\", \"-c\", \"source user_params.$EXT && $ENTRYPOINT\"]"
fi
if [[ $SOLUM_PARAMS != null ]]; then
cp $SOLUM_PARAMS $APP_DIR/code/solum_params.$EXT
fi
COMMIT_ID=$(git log -1 --pretty=%H)
echo "$GIT_PRIVATE_KEY" > $APP_DIR/code/id_rsa
# Test the application code
TLOG "===>" Testing App
if [[ $USE_DRONE != null && $(type drone) ]]; then
TLOG "===>" Using Drone
if [[ ! -e $APP_DIR/code/.drone.yml ]]; then
TLOG "===>" Creating .drone.yml
cat << EOF > $APP_DIR/code/.drone.yml
image: solum/chef
script:
- $DRONE_ENTRYPOINT
EOF
else
TLOG "===>" .drone.yml found in source
fi
sudo /usr/local/bin/drone build $APP_DIR/code 2>&1 > >(while read LINE; do TLOG $LINE; done)
else
TLOG Creating Dockerfile
cat << EOF > $APP_DIR/Dockerfile
# SOLUM APP BUILDER
FROM solum/chef
ADD code /code
ADD code/id_rsa /root/.ssh/id_rsa
RUN chmod 0600 /root/.ssh/id_rsa
RUN echo "Host *\n\tStrictHostKeyChecking no\n\tUserKnownHostsFile=/dev/null" > /root/.ssh/config
WORKDIR /code
RUN ${DOCKER_ENTRYPOINT}
EOF
cd $APP_DIR
sudo docker build --rm -t $DIR_NAME . 2>&1 > >(while read LINE; do TLOG $LINE; done)
fi
SUCCESS=$?
remove_ssh_creds "$GIT_PRIVATE_KEY"
echo Docker finished with status $SUCCESS.
if [[ $SUCCESS == 0 ]]; then
TLOG ==== Status: SUCCESS
else
TLOG ==== Status: FAIL
fi
[[ $SUCCESS == 0 ]] && sudo docker rmi $DIR_NAME
cd /tmp
rm -rf $APP_DIR
TOTAL_TIME=$(elapsed $SCRIPT_START_TIME)
TLOG ===== Total elapsed time: $TOTAL_TIME sec
# Return 0 if the tests went well, or 1 otherwise.
[[ $SUCCESS = 0 ]] && exit 0 || exit 1

@ -1,68 +0,0 @@
# docker builder!
Build dockerfile repos with docker.
# Requirements
## Devstack + Docker Driver
If you want to run this in Vagrant you can use the following canned devstack:
```
git clone https://github.com/rackerlabs/vagrant-solum-dev.git
cd vagrant-solum-dev
DOCKER=true SOLUM=/path/to/code vagrant up devstack
```
# Using Docker Builder
## Prepare Environment
This should prepare your system to use the docker builder. It currently requires sudo access to work with docker. Run it as the same user that installed devstack, which has passwordless sudo.
```
/opt/stack/solum/contrib/lp-dockerfile/docker/prepare
```
## Build an Application
The build script takes the location of the git repo, the app name, and the tenant as positional arguments (see the example below). It currently requires sudo access to work with docker. Run it as the same user that installed devstack, which has passwordless sudo.
```
$ source ~/devstack/openrc
$ keystone tenant-get $OS_TENANT_NAME
# get the tenant_id and pass that into build-app
/opt/stack/solum/contrib/lp-dockerfile/docker/build-app https://github.com/paulczar/example-nodejs-express.git nodejs $OS_TENANT_NAME
```
This script will upload a docker container with your built application to glance to be run via nova.
## Deploy an Application
Due to a bug in the docker driver, the first VM you start will run `sh` instead of the embedded `CMD`, so we should kick off a dummy boot first:
```
$ nova image-list
+--------------------------------------+---------------------------------+--------+--------+
| ID | Name | Status | Server |
+--------------------------------------+---------------------------------+--------+--------+
| 0df6ccbc-c43c-4e4d-9caa-3ddf5485b51b | docker-busybox:latest | ACTIVE | |
| 8d558f99-02a9-4b9e-98ac-9bb65009ecda | nodejs:latest | ACTIVE | |
+--------------------------------------+---------------------------------+--------+--------+
$ nova boot --flavor 1 --image docker-busybox:latest dummy
$ nova delete dummy
$ nova boot --flavor 1 --image nodejs:latest nodejs01
$ nova boot --flavor 1 --image nodejs:latest nodejs02
$ nova list
+--------------------------------------+----------+--------+------------+-------------+-----------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+----------+--------+------------+-------------+-----------------------+
| b4ac0b33-42da-4efa-8580-28b1627199b8 | nodejs01 | ACTIVE | - | Running | private=192.168.78.21 |
| 1697f9fa-9c2c-44b3-b13b-87f7bd3a96c6 | nodejs02 | ACTIVE | - | Running | private=192.168.78.22 |
+--------------------------------------+----------+--------+------------+-------------+-----------------------+
$ curl http://192.168.78.21:5000
Hello World
$ curl http://192.168.78.22:5000
Hello World
```

@ -1,61 +0,0 @@
#!/bin/bash
LOG=${SOLUM_BUILD_LOG:="/opt/stack/logs/solum_build.log"}
# TLOG, PRUN, etc. defined in common/utils
HERE=$(dirname $0)
source $HERE/../../common/utils
PRUN silent docker ps
[[ $? != 0 ]] && TLOG cannot talk to docker. && exit 1
# Check command line arguments
if [ $# -lt 4 ]; then
TLOG Usage: $0 git_url appname project_id base_image [git_private_key]
exit 1
fi
GIT=$1
shift
APP=$1
shift
TENANT=$1
shift
BASE_IMAGE=$1
shift
GIT_PRIVATE_KEY=$1
shift
DOCKER_REGISTRY=${DOCKER_REGISTRY:-'127.0.0.1:5042'}
if [[ -z $OS_USERNAME ]]; then
TLOG OpenStack credentials not passed via ENV.
[[ -f ./openrc ]] && . ./openrc
[[ -f ~/devstack/openrc ]] && . ~/devstack/openrc
fi
APP_DIR=/opt/solum/apps/$TENANT/$APP
PRUN mkdir -p $APP_DIR
add_ssh_creds "$GIT_PRIVATE_KEY" "$APP_DIR"
[[ -d $APP_DIR/build ]] && rm -rf $APP_DIR/build
git_clone_with_retry $GIT $APP_DIR/build
remove_ssh_creds "$GIT_PRIVATE_KEY"
TLOG "===>" Building App
cd $APP_DIR/build
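# Build the app image, push it to the local registry, and mirror a saved copy into glance for nova.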
PRUN sudo docker build -t $DOCKER_REGISTRY/$APP .
PRUN sudo docker push $DOCKER_REGISTRY/$APP
sudo docker save "$DOCKER_REGISTRY/$APP" | glance image-create --container-format=docker --disk-format=raw --name "$APP" > /dev/null
image_id=$(glance image-show $APP | grep " id " | cut -d"|" -f3 | tr -d " ")
TLOG created_image_id=$image_id
# Need stdout for solum-worker to parse the image_id
echo created_image_id=$image_id
exit 0

@ -1,11 +0,0 @@
#!/bin/bash
DOCKER_REGISTRY=${DOCKER_REGISTRY:-'127.0.0.1:5042'}
sudo mkdir -p /opt/solum
sudo chown -R ${USER}:${USER} /opt/solum
sudo mkdir -p /var/log/solum
sudo chown -R ${USER}:${USER} /var/log/solum
mkdir -p /opt/solum/apps

@ -1,36 +0,0 @@
==========================
Enabling Solum in DevStack
==========================
1. Install Docker version 1.7.0 using the following steps (Solum has been tested with this version of Docker)::
echo deb http://get.docker.com/ubuntu docker main | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt-key adv --keyserver pgp.mit.edu --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
sudo apt-get update
sudo apt-get install lxc-docker-1.7.0
2. Download DevStack::
git clone https://opendev.org/openstack/devstack.git
cd devstack
3. Add this repo as an external repository::
cat > local.conf <<END
[[local|localrc]]
enable_plugin solum https://opendev.org/openstack/solum
END
To use stable branches, make sure devstack is on that branch, and specify
the branch name to enable_plugin, for example::
enable_plugin solum https://opendev.org/openstack/solum stable/mitaka
4. Run ``./stack.sh``.
.. note::
This setup will produce virtual machines, not Docker containers.
For an example of the Docker setup, see::
https://wiki.openstack.org/wiki/Solum/Docker

@ -1,435 +0,0 @@
git|devstack|master[b80e5d7]
os|distro=trusty
os|vendor=Ubuntu
os|release=14.04
git|cinder|master[19f7ef7]
git|dib-utils|master[67fc832]
git|glance|master[70b9301]
git|heat|master[54bd5eb]
git|heat-cfntools|master[c9f938b]
git|heat-templates|master[051822a]
git|horizon|master[2e84d55]
git|keystone|master[bfa13b7]
git|neutron|[6a7ce01]
git|noVNC|master[da82b34]
git|nova|[5d04024]
git|nova-docker|[034a484]
git|os-apply-config|master[69879da]
git|os-collect-config|master[c2bdb4f]
git|os-refresh-config|master[1d828fa]
git|python-solumclient|master[07ad06c]
git|requirements|master[e368208]
git|solum|master[8d39c6b]
git|solum-dashboard|master[d27d97d]
git|swift|master[d819ae0]
git|tempest|master[05640de]
pkg|libkrb5-dev|1.12+dfsg-2ubuntu5.2
pkg|libldap2-dev|2.4.31-1+nmu2ubuntu8.3
pkg|libsasl2-dev|2.1.25.dfsg1-17build1
pkg|memcached|1.4.14-0ubuntu9
pkg|python-mysqldb|1.2.3-2ubuntu1
pkg|sqlite3|3.8.2-1ubuntu2.1
pkg|fping|3.8-1
pkg|conntrack|1:1.4.1-1ubuntu1
pkg|curl|7.35.0-1ubuntu2.8
pkg|dnsmasq-base|2.68-1ubuntu0.1
pkg|dnsmasq-utils|2.68-1ubuntu0.1
pkg|ebtables|2.0.10.4-3ubuntu1
pkg|gawk|1:4.0.1+dfsg-2.1ubuntu2
pkg|genisoimage|9:1.1.11-2ubuntu3
pkg|iptables|1.4.21-1ubuntu1
pkg|iputils-arping|3:20121221-4ubuntu1.1
pkg|kpartx|0.4.9-3ubuntu7.14
pkg|libjs-jquery-tablesorter|8-2
pkg|libmysqlclient-dev|5.5.50-0ubuntu0.14.04.1
pkg|parted|2.3-19ubuntu1.14.04.2
pkg|pm-utils|1.4.1-13ubuntu0.2
pkg|python-mysqldb|1.2.3-2ubuntu1
pkg|socat|1.7.2.3-1
pkg|sqlite3|3.8.2-1ubuntu2.1
pkg|sudo|1.8.9p5-1ubuntu1.2
pkg|vlan|1.9-3ubuntu10
pkg|cryptsetup|2:1.6.1-1ubuntu1
pkg|genisoimage|9:1.1.11-2ubuntu3
pkg|gir1.2-libosinfo-1.0|0.2.9-1
pkg|open-iscsi|2.0.873-3ubuntu9
pkg|qemu-utils|2.0.0+dfsg-2ubuntu1.27
pkg|sg3-utils|1.36-1ubuntu1
pkg|sysfsutils|2.1.0+repack-3ubuntu1
pkg|lvm2|2.02.98-6ubuntu3
pkg|open-iscsi|2.0.873-3ubuntu9
pkg|qemu-utils|2.0.0+dfsg-2ubuntu1.27
pkg|libpcre3-dev|1:8.31-2ubuntu2.3
pkg|dstat|0.7.2-3build1
pkg|acl|2.2.52-1
pkg|dnsmasq-base|2.68-1ubuntu0.1
pkg|ebtables|2.0.10.4-3ubuntu1
pkg|iptables|1.4.21-1ubuntu1
pkg|iputils-arping|3:20121221-4ubuntu1.1
pkg|iputils-ping|3:20121221-4ubuntu1.1
pkg|libmysqlclient-dev|5.5.50-0ubuntu0.14.04.1
pkg|postgresql-server-dev-all|154ubuntu1
pkg|python-mysqldb|1.2.3-2ubuntu1
pkg|sqlite3|3.8.2-1ubuntu2.1
pkg|sudo|1.8.9p5-1ubuntu1.2
pkg|vlan|1.9-3ubuntu10
pkg|ipset|6.20.1-1
pkg|conntrack|1:1.4.1-1ubuntu1
pkg|conntrackd|1:1.4.1-1ubuntu1
pkg|keepalived|1:1.2.7-1ubuntu1
pkg|curl|7.35.0-1ubuntu2.8
pkg|liberasurecode-dev|1.1.0-2~ubuntu14.04.1
pkg|make|3.81-8.2ubuntu3
pkg|memcached|1.4.14-0ubuntu9
pkg|sqlite3|3.8.2-1ubuntu2.1
pkg|xfsprogs|3.1.9ubuntu2
pkg|gettext|0.18.3.1-1ubuntu3
pip|alembic|0.8.7
pip|amqp|1.4.9
pip|anyjson|0.3.3
pip|aodhclient|0.6.0
pip|appdirs|1.4.0
pip|apt-xapian-index|0.45
pip|astroid|1.3.8
pip|automaton|1.3.0
pip|Babel|2.3.4
pip|backports.ssl-match-hostname|3.5.0.1
pip|bandit|1.0.1
pip|bashate|0.5.1
pip|beautifulsoup4|4.5.0
pip|boto|2.42.0
pip|cachetools|1.1.6
pip|castellan|0.4.0
pip|cffi|1.7.0
pip|chardet|2.0.1
pip|Cheetah|2.4.4
pip|https://opendev.org/openstack/cinder.git|19f7ef75a8a5027ab986dd75f3329df90c48e5eb
pip|cliff|2.1.0
pip|cloud-init|0.7.5
pip|cmd2|0.6.8
pip|colorama|0.3.7
pip|configobj|4.7.2
pip|contextlib2|0.5.4
pip|coverage|4.2
pip|croniter|0.3.12
pip|cryptography|1.4
pip|ddt|1.1.0
pip|debtcollector|1.6.0
pip|decorator|4.0.10
pip|Django|1.8.14
pip|django-appconf|1.0.2
pip|django-babel|0.5.1
pip|django-compressor|2.0
pip|django-nose|1.4.4
pip|django-openstack-auth|2.3.0
pip|django-pyscss|2.0.2
pip|dnspython|1.14.0
pip|docker-py|1.7.2
pip|docutils|0.12
pip|dogpile.cache|0.6.1
pip|enum34|1.1.6
pip|eventlet|0.19.0
pip|extras|1.0.0
pip|fasteners|0.14.1
pip|fixtures|3.0.0
pip|flake8|2.2.4
pip|flake8-docstrings|0.2.1.post1
pip|freezegun|0.3.7
pip|funcsigs|1.0.2
pip|functools32|3.2.3.post2
pip|futures|3.0.5
pip|futurist|0.16.0
pip|git-review|1.25.0
pip|gitdb|0.6.4
pip|GitPython|2.0.7
pip|https://opendev.org/openstack/glance.git|70b930136a256413a282aeaf205deb2264f54c1d
pip|glance-store|0.15.0
pip|google-api-python-client|1.5.1
pip|greenlet|0.4.10
pip|hacking|0.10.2
pip|https://opendev.org/openstack/heat.git|54bd5eb1bc279d90b2dcfed7b69c222235d4a5a6
pip|https://opendev.org/openstack/horizon.git|2e84d5591963fdba18faa509484a0646ed79b6a9
pip|html5lib|0.999
pip|httplib2|0.9.2
pip|idna|2.1
pip|ipaddress|1.0.16
pip|iso8601|0.1.11
pip|Jinja2|2.8
pip|jsonpatch|1.14
pip|jsonpointer|1.10
pip|jsonschema|2.5.1
pip|https://opendev.org/openstack/keystone.git|bfa13b7a64011f3f7a6a5b97cd7bd3b69bca9a02
pip|keystoneauth1|2.10.0
pip|keystonemiddleware|4.7.0
pip|kombu|3.0.35
pip|Landscape-Client|14.12
pip|linecache2|1.0.0
pip|lockfile|0.12.2
pip|logilab-astng|0.24.3
pip|logilab-common|1.2.2
pip|logutils|0.3.3
pip|lxml|3.6.1
pip|Mako|1.0.4
pip|MarkupSafe|0.23
pip|mccabe|0.2.1
pip|mock|2.0.0
pip|monotonic|1.1
pip|mox3|0.17.0
pip|msgpack-python|0.4.8
pip|MySQL-python|1.2.3
pip|netaddr|0.7.18
pip|netifaces|0.10.4
pip|networkx|1.11
pip|https://opendev.org/openstack/neutron.git|775893bb7f61c4641acbcb4ae16edf16e0989c39
pip|neutron-lib|0.3.0
pip|nodeenv|0.13.6
pip|nose|1.3.7
pip|nose-exclude|0.4.1
pip|nosehtmloutput|0.0.5
pip|nosexcover|1.0.10
pip|https://opendev.org/openstack/nova.git|859ff4893f699b680fec4db7dedd3bec8c8d0a1c
pip|nova-docker|0.0.1.dev268
pip|numpy|1.11.1
pip|oauth|1.0.1
pip|oauth2client|3.0.0
pip|oauthlib|1.1.2
pip|openstack.nose-plugin|0.11
pip|openstackdocstheme|1.4.0
pip|openstacksdk|0.9.1
pip|os-api-ref|0.3.0
pip|os-brick|1.4.0
pip|os-client-config|1.18.0
pip|os-collect-config|0.1.37
pip|os-testr|0.7.0
pip|os-win|1.1.0
pip|osc-lib|0.4.1
pip|oslo.cache|1.11.0
pip|oslo.concurrency|3.12.0
pip|oslo.config|3.14.0
pip|oslo.context|2.5.0
pip|oslo.db|4.8.0
pip|oslo.i18n|3.8.0
pip|oslo.log|3.12.0
pip|oslo.messaging|5.5.0
pip|oslo.middleware|3.15.0
pip|oslo.policy|1.12.0
pip|oslo.privsep|1.10.0
pip|oslo.reports|1.12.0
pip|oslo.rootwrap|5.0.0
pip|oslo.serialization|2.11.0
pip|oslo.service|1.13.0
pip|oslo.utils|3.16.0
pip|oslo.versionedobjects|1.13.0
pip|oslo.vmware|2.11.0
pip|oslosphinx|4.6.0
pip|oslotest|2.7.0
pip|osprofiler|1.3.0
pip|ovs|2.5.0
pip|PAM|0.4.2
pip|paramiko|2.0.2
pip|passlib|1.6.5
pip|Paste|2.0.3
pip|PasteDeploy|1.5.2
pip|pathlib|1.0.1
pip|pbr|1.10.0
pip|pecan|1.1.2
pip|pep257|0.7.0
pip|pep8|1.5.7
pip|pika|0.10.0
pip|pika-pool|0.1.3
pip|Pint|0.7.2
pip|pluggy|0.3.1
pip|ply|3.8
pip|positional|1.1.1
pip|prettytable|0.7.2
pip|psutil|1.2.1
pip|psycopg2|2.6.2
pip|py|1.4.31
pip|pyasn1|0.1.9
pip|pyasn1-modules|0.0.8
pip|pycadf|2.3.0
pip|pycparser|2.14
pip|pycurl|7.19.3
pip|PyECLib|1.2.0
pip|pyflakes|0.8.1
pip|Pygments|2.1.3
pip|pyinotify|0.9.6
pip|pylint|0.25.2
pip|PyMySQL|0.7.6
pip|pyOpenSSL|16.0.0
pip|pyparsing|2.1.5
pip|pysaml2|4.0.2
pip|pyScss|1.3.4
pip|pysendfile|2.0.1
pip|pyserial|2.6
pip|python-apt|0.9.3.5ubuntu2
pip|python-barbicanclient|4.0.1
pip|python-ceilometerclient|2.5.0
pip|python-cinderclient|1.8.0
pip|python-dateutil|2.5.3
pip|python-debian|0.1.21-nmu2ubuntu2
pip|python-designateclient|2.2.0
pip|python-editor|1.0.1
pip|python-glanceclient|2.2.0
pip|python-heatclient|1.3.0
pip|python-ironicclient|1.6.0
pip|python-keystoneclient|3.3.0
pip|python-magnumclient|2.2.0
pip|python-manilaclient|1.10.0
pip|python-memcached|1.58
pip|python-mimeparse|1.5.2
pip|python-mistralclient|2.0.0
pip|python-monascaclient|1.2.0
pip|python-neutronclient|4.2.0
pip|python-novaclient|5.0.0
pip|python-openstackclient|2.6.0
pip|python-saharaclient|0.16.0
pip|python-senlinclient|0.5.0
pip|https://opendev.org/openstack/python-solumclient.git|07ad06c8cc6b3512da69588e5a4a9543dee4d534
pip|python-subunit|1.2.0
pip|python-swiftclient|3.0.0
pip|python-troveclient|2.3.0
pip|python-zaqarclient|1.1.0
pip|pytz|2016.6.1
pip|PyYAML|3.11
pip|qpid-python|0.32.1
pip|rcssmin|1.0.6
pip|reno|1.8.0
pip|repoze.lru|0.6
pip|repoze.who|2.3
pip|requests|2.10.0
pip|requests-mock|1.0.0
pip|requestsexceptions|1.1.3
pip|retrying|1.3.3
pip|rfc3986|0.3.1
pip|rjsmin|1.0.12
pip|Routes|2.3.1
pip|rsa|3.4.2
pip|rtslib-fb|2.1.58
pip|ryu|4.3
pip|selenium|2.53.6
pip|semantic-version|2.5.0
pip|simplegeneric|0.8.1
pip|simplejson|3.8.2
pip|singledispatch|3.4.0.3
pip|six|1.10.0
pip|smmap|0.9.0
pip|https://opendev.org/openstack/solum-dashboard.git|d27d97dfd6b77bd0fc50ff1c279294eea76e6024
pip|Sphinx|1.2.3
pip|sphinxcontrib-httpdomain|1.5.0
pip|sphinxcontrib-pecanwsme|0.8.0
pip|SQLAlchemy|1.0.14
pip|sqlalchemy-migrate|0.10.0
pip|sqlparse|0.2.0
pip|ssh-import-id|3.21
pip|stevedore|1.16.0
pip|suds-jurko|0.6
pip|https://opendev.org/openstack/swift.git|d819ae00a5f589fc7cd11ddf95cfce7323d926e8
pip|taskflow|2.3.0
pip|https://opendev.org/openstack/tempest.git|05640decce8055e6844bdcc5a2c9bb18c1e3b2cb
pip|Tempita|0.5.2
pip|termcolor|1.1.0
pip|testrepository|0.0.20
pip|testresources|2.0.1
pip|testscenarios|0.5.0
pip|testtools|2.2.0
pip|tooz|1.41.0
pip|tox|2.3.1
pip|traceback2|1.4.0
pip|Twisted-Core|13.2.0
pip|Twisted-Names|13.2.0
pip|Twisted-Web|13.2.0
pip|unicodecsv|0.14.1
pip|unittest2|1.1.0
pip|uritemplate|0.6
pip|urllib3|1.16
pip|virtualenv|15.0.2
pip|voluptuous|0.9.1
pip|waitress|0.9.0
pip|warlock|1.2.0
pip|WebOb|1.6.1
pip|websocket-client|0.37.0
pip|websockify|0.8.0
pip|WebTest|2.0.23
pip|wrapt|1.10.8
pip|WSME|0.8.0
pip|xattr|0.8.0
pip|XStatic|1.0.1
pip|XStatic-Angular|1.4.10.1
pip|XStatic-Angular-Bootstrap|0.11.0.8
pip|XStatic-Angular-FileUpload|12.0.4.0
pip|XStatic-Angular-Gettext|2.1.0.2
pip|XStatic-Angular-lrdragndrop|1.0.2.2
pip|XStatic-Bootstrap-Datepicker|1.3.1.0
pip|XStatic-Bootstrap-SCSS|3.3.6.0
pip|XStatic-bootswatch|3.3.6.0
pip|XStatic-D3|3.1.6.2
pip|XStatic-Font-Awesome|4.5.0.0
pip|XStatic-Hogan|2.0.0.2
pip|XStatic-Jasmine|2.4.1.1
pip|XStatic-jQuery|1.10.2.1
pip|XStatic-JQuery-Migrate|1.2.1.1
pip|XStatic-jquery-ui|1.11.0.1
pip|XStatic-JQuery.quicksearch|2.0.3.1
pip|XStatic-JQuery.TableSorter|2.14.5.1
pip|XStatic-JSEncrypt|2.0.0.2
pip|XStatic-mdi|1.4.57.0
pip|XStatic-Rickshaw|1.5.0.0
pip|XStatic-roboto-fontface|0.4.3.2
pip|XStatic-smart-table|1.4.5.3
pip|XStatic-Spin|1.2.5.2
pip|XStatic-term.js|0.0.7.0
pip|xvfbwrapper|0.2.8
pip|yaql|1.1.1
pip|zope.interface|4.2.0
localrc|GIT_BASE=https://opendev.org
localrc|LOGFILE=/opt/stack/logs/stack.sh.log
localrc|DATABASE_PASSWORD=<password>
localrc|RABBIT_PASSWORD=<password>
localrc|SERVICE_TOKEN=password
localrc|SERVICE_PASSWORD=<password>
localrc|ADMIN_PASSWORD=<password>
localrc|NOVNC_FROM_PACKAGE=false
localrc|SOLUM_INSTALL_CEDARISH=False
localrc|SOLUM_INSTALL_DOCKERFILE=False
localrc|ENABLE_IDENTITY_V2=True
localrc|VIRT_DRIVER=docker
localrc|SOLUM_IMAGE_FORMAT=docker
localrc|DEFAULT_IMAGE_NAME=cirros
localrc|IMAGE_URLS=" "
localrc|enable_plugin solum https://opendev.org/openstack/solum
localrc|IP_VERSION=4
localrc|SERVICE_IP_VERSION=4
localrc|enable_service solum-api
localrc|enable_service solum-conductor
localrc|enable_service solum-deployer
localrc|enable_service solum-worker
localrc|disable_service n-net
localrc|enable_service q-svc
localrc|enable_service q-agt
localrc|enable_service q-dhcp
localrc|enable_service q-l3
localrc|enable_service q-meta
localrc|enable_service neutron
localrc|enable_service s-proxy
localrc|enable_service s-object
localrc|enable_service s-container
localrc|enable_service s-account
localrc|enable_service heat
localrc|enable_service h-api
localrc|enable_service h-api-cfn
localrc|enable_service h-api-cw
localrc|enable_service h-eng
localrc|disable_service tempest
vagrant@devstack:~$ docker --version
Docker version 1.9.1, build a34a1d5
wget -qO- https://get.docker.io/gpg | sudo apt-key add -
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install lxc-docker
service docker restart
sudo chmod o=rwx /var/run/docker.sock

@ -1,25 +0,0 @@
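# %TOKEN% placeholders below are substituted by devstack when this vhost template is installed.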
Listen %PUBLICPORT%
<VirtualHost *:%PUBLICPORT%>
WSGIDaemonProcess solum-api processes=%API_WORKERS% threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV%
WSGIProcessGroup solum-api
WSGIScriptAlias / %SOLUM_BIN_DIR%/solum-wsgi-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
AllowEncodedSlashes On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/%APACHE_NAME%/solum_api.log
CustomLog /var/log/%APACHE_NAME%/solum_api_access.log combined
<Directory %SOLUM_BIN_DIR%>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>

@ -1,37 +0,0 @@
[[local|localrc]]
LOGFILE=/opt/stack/logs/stack.sh.log
DATABASE_PASSWORD=solum
RABBIT_PASSWORD=solum
SERVICE_TOKEN=solum
SERVICE_PASSWORD=solum
ADMIN_PASSWORD=solum
NOVNC_FROM_PACKAGE=false
SOLUM_INSTALL_CEDARISH=False
SOLUM_INSTALL_DOCKERFILE=False
GIT_BASE=https://opendev.org
# DOCKER_REGISTRY_IMAGE=registry:0.6.9
VIRT_DRIVER=docker
SOLUM_IMAGE_FORMAT=docker
DEFAULT_IMAGE_NAME=cirros
IMAGE_URLS=" "
enable_plugin solum https://opendev.org/openstack/solum
enable_plugin heat https://opendev.org/openstack/heat
enable_plugin zun https://opendev.org/openstack/zun
IP_VERSION=4
SERVICE_IP_VERSION=4
enable_service solum-api
enable_service solum-conductor
enable_service solum-deployer
enable_service solum-worker
enable_service s-proxy
enable_service s-object
enable_service s-container
enable_service s-account

@ -1,468 +0,0 @@
#!/usr/bin/env bash
# Plugin file for Solum services
#-------------------------------
# Dependencies:
# ``functions`` file
# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
# ``ADMIN_{TENANT_NAME|PASSWORD}`` must be defined
# ``stack.sh`` calls the entry points in this order:
#
# install_solum
# install_solumclient
# configure_solum
# start_solum
# stop_solum
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set -o xtrace
# Defaults
# --------
GITREPO["solum-dashboard"]=${SOLUMDASHBOARD_REPO:-${GIT_BASE}/openstack/solum-dashboard.git}
GITBRANCH["solum-dashboard"]=${SOLUMDASHBOARD_BRANCH:-master}
GITDIR["solum-dashboard"]=$DEST/solum-dashboard
# Support entry points installation of console scripts
if [[ -d $SOLUM_DIR/bin ]]; then
SOLUM_BIN_DIR=$SOLUM_DIR/bin
else
SOLUM_BIN_DIR=$(get_python_exec_prefix)
fi
# Toggle for deploying Solum-API under HTTPD + mod_wsgi
SOLUM_USE_MOD_WSGI=${SOLUM_USE_MOD_WSGI:-False}
# Toggle for deploying Solum-API under uwsgi
SOLUM_USE_UWSGI=${SOLUM_USE_UWSGI:-True}
SOLUM_UWSGI=$SOLUM_BIN_DIR/solum-wsgi-api
SOLUM_UWSGI_CONF=$SOLUM_CONF_DIR/solum-api-uwsgi.ini
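# With uwsgi the API is served from a path on the main web endpoint instead of a dedicated port.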
if [[ "$SOLUM_USE_UWSGI" == "True" ]]; then
SOLUM_API_URL="$SOLUM_SERVICE_PROTOCOL://$SOLUM_SERVICE_HOST/application_deployment"
else
SOLUM_API_URL="$SOLUM_SERVICE_PROTOCOL://$SOLUM_SERVICE_HOST:$SOLUM_SERVICE_PORT"
fi
# Functions
# ---------
# create_solum_service_and_endpoint() - Set up required solum service and endpoint
function create_solum_service_and_endpoint() {
SOLUM_UPDATE_ROLE=$(get_or_create_role "solum_assembly_update")
# Give the role to the demo and admin users so they can use git push
# in either of the projects created by devstack
get_or_add_user_project_role $SOLUM_UPDATE_ROLE demo demo
get_or_add_user_project_role $SOLUM_UPDATE_ROLE admin demo
get_or_add_user_project_role $SOLUM_UPDATE_ROLE admin admin
SOLUM_SERVICE=$(get_or_create_service "solum" "application_deployment" "Solum Service")
get_or_create_endpoint "application_deployment" \
"$REGION_NAME" \
"$SOLUM_API_URL" \
"$SOLUM_API_URL" \
"$SOLUM_API_URL"
SOLUM_BUILDER_SERVICE=$(get_or_create_service "solum" "image_builder" "Solum Image Builder")
get_or_create_endpoint "image_builder" \
"$REGION_NAME" \
"$SOLUM_SERVICE_PROTOCOL://$SOLUM_SERVICE_HOST:$SOLUM_BUILDER_SERVICE_PORT" \
"$SOLUM_SERVICE_PROTOCOL://$SOLUM_SERVICE_HOST:$SOLUM_BUILDER_SERVICE_PORT" \
"$SOLUM_SERVICE_PROTOCOL://$SOLUM_SERVICE_HOST:$SOLUM_BUILDER_SERVICE_PORT"
}
# configure_nova_docker - Set config files, create data dirs, etc
function configure_nova_docker {
iniset $NOVA_CONF DEFAULT compute_driver zun.DockerDriver
# CentOS/RedHat distros don't start the service just after the package
# is installed unless it is explicitly enabled, so this killall would fail
# on them (nothing to kill) without the trailing "|| true".
sudo killall docker || true
# Enable debug level logging
if [ -f "/etc/default/docker" ]; then
sudo cat /etc/default/docker
sudo sed -i 's/^.*DOCKER_OPTS=.*$/DOCKER_OPTS=\"--debug --storage-opt dm.override_udev_sync_check=true\"/' /etc/default/docker
sudo cat /etc/default/docker
fi
if [ -f "/etc/sysconfig/docker" ]; then
sudo cat /etc/sysconfig/docker
sudo sed -i 's/^.*OPTIONS=.*$/OPTIONS=--debug --selinux-enabled/' /etc/sysconfig/docker
sudo cat /etc/sysconfig/docker
fi
if [ -f "/usr/lib/systemd/system/docker.service" ]; then
sudo cat /usr/lib/systemd/system/docker.service
sudo sed -i 's/docker daemon/docker daemon --debug/' /usr/lib/systemd/system/docker.service
sudo cat /usr/lib/systemd/system/docker.service
sudo systemctl daemon-reload
fi
sudo service docker start || true
# setup rootwrap filters
local rootwrap_conf_src_dir="$ZUN_PROJ_DIR/contrib/nova-docker/etc/nova"
sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.d/*.filters /etc/nova/rootwrap.d
cp -r $ZUN_PROJ_DIR/contrib/nova-docker/nova $SOLUM_DIR
setup_develop $SOLUM_DIR
}
# configure_solum() - Set config files, create data dirs, etc
function configure_solum() {
# configure_nova_docker
if [[ ! -d $SOLUM_CONF_DIR ]]; then
sudo mkdir -p $SOLUM_CONF_DIR
fi
sudo chown $STACK_USER $SOLUM_CONF_DIR
# To support private github repos, do not perform host key check for github.com
# Need this change on solum-worker instances
STACK_USER_SSH_DIR=/home/$STACK_USER/.ssh
if [[ ! -d $STACK_USER_SSH_DIR ]]; then
sudo mkdir -p $STACK_USER_SSH_DIR
fi
sudo chown $STACK_USER $STACK_USER_SSH_DIR
echo -e "Host github.com\n\tStrictHostKeyChecking no\n" > $STACK_USER_SSH_DIR/config
# config solum rootwrap
configure_rootwrap solum
# Generate sample config and configure common parameters.
mkdir -p /tmp/solum
pushd $SOLUM_DIR
oslo-config-generator --config-file=${SOLUM_DIR}/etc/solum/config-generator.conf --output-file=/tmp/solum/solum.conf.sample
popd
cp /tmp/solum/solum.conf.sample $SOLUM_CONF_DIR/$SOLUM_CONF_FILE
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE DEFAULT debug $SOLUM_DEBUG
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE DEFAULT use_syslog $SYSLOG
# make trace visible
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s"
# Setup keystone_authtoken section
configure_auth_token_middleware $SOLUM_CONF_DIR/$SOLUM_CONF_FILE $SOLUM_USER $SOLUM_AUTH_CACHE_DIR
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE keystone_authtoken www_authenticate_uri $KEYSTONE_AUTH_URI
# configure the database.
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE database connection `database_connection_url solum`
# configure worker lp operator user/password
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE worker lp_operator_user $SOLUM_USER
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE worker lp_operator_password $SERVICE_PASSWORD
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE worker lp_operator_tenant_name service
# configure the api servers to listen on
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE api host $(ipv6_unquote $SOLUM_SERVICE_HOST)
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE api port $SOLUM_SERVICE_PORT
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE builder host $(ipv6_unquote $SOLUM_SERVICE_HOST)
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE builder port $SOLUM_BUILDER_SERVICE_PORT
# configure assembly handler to create appropriate image format
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE api image_format $SOLUM_IMAGE_FORMAT
# common rpc settings
iniset_rpc_backend solum $SOLUM_CONF_DIR/$SOLUM_CONF_FILE DEFAULT
# service rpc settings
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE conductor topic solum-conductor
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE deployer topic solum-deployer
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE deployer handler heat
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE worker topic solum-worker
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE worker handler shell
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE worker proj_dir $SOLUM_PROJ_DIR
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE deployer max_attempts $SOLUM_MAX_ATTEMPTS
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE deployer wait_interval $SOLUM_WAIT_INTERVAL
iniset $SOLUM_CONF_DIR/$SOLUM_CONF_FILE deployer growth_factor $SOLUM_GROWTH_FACTOR
# configure AllHostsFilter in /etc/nova/nova.conf
iniset $NOVA_CONF_DIR/$NOVA_CONF_FILE DEFAULT scheduler_default_filters AllHostsFilter
if [[ $SOLUM_IMAGE_FORMAT == 'vm' ]]; then
# configure Virtdriver in /etc/nova/nova.conf
iniset $NOVA_CONF_DIR/$NOVA_CONF_FILE DEFAULT compute_driver libvirt.LibvirtDriver
#solum_install_start_docker_registry
solum_install_core_os
elif [[ $SOLUM_IMAGE_FORMAT != 'docker' ]]; then
echo "Unsupported SOLUM_IMAGE_FORMAT '$SOLUM_IMAGE_FORMAT'; expected 'docker' or 'vm'"
fi
if [[ "$USE_PYTHON3" = "True" ]]; then
# Switch off glance->swift communication as swift fails under py3.x
iniset /etc/glance/glance-api.conf glance_store default_store file
fi
if [ "$SOLUM_USE_MOD_WSGI" == "True" ]; then
_config_solum_apache_wsgi
fi
if [ "$SOLUM_USE_UWSGI" == "True" ]; then
write_uwsgi_config "$SOLUM_UWSGI_CONF" "$SOLUM_UWSGI" "/application_deployment"
fi
}
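# After configure_solum runs, solum.conf contains sections along these lines
# (values are illustrative and depend on your local settings):
#   [api]
#   host = 10.0.2.15
#   port = 9777
#   [worker]
#   handler = shell
#   topic = solum-worker
#   [deployer]
#   handler = heat
#   max_attempts = 2000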
# Register solum user in Keystone
function add_solum_user() {
create_service_user "solum" "admin"
}
function add_additional_solum_users() {
SOLUM_UPDATE_ROLE=$(get_or_create_role "solum_assembly_update")
for _LETTER in a b c; do
local TENANTNAME=solum_tenant_$_LETTER
get_or_create_project "$TENANTNAME" "default"
local USERNAME=solum_user_$_LETTER
get_or_create_user "$USERNAME" "solum" "default"
get_or_add_user_project_role "$SOLUM_UPDATE_ROLE" "$USERNAME" "$TENANTNAME"
get_or_add_user_project_role "$SOLUM_UPDATE_ROLE" "admin" "$TENANTNAME"
done
}
# create_solum_cache_dir() - Set up keystone signing folder
function create_solum_cache_dir() {
sudo mkdir -p $SOLUM_AUTH_CACHE_DIR
sudo chown $STACK_USER $SOLUM_AUTH_CACHE_DIR
sudo chmod 700 $SOLUM_AUTH_CACHE_DIR
rm -f $SOLUM_AUTH_CACHE_DIR/*
}
# init_solum() - Initialize databases, etc.
function init_solum() {
recreate_database solum utf8
# Run Solum db migrations
solum-db-manage --config-file $SOLUM_CONF_DIR/$SOLUM_CONF_FILE upgrade head
create_solum_cache_dir
# NOTE (devkulkarni): Barbican is causing failures such as below
# http://logs.openstack.org/33/206633/2/check/gate-solum-devstack-dsvm/933cbc3/logs/devstacklog.txt.gz#_2015-08-03_17_13_40_858
# So temporarily commenting out the barbican related code below.
# if is_service_enabled barbican; then
# # Fix barbican configuration
# BARBICAN_API_CONF="/etc/barbican/barbican.conf"
# BARBICAN_HOST_HREF=$(iniget $BARBICAN_API_CONF DEFAULT host_href)
# BARBICAN_HOST_HREF=${BARBICAN_HOST_HREF/localhost/$SERVICE_HOST}
# iniset $BARBICAN_API_CONF DEFAULT host_href $BARBICAN_HOST_HREF
# if is_running barbican; then
# # NOTE(ravips): barbican.{pid,failure} is removed to overcome current
# # limitations of stop_barbican. stop_barbican calls screen_stop() only
# # to remove the pid but not to kill the process and this causes pkill
# # in screen_stop to return non-zero exit code which is trapped by
# # devstack/stack.sh
# if [ -f $SERVICE_DIR/$SCREEN_NAME/barbican.pid ]; then
# rm $SERVICE_DIR/$SCREEN_NAME/barbican.pid
# fi
# stop_barbican
# if [ -f $SERVICE_DIR/$SCREEN_NAME/barbican.failure ]; then
# rm $SERVICE_DIR/$SCREEN_NAME/barbican.failure
# fi
# start_barbican
# fi
# fi
}
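# A quick sanity check after init_solum (illustrative, not run by the plugin):
# confirm the schema is at the latest revision with
#   solum-db-manage --config-file /etc/solum/solum.conf version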
# install_solumclient() - Collect source and prepare
function install_solumclient {
if use_library_from_git "python-solumclient"; then
git_clone_by_name "python-solumclient"
setup_dev_lib "python-solumclient"
else
pip_install_gr python-solumclient
fi
}
# install_solum() - Collect source and prepare
function install_solum() {
# Install package requirements
install_package expect
git_clone $SOLUM_REPO $SOLUM_DIR $SOLUM_BRANCH
setup_develop $SOLUM_DIR
if [ "$SOLUM_USE_MOD_WSGI" == "True" ]; then
install_apache_wsgi
fi
}
function install_solum_dashboard() {
git_clone_by_name "solum-dashboard"
setup_dev_lib "solum-dashboard"
if [ ! -f $HORIZON_DIR/openstack_dashboard/local/enabled/_50_solum.py ] ; then
ln -s $DEST/solum-dashboard/solumdashboard/local/enabled/_50_solum.py $HORIZON_DIR/openstack_dashboard/local/enabled/_50_solum.py
fi
restart_apache_server
}
function cleanup_solum_dashboard() {
rm $HORIZON_DIR/openstack_dashboard/local/enabled/_50_solum.py
}
# cleanup_solum_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
function cleanup_solum_apache_wsgi {
sudo rm -f $(apache_site_config_for solum-api)
}
# _config_solum_apache_wsgi() - Set WSGI config files of Solum
function _config_solum_apache_wsgi {
local solum_apache_conf=$(apache_site_config_for solum-api)
local solum_api_port=$SOLUM_SERVICE_PORT
local venv_path=""
sudo cp $SOLUM_FILES_DIR/apache-solum-api.template $solum_apache_conf
sudo sed -e "
s|%PUBLICPORT%|$solum_api_port|g;
s|%APACHE_NAME%|$APACHE_NAME|g;
s|%SOLUM_BIN_DIR%|$SOLUM_BIN_DIR|g;
s|%API_WORKERS%|$API_WORKERS|g;
s|%USER%|$STACK_USER|g;
s|%VIRTUALENV%|$venv_path|g
" -i $solum_apache_conf
}
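# For illustration, the rendered vhost ends up along these lines (values
# depend on your settings; see the bundled apache-solum-api.template):
#   Listen 9777
#   <VirtualHost *:9777>
#       WSGIScriptAlias / /usr/local/bin/solum-wsgi-api
#       ...
#   </VirtualHost>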
# start_solum() - Start running processes, including screen
function start_solum() {
local enabled_site_file=$(apache_site_config_for solum-api)
if [ -f ${enabled_site_file} ] && [ "$SOLUM_USE_MOD_WSGI" == "True" ]; then
enable_apache_site solum-api
restart_apache_server
tail_log solum-api /var/log/$APACHE_NAME/solum-api.log
elif [ "$SOLUM_USE_UWSGI" == "True" ]; then
run_process solum-api "$(which uwsgi) --ini $SOLUM_UWSGI_CONF"
else
run_process solum-api "$SOLUM_BIN_DIR/solum-api --config-file $SOLUM_CONF_DIR/$SOLUM_CONF_FILE"
fi
run_process solum-conductor "$SOLUM_BIN_DIR/solum-conductor --config-file $SOLUM_CONF_DIR/$SOLUM_CONF_FILE"
run_process solum-deployer "$SOLUM_BIN_DIR/solum-deployer --config-file $SOLUM_CONF_DIR/$SOLUM_CONF_FILE"
run_process solum-worker "$SOLUM_BIN_DIR/solum-worker --config-file $SOLUM_CONF_DIR/$SOLUM_CONF_FILE"
}
# stop_solum() - Stop running processes
function stop_solum() {
# Stop the solum services
if [ "$SOLUM_USE_MOD_WSGI" == "True" ]; then
disable_apache_site solum-api
restart_apache_server
else
stop_process solum-api
fi
stop_process solum-conductor
stop_process solum-deployer
stop_process solum-worker
if [[ $SOLUM_IMAGE_FORMAT == 'vm' ]]; then
solum_stop_docker_registry
fi
}
# solum_install_start_docker_registry() - Install and start Docker Registry
# --------------------------------------------------------------------------
solum_install_start_docker_registry() {
# install dependencies
sudo apt-get update
sudo apt-get -y install build-essential python-dev libevent-dev python-pip liblzma-dev git libssl-dev python-m2crypto swig
# clone docker registry
if [ ! -d /opt/docker-registry ] ; then
sudo git clone https://github.com/dotcloud/docker-registry.git /opt/docker-registry
fi
pushd /opt/docker-registry
sudo pip install -r requirements/main.txt
popd
# install docker registry
pip_command=`which pip`
pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX)
sudo $pip_command install /opt/docker-registry --build=${pip_build_tmp}
# initialize config file
sudo cp /opt/docker-registry/docker_registry/lib/../../config/config_sample.yml /opt/docker-registry/docker_registry/lib/../../config/config.yml
# start docker registry
gunicorn --access-logfile - --debug -k gevent -b 0.0.0.0:5042 -w 1 docker_registry.wsgi:application &
}
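# Sanity check (illustrative): the registry should answer on its bind port,
# e.g.
#   curl http://127.0.0.1:5042/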
solum_stop_docker_registry() {
stop_process docker-registry
rm -rf ${pip_build_tmp}
}
solum_install_core_os() {
wget https://stable.release.core-os.net/amd64-usr/current/coreos_production_openstack_image.img.bz2
bunzip2 coreos_production_openstack_image.img.bz2
glance image-create --name coreos --container-format bare --disk-format qcow2 --file coreos_production_openstack_image.img
}
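# Sanity check (illustrative): confirm the image was registered, e.g.
#   glance image-list | grep coreos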
# Main dispatcher
#----------------
if is_service_enabled solum-api solum-conductor solum-deployer solum-worker; then
if [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing Solum"
install_solum
install_solumclient
if is_service_enabled horizon; then
echo_summary "Installing Solum Dashboard"
install_solum_dashboard
fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
echo_summary "Configuring Solum"
add_solum_user
configure_solum
if is_service_enabled key; then
create_solum_service_and_endpoint
fi
add_additional_solum_users
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
echo_summary "Initializing Solum"
init_solum
start_solum
fi
if [[ "$1" == "unstack" ]]; then
stop_solum
if [ "$SOLUM_USE_MOD_WSGI" == "True" ]; then
cleanup_solum_apache_wsgi
fi
if [[ "$SOLUM_USE_UWSGI" == "True" ]]; then
remove_uwsgi_config "$SOLUM_UWSGI_CONF" "$SOLUM_UWSGI"
fi
if is_service_enabled horizon; then
echo_summary "Cleaning Solum Dashboard up"
cleanup_solum_dashboard
fi
fi
fi
# Restore xtrace
$XTRACE
# Local variables:
# mode: shell-script
# End:

@ -1,65 +0,0 @@
# Set up default repos
#----------------------
SOLUM_REPO=${SOLUM_REPO:-${GIT_BASE}/openstack/solum.git}
SOLUM_BRANCH=${SOLUM_BRANCH:-master}
GITREPO["python-solumclient"]=${SOLUMCLIENT_REPO:-${GIT_BASE}/openstack/python-solumclient.git}
GITBRANCH["python-solumclient"]=${SOLUMCLIENT_BRANCH:-master}
ZUN_REPO=${NOVADOCKER_REPO:-${GIT_BASE}/openstack/zun.git}
ZUN_BRANCH=${ZUN_BRANCH:-master}
# Set up default directories
#----------------------------
SOLUM_DIR=$DEST/solum
GITDIR["python-solumclient"]=$DEST/python-solumclient
SOLUM_CONF_DIR=${SOLUM_CONF_DIR:-/etc/solum}
SOLUM_FILES_DIR=$SOLUM_DIR/devstack/files
NOVA_CONF_DIR=${NOVA_CONF_DIR:-/etc/nova}
SOLUM_CONF_FILE=solum.conf
NOVA_CONF_FILE=nova.conf
ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin}
ADMIN_NAME=${ADMIN_NAME:-admin}
ADMIN_PASSWORD=${ADMIN_PASSWORD:-nova}
SOLUM_DEBUG=${SOLUM_DEBUG:-True}
SOLUM_USER=solum
SOLUM_AUTH_CACHE_DIR=${SOLUM_AUTH_CACHE_DIR:-/var/cache/solum}
SOLUM_INSTALL_CEDARISH=${SOLUM_INSTALL_CEDARISH:-False}
SOLUM_SERVICE_HOST=${SOLUM_SERVICE_HOST:-$SERVICE_HOST}
SOLUM_SERVICE_PORT=${SOLUM_SERVICE_PORT:-9777}
SOLUM_BUILDER_SERVICE_PORT=${SOLUM_BUILDER_SERVICE_PORT:-9778}
SOLUM_SERVICE_PROTOCOL=${SOLUM_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
SOLUM_IMAGE_FORMAT=${SOLUM_IMAGE_FORMAT:-'docker'}
SOLUM_MAX_ATTEMPTS=${SOLUM_MAX_ATTEMPTS:-2000}
SOLUM_WAIT_INTERVAL=${SOLUM_WAIT_INTERVAL:-1}
SOLUM_GROWTH_FACTOR=${SOLUM_GROWTH_FACTOR:-1.1}
SOLUM_PROJ_DIR=${SOLUM_PROJ_DIR:-/opt/stack/solum}
ZUN_PROJ_DIR=${NOVADOCKER_PROJ_DIR:-$DEST/zun}
SOLUM_INSTALL_DOCKER_REGISTRY=${SOLUM_INSTALL_DOCKER_REGISTRY:-False}
SOLUM_DEPLOYMENT_MECHANISM=${SOLUM_DEPLOYMENT_MECHANISM:-'docker_on_nova'}
DEVSTACK_DIR=${HOME}/devstack
# Devstack/local.conf settings
#-----------------------------
LOGFILE=/opt/stack/logs/stack.sh.log
GIT_BASE=https://opendev.org
SOLUM_IMAGE_FORMAT=docker
ZUN_DRIVER=docker
# Enable solum services
enable_service solum-api
enable_service solum-conductor
enable_service solum-deployer
enable_service solum-worker
# Enable swift services
enable_service s-proxy
enable_service s-object
enable_service s-container
enable_service s-account
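# A minimal local.conf stanza that pulls in this plugin typically looks like
# the following (illustrative; adjust URL and branch to your deployment):
#   [[local|localrc]]
#   enable_plugin solum https://opendev.org/openstack/solum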

@ -1,157 +0,0 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
-rm -rf $(BUILDDIR)/*
html: check-dependencies
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
.PHONY: check-dependencies
check-dependencies:
@python -c 'import sphinxcontrib.autohttp.flask' >/dev/null 2>&1 || (echo "ERROR: Missing Sphinx dependencies. Run: pip install sphinxcontrib-httpdomain" && exit 1)
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/solum.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/solum.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/solum"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/solum"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
	$(MAKE) -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."

@ -1,9 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
sphinx>=2.0.0,!=2.1.0 # BSD
openstackdocstheme>=2.2.1 # Apache-2.0
sphinxcontrib-pecanwsme>=0.10.0 # Apache-2.0
sphinxcontrib-httpdomain>=1.6.1 # BSD
reno>=3.1.0 # Apache-2.0

Binary file not shown (image, 1.4 KiB).

@ -1,8 +0,0 @@
==============================
Develop applications for Solum
==============================
.. toctree::
:maxdepth: 3
webapi/index

@ -1,9 +0,0 @@
======================
API Complete Reference
======================
.. toctree::
:maxdepth: 2
version
v1

@ -1,145 +0,0 @@
======
V1 API
======
.. autotype:: solum.api.controllers.common_types.Link
:members:
Platform
--------
.. autotype:: solum.api.controllers.v1.root.Platform
:members:
Plans
-----
.. rest-controller:: solum.api.controllers.v1.plan:PlansController
:webprefix: /v1/plans
.. rest-controller:: solum.api.controllers.v1.plan:PlanController
:webprefix: /v1/plans
.. autotype:: solum.api.controllers.v1.datamodel.plan.Plan
:members:
Pipelines
---------
.. rest-controller:: solum.api.controllers.v1.pipeline:PipelinesController
:webprefix: /v1/pipelines
.. rest-controller:: solum.api.controllers.v1.pipeline:PipelineController
:webprefix: /v1/pipelines
.. autotype:: solum.api.controllers.v1.datamodel.pipeline.Pipeline
:members:
Executions
----------
.. rest-controller:: solum.api.controllers.v1.execution:ExecutionsController
:webprefix: /v1/pipelines/(pipeline_id)/executions
.. autotype:: solum.api.controllers.v1.datamodel.execution.Execution
:members:
Assemblies
----------
.. rest-controller:: solum.api.controllers.v1.assembly:AssembliesController
:webprefix: /v1/assemblies
.. rest-controller:: solum.api.controllers.v1.assembly:AssemblyController
:webprefix: /v1/assemblies
.. autotype:: solum.api.controllers.v1.datamodel.assembly.Assembly
:members:
Services
--------
.. rest-controller:: solum.api.controllers.v1.service:ServicesController
:webprefix: /v1/services
.. rest-controller:: solum.api.controllers.v1.service:ServiceController
:webprefix: /v1/services
.. autotype:: solum.api.controllers.v1.datamodel.service.Service
:members:
Operations
----------
.. rest-controller:: solum.api.controllers.v1.operation:OperationsController
:webprefix: /v1/operations
.. rest-controller:: solum.api.controllers.v1.operation:OperationController
:webprefix: /v1/operations
.. autotype:: solum.api.controllers.v1.datamodel.operation.Operation
:members:
Sensors
-------
.. rest-controller:: solum.api.controllers.v1.sensor:SensorsController
:webprefix: /v1/sensors
.. rest-controller:: solum.api.controllers.v1.sensor:SensorController
:webprefix: /v1/sensors
.. autotype:: solum.api.controllers.v1.datamodel.sensor.Sensor
:members:
Components
----------
.. rest-controller:: solum.api.controllers.v1.component:ComponentsController
:webprefix: /v1/components
.. rest-controller:: solum.api.controllers.v1.component:ComponentController
:webprefix: /v1/components
.. autotype:: solum.api.controllers.v1.datamodel.component.Component
:members:
Extensions
----------
.. rest-controller:: solum.api.controllers.v1.extension:ExtensionsController
:webprefix: /v1/extensions
.. rest-controller:: solum.api.controllers.v1.extension:ExtensionController
:webprefix: /v1/extensions
.. autotype:: solum.api.controllers.v1.datamodel.extension.Extension
:members:
LanguagePacks
-------------
.. rest-controller:: solum.api.controllers.v1.language_pack:LanguagePacksController
:webprefix: /v1/language_packs
.. rest-controller:: solum.api.controllers.v1.language_pack:LanguagePackController
:webprefix: /v1/language_packs
.. autotype:: solum.api.controllers.v1.datamodel.language_pack.LanguagePack
:members:
Infrastructure
--------------
.. rest-controller:: solum.api.controllers.v1.infrastructure:InfrastructureController
:webprefix: /v1/infrastructure
.. autotype:: solum.api.controllers.v1.datamodel.infrastructure.Infrastructure
:members:
Triggers
--------
.. rest-controller:: solum.api.controllers.v1.trigger:TriggerController
:webprefix: /v1/triggers

@ -1,6 +0,0 @@
=================
Version discovery
=================
.. autotype:: solum.api.controllers.root.Version
:members:

@ -1,7 +0,0 @@
CLI Reference
=============
.. toctree::
:maxdepth: 1
solum-status

@ -1,78 +0,0 @@
============
solum-status
============
Synopsis
========
::
solum-status <category> <command> [<args>]
Description
===========
:program:`solum-status` is a tool that provides routines for checking the
status of a Solum deployment.
Options
=======
The standard pattern for executing a :program:`solum-status` command is::
solum-status <category> <command> [<args>]
Run without arguments to see a list of available command categories::
solum-status
Categories are:
* ``upgrade``
Detailed descriptions are below.
You can also run with a category argument such as ``upgrade`` to see a list of
all commands in that category::
solum-status upgrade
These sections describe the available categories and arguments for
:program:`solum-status`.
Upgrade
~~~~~~~
.. _solum-status-checks:
``solum-status upgrade check``
Performs a release-specific readiness check before restarting services with
new code. This command expects to have complete configuration and access
to databases and services.
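A typical invocation looks like the following (illustrative)::

    $ solum-status upgrade check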
**Return Codes**
.. list-table::
:widths: 20 80
:header-rows: 1
* - Return code
- Description
* - 0
- All upgrade readiness checks passed successfully and there is nothing
to do.
* - 1
- At least one check encountered an issue and requires further
investigation. This is considered a warning but the upgrade may be OK.
* - 2
- There was an upgrade status check failure that needs to be
investigated. This should be considered something that stops an
upgrade.
* - 255
- An unexpected error occurred.
**History of Checks**
**5.8.0 (Stein)**
* Placeholder to be filled in with checks as they are added in Stein.

@ -1,112 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.pecanwsme.rest',
'sphinxcontrib.httpdomain',
'wsmeext.sphinxext',
'openstackdocstheme',
]
wsme_protocols = ['restjson', 'restxml']
suppress_warnings = ['app.add_directive']
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'solum'
copyright = u'2014, Solum Contributors'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
html_theme = 'openstackdocs'
html_static_path = ['_static']
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/solum'
openstackdocs_pdf_link = True
openstackdocs_auto_name = False
openstackdocs_bug_project = 'solum'
openstackdocs_bug_tag = ''
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('man/solum-db-manage', 'solum-db-manage',
u'Script which helps manage specific database operations',
[u'Solum Developers'], 1),
]
# If true, show URL addresses after external links.
man_show_urls = True
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'doc-solum.tex', u'Solum Documentation',
u'OpenStack Foundation', 'manual'),
]
latex_domain_indices = False
latex_elements = {
'makeindex': '',
'printindex': '',
'preamble': r'\setcounter{tocdepth}{3}',
'maxlistdepth': '10',
}
# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
latex_use_xindy = False
# Disable smartquotes, they don't work in latex
smartquotes_excludes = {'builders': ['latex']}

@ -1,70 +0,0 @@
=======================
Configure and run Solum
=======================
Configuration Reference
-----------------------
To alter the default compute flavor, edit /etc/solum/templates/\*.yaml
::
flavor:
type: string
description: Flavor to use for servers
default: m1.tiny
Edit the ``default`` entry to the desired value.
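For example, to use a larger flavor (``m1.small`` here is illustrative; pick any flavor that exists in your cloud), change the default::

    flavor:
      type: string
      description: Flavor to use for servers
      default: m1.small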
Administrator Guide
-------------------
.. toctree::
:maxdepth: 2
../man/index
High Availability Guide
-----------------------
Operations Guide
----------------
Solum has been successfully running in production environments with the following example architecture:
.. image:: solum_architecture.png
Solum application deployment follows this flow:
* A load balancer listens on the HTTPS port
* Traffic travels across the private net to two or more nodes running Nginx, listening on port 443
* Nginx terminates SSL and redirects traffic over loopback to the Solum API service on port 9777
* The Solum API service authenticates with the Keystone service (restrict outbound traffic from the Solum API to the Keystone service only)
* To retrieve Solum applications, the API service sends messages to the Conductor service, which communicates over the service net with Trove to retrieve data
* During app deployment, the Solum API service sends a queue message to the RabbitMQ service [1] (which should be multi-node over the private net)
* The Solum Worker service picks up the queue message from RabbitMQ [2], pulls down the Git repository, builds it, runs unit tests (if specified), builds a Docker container, and uploads it to Swift
* This is a fairly lengthy process that completely blocks the service, so scale out your infrastructure to easily accommodate your traffic. A performance test based on your expected load can give you a good idea of how many nodes, and how many worker services per node, you need.
* The Solum Worker persists application state to Trove via the Conductor service
* Upon completion, the Worker service sends a message to RabbitMQ [3]
* The Solum Deployer service picks up the message from RabbitMQ [4] and calls Heat to deploy a stack with the user's information and the newly created Docker container
* The Deployer service also blocks on this call, so your infrastructure should scale out to support your user load
* The Deployer service persists application state to Trove via the Conductor service
The Solum deployment infrastructure depends on the following OpenStack services:
* Nova
* Keystone
* Trove
* Swift
* Glance
* Heat
To assist with deploying a new Solum architecture, please refer to the following cookbooks to get started:
* https://github.com/rackerlabs/cookbook-openstack-paas
* https://github.com/openstack/cookbook-openstack-identity.git
* https://github.com/openstack/cookbook-openstack-common.git
Security Guide
--------------

File diff suppressed because one or more lines are too long

Binary file not shown (image, 98 KiB).

@ -1 +0,0 @@
.. include:: ../../../CONTRIBUTING.rst

@ -1,25 +0,0 @@
Welcome to Solum's documentation!
=================================
Contents:
.. toctree::
:maxdepth: 2
user/index
install/index
configuration/index
admin/index
contributor/index
man/index
cli/index
.. only:: html
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`

@ -1,17 +0,0 @@
=============
Install Solum
=============
Distro specific installation
----------------------------
TODO add docs here on how to install on different distros like:
- debian
- redhat
- suse
- ubuntu
For a development installation use devstack
-------------------------------------------
.. include:: ../../../devstack/README.rst

@ -1,13 +0,0 @@
====================================
Man pages for services and utilities
====================================
---------------
Solum utilities
---------------
.. toctree::
:maxdepth: 2
solum-db-manage

@ -1,100 +0,0 @@
===============
solum-db-manage
===============
.. program:: solum-db-manage
SYNOPSIS
========
``solum-db-manage <action> [options]``
DESCRIPTION
===========
solum-db-manage helps manage Solum-specific database operations.
The migrations in the "alembic_migrations/versions/" directory contain
the changes needed to migrate from older Solum releases to newer
versions. A migration occurs by executing a script that details the
changes needed to upgrade/downgrade the database. The migration
scripts are ordered so that multiple scripts can run sequentially to
update the database. The scripts are executed by Solum's migration wrapper
which uses the Alembic library to manage the migration.
OPTIONS
=======
The standard pattern for executing a solum-db-manage command is:
``solum-db-manage <command> [<args>]``
Run with -h to see a list of available commands:
``solum-db-manage -h``
Commands are:
* version
* upgrade
* downgrade
* stamp
* revision
Detailed descriptions are below.
Upgrading/Downgrading
~~~~~~~~~~~~~~~~~~~~~
If you are a deployer or developer and want to migrate from Icehouse to Juno
or later, you must first add version tracking to the database:
``solum-db-manage stamp icehouse``
You can then upgrade to the latest database version via:
``solum-db-manage upgrade head``
To check the current database version:
``solum-db-manage version``
Downgrade the database to a specific revision:
``solum-db-manage downgrade 594288b1585a``
Generating migration templates (developers only)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A database migration script is required when you submit a change to Solum
that alters the database model definition. The migration script is a special
Python file that includes code to upgrade/downgrade the database to match the
changes in the model definition. Alembic executes these scripts in order to
provide a linear migration path between revisions. The solum-db-manage command
can be used to generate a migration template for you to complete. The operations
in the template are those supported by the Alembic migration library.
``solum-db-manage revision -m "description of revision" --autogenerate``
This generates a prepopulated template with the changes needed to match the
database state with the models. You should inspect the autogenerated template
to ensure that the proper models have been altered.
In rare circumstances, you may want to start with an empty migration template
and manually author the changes necessary for an upgrade/downgrade. You can
create a blank file via:
``solum-db-manage revision -m "description of revision"``
FILES
=====
The /etc/solum/solum.conf file contains global options which can be
used to configure some aspects of solum-db-manage, for example the DB
connection and logging.
BUGS
====
Solum issues are tracked in Launchpad so you can view or report bugs here:
`OpenStack Solum Bugs <https://bugs.launchpad.net/solum>`__

@ -1,374 +0,0 @@
=======================
Solum Quick Start Guide
=======================
Setup Solum development environment
------------------------------------
https://wiki.openstack.org/wiki/Solum/solum-development-setup
The following is a guide to deploying an app with Solum.
Overview
--------
::
$ solum languagepack create <NAME> <GIT_REPO>
$ solum languagepack show <UUID/Name>
$ solum languagepack logs <UUID>
$ solum languagepack list
$ solum app create --app-file <app_file> [--param-file param_file]
$ solum app show <UUID/Name>
$ curl <application_uri>
In this document, we work through a Python example to demonstrate how you can use Solum to deploy an application.
Create a languagepack
---------------------
Before deploying an app on Solum, we need to create a runtime environment, called a languagepack, for the application.
A languagepack must exist in Solum, as every application deployed with Solum requires an association to a languagepack to run (even
if the languagepack only implements a no-op). Languagepacks can be added to Solum in the following ways:
1. Solum comes with pre-existing languagepacks
2. A Solum system operator creates and adds languagepacks, available to all users
3. A Solum user creates and adds languagepacks, available only to that user
To learn more, see the languagepacks section of this document.
1. Authenticate to Keystone.
The easiest way is to use the credentials supplied by Devstack.
::
$ . ~/devstack/openrc
2. Create languagepack
::
$ solum languagepack create python https://github.com/rackspace-solum-samples/solum-languagepack-python.git
+-------------+--------------------------------------------------------------------------+
| Property | Value |
+-------------+--------------------------------------------------------------------------+
| status | QUEUED |
| source_uri | https://github.com/rackspace-solum-samples/solum-languagepack-python.git |
| description | None |
| uuid | 0233f461-5fb0-4de7-8f06-5527721c3e97 |
| name | python |
+-------------+--------------------------------------------------------------------------+
Solum takes a few minutes to build your languagepack. You can check the state by using the languagepack show command.
A languagepack is ready for use once the state changes to 'READY'.
::
$ solum languagepack show python
+-------------+--------------------------------------------------------------------------+
| Property | Value |
+-------------+--------------------------------------------------------------------------+
| status | READY |
| source_uri | https://github.com/rackspace-solum-samples/solum-languagepack-python.git |
| description | None |
| uuid | 0233f461-5fb0-4de7-8f06-5527721c3e97 |
| name | python |
+-------------+--------------------------------------------------------------------------+
You can check logs that were generated while building the languagepack with the following command.
This is a great way to debug your languagepack if it fails to build.
::
$ solum languagepack logs python
+--------------------------------------+---------------------+-----------------------------------------------------------------------------+
| resource_uuid | created_at | local_storage |
+--------------------------------------+---------------------+-----------------------------------------------------------------------------+
| 0233f461-5fb0-4de7-8f06-5527721c3e97 | 2016-04-07 13:33:35 | /var/log/solum/worker/languagepack-2a8cd98e-8b37-4ec7-b17b-f511814a7d6f.log |
+--------------------------------------+---------------------+-----------------------------------------------------------------------------+
You can list all available languagepacks with the following command.
::
$ solum languagepack list
+--------------------------------------+--------+-------------+--------+--------------------------------------------------------------------------+
| uuid | name | description | status | source_uri |
+--------------------------------------+--------+-------------+--------+--------------------------------------------------------------------------+
| 95310b74-b3ed-4150-b0bf-e64c21359900 | java | None | READY | https://github.com/rackspace-solum-samples/solum-languagepack-java.git |
| 96f889e7-e8db-4ae3-a38d-0bfda8268e30 | python | None | READY | https://github.com/rackspace-solum-samples/solum-languagepack-python.git |
+--------------------------------------+--------+-------------+--------+--------------------------------------------------------------------------+
Create your app
---------------
Solum clones code from the user's public Git repository or the user's public/private GitHub repository. Before you begin, push your code to a Git repo. From within your devstack host, you can now run solum commands to build and deploy your application.
3. To register an app with Solum, you will need to write an appfile to describe it.
The following appfile deploys a sample Python application.
You can find other examples in the :code:`examples/apps/` folder of the solum repo on GitHub.
To learn more, see the appfile section of this document.
::
version: 1
name: cherrypy
description: python web app
languagepack: python
source:
repository: https://github.com/rackspace-solum-samples/solum-python-sample-app.git
revision: master
workflow_config:
test_cmd: ./unit_tests.sh
run_cmd: python app.py
trigger_actions:
- unittest
- build
- deploy
ports:
- 80
The app is named :code:`cherrypy`, and it describes a single application, running the code from the given GitHub repo.
The code in that repo is a Python app that listens for HTTP requests and returns environment variables supplied by the user during app creation.
We have configured this example to listen on port 80.
Deploy your app
---------------
4. Create an app by supplying the appfile. This registers your app with Solum.
For demonstration purposes, we will use the provided example.
::
$ solum app create --app-file appfile.yaml --param-file params.yaml
+-------------+---------------------------------------------------------------------+
| Property | Value |
+-------------+---------------------------------------------------------------------+
| description | Sample Python web app. |
| uri | http://10.0.2.15:9777/v1/plans/4a795b99-936d-4330-be4d-d2099b160075 |
| name | cherrypy |
| trigger_uri | |
| uuid | 4a795b99-936d-4330-be4d-d2099b160075 |
+-------------+---------------------------------------------------------------------+
The :code:`uri` field above refers to the newly-registered app.
At this point, your app is not deployed yet.
Use the uuid from above to deploy it.
5. Deploy the app
::
$ solum app deploy 4a795b99-936d-4330-be4d-d2099b160075
+------------+---------------------------------------------------------------------+
| Property | Value |
+------------+---------------------------------------------------------------------+
| wf_id | 1 |
| created_at | 2016-04-07T13:36:45.497519 |
| app_id | 7d64347c-93d6-4adf-bf70-309f9d53c034 |
| actions | [u'unittest', u'build', u'deploy'] |
| updated_at | 2016-04-07T13:36:45.497519 |
| source | {u'repository': u'https://github.com/rackspace-solum-samples/solum- |
| | python-sample-app.git', u'revision': u'master'} |
| config | {u'run_cmd': u'python app.py', u'test_cmd': u'./unit_tests.sh'} |
| id | 97e7e2c1-8ba1-4320-9831-b5baef1d480d |
+------------+---------------------------------------------------------------------+
Solum builds a Docker image by layering your app's code on top of the related languagepack's docker image.
Then, Solum creates a stack via Heat to deploy your app.
At this point, Solum is done, and in a matter of minutes your app will be deployed.
6. You can monitor the progress of your app as it builds and deploys.
The status field will show the progress of your app through the process.
::
$ solum app show 4a795b99-936d-4330-be4d-d2099b160075
+-----------------+------------------------------------------------------------------------+
| Property | Value |
+-----------------+------------------------------------------------------------------------+
| status | BUILDING |
| description | Sample Python web app. |
| application_uri | None |
| created_at | 2015-03-10T22:47:04 |
| updated_at | 2015-03-10T22:49:59 |
| name | cherrypy |
| trigger_uri | http://10.0.2.15:9777/v1/triggers/b6eb26e5-3b7b-416b-b932-302c514071cc |
| uuid | 185f2741-61e0-497e-b2b7-c890c7e151dd |
+-----------------+------------------------------------------------------------------------+
7. Run the :code:`solum app show` command a few times to see the status change. You will notice the :code:`status` field changes to DEPLOYMENT_COMPLETE and the :code:`application_uri` is available.
::
$ solum app show cherrypy
+------------------+---------------------------------------------------------------------+
| Property | Value |
+------------------+---------------------------------------------------------------------+
| app_url | 172.24.4.3:80 |
| entry_points | |
| description | python web app |
| created_at | 2016-04-07T13:36:32 |
| languagepack | python |
| target_instances | 1 |
| ports | [80] |
| source | {u'repository': u'https://github.com/rackspace-solum-samples/solum- |
| | python-sample-app.git', u'revision': u'master'} |
| trigger | [u'unittest', u'build', u'deploy'] |
| trigger_uuid | b85bdf42-d126-4223-9a64-8c10930447e3 |
| id | 4a795b99-936d-4330-be4d-d2099b160075 |
| name | cherrypy |
+------------------+---------------------------------------------------------------------+
'cherrypy' workflows and their status:
+-------+--------------------------------------+----------------------+
| wf_id | id | status |
+-------+--------------------------------------+----------------------+
| 1 | 97e7e2c1-8ba1-4320-9831-b5baef1d480d | DEPLOYMENT_COMPLETE |
+-------+--------------------------------------+----------------------+
Connect to Your App
-------------------
8. Connect to your app using the value in the :code:`app_url` field.
::
$ curl <your_application_uri_here>
Update Your App
---------------
You can set up your Git repository to fire an on_commit action that makes a webhook call to Solum each time you make a commit. The webhook call sends a POST request to http://10.0.2.15:9777/v1/triggers/<trigger_id>, causing Solum to automatically build a new image and re-deploy your application.
To do this with a GitHub repo, go to your repo on the web, click on Settings, and then select "Webhooks & Services" from the left navigation menu. In the Webhooks section, click "Add Webhook", and enter your GitHub account password when prompted. Copy and paste the value of trigger_uri from your "solum app show" command into the "Payload URL" field. Note that this will only work if you have a public IP address or hostname in the trigger_uri field. Select the "application/vnd.github.v3+json" payload version, and use the radio buttons and checkboxes provided to decide whether to trigger this webhook only on "git push" or on other events too. Finish by clicking "Add Webhook". The next time that event is triggered on GitHub, Solum will automatically check out your change, build it, and deploy it for you.
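You can also exercise a trigger by hand to test this flow (illustrative; substitute the trigger_uri value reported by :code:`solum app show`)::

    $ curl -X POST http://10.0.2.15:9777/v1/triggers/b6eb26e5-3b7b-416b-b932-302c514071cc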
Languagepacks
-------------
Languagepacks define the runtime environment required by your application.
To build a languagepack, Solum requires a Git repo containing a Dockerfile. Solum builds a Docker image and stores it for use when building and deploying your application.
See the sample languagepack repo below
::
    https://github.com/rackspace-solum-samples/solum-languagepack-python
Here are some best practices to keep in mind while creating a languagepack:
1. A good languagepack is reusable across applications.
2. All operating-system-level libraries should be defined in the languagepack.
3. Test tools should be installed in the languagepack.
4. A languagepack includes a mandatory build.sh script, which Solum CI expects and executes during the build phase.
appfile
--------
An appfile is used to define your application and is passed in during application creation.
::
$ solum app create --app-file appfile.yaml --param-file params.yaml
In the above command, we use the --app-file flag to provide the appfile, shown below
::
version: 1
name: cherrypy
description: python web app
languagepack: python
source:
repository: https://github.com/rackspace-solum-samples/solum-python-sample-app.git
revision: master
workflow_config:
test_cmd: ./unit_tests.sh
run_cmd: python app.py
trigger_actions:
- test
- build
- deploy
ports:
- 80
The appfile is used to define the following:
1. The Git repo where your code exists
2. The languagepack to use
3. A name for your application
4. A command that executes your unit tests. This command is executed during the unit test phase of the Solum CI workflow.
5. The port which is exposed publicly for accessing your application.
6. A command that runs your application.
App configuration and environment variables
-------------------------------------------
Applications deployed using Solum can be configured using environment variables. To inject environment variables, provide a parameter file during application creation.
::
$ solum app create --app-file appfile.yaml --param-file params.yaml
In the example above, we pass in the parameter file (shown below) using the --param-file flag.
The parameter file contains key-value pairs which are injected into the application's runtime environment.
::
key: secret_key
user: user_name_goes_here
password: password_for_demo
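Inside the running application these keys surface as ordinary environment variables; for example (illustrative), a shell inside the container could read::

    $ echo "$user"
    user_name_goes_here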
Set up a Development Environment
--------------------------------
These instructions are for those who want to contribute to Solum, or use features that are not yet in the latest release.
1. Clone the Solum repo.
The Solum repository is available on the OpenStack Git server.
::
$ mkdir ~/Solum
$ cd ~/Solum
$ git clone https://opendev.org/openstack/solum.git
In addition to Solum, your environment will also need Devstack to configure and run the requisite OpenStack components, including Keystone, Glance, Nova, Neutron, and Heat.
Vagrant Dev Environment
------------------------
2. We have provided a Vagrant environment to deploy Solum and its required OpenStack components via Devstack. We recommend using this approach if you are planning to contribute to Solum. This takes about the same amount of time as setting up Devstack manually, but it automates the setup for you.
By default, it uses VirtualBox as its provider. We have tested this with Vagrant 1.5.4.
The environment will need to know where your Solum code is, via the environment variable :code:`SOLUM`.
::
$ cd ~/Solum
$ export SOLUM=~/Solum/solum
$ git clone https://github.com/rackerlabs/vagrant-solum-dev.git vagrant
$ cd vagrant
3. Bring up the devstack vagrant environment.
This may take a while. Allow about an hour, more or less depending on your machine speed and its connection to the internet.
::
$ vagrant up --provision devstack
$ vagrant ssh devstack
Devstack
---------
Using Vagrant is not a requirement for deploying Solum.
You may instead opt to install Solum and Devstack yourself.
The details of integrating Solum with Devstack can be found in :code:`devstack/README.rst`.

@ -1,40 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is an example Apache2 configuration file for using the
# solum API through mod_wsgi.
Listen 9777
<VirtualHost *:9777>
WSGIDaemonProcess solum-api processes=2 threads=10 user=SOMEUSER display-name=%{GROUP}
WSGIProcessGroup solum-api
WSGIScriptAlias / /usr/local/bin/solum-wsgi-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
AllowEncodedSlashes On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/solum_api.log
CustomLog /var/log/apache2/solum_api_access.log combined
<Directory /usr/local/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>

@ -1,4 +0,0 @@
To generate the sample solum.conf file, run the following
command from the top level of the solum directory:
tox -egenconfig

@ -1,8 +0,0 @@
{
"uri": "artifact",
"name": "artifact",
"type": "attribute_definition",
"description": "CAMP 1.1 standard artifact attribute definition",
"documentation": "http://docs.oasis-open.org/camp/camp-spec/v1.1/camp-spec-v1.1.pdf",
"attribute_type": "URI"
}

@ -1,8 +0,0 @@
{
"uri": "artifacts",
"name": "artifacts",
"type": "attribute_definition",
"description": "CAMP 1.1 standard artifacts attribute definition",
"documentation": "http://docs.oasis-open.org/camp/camp-spec/v1.1/camp-spec-v1.1.pdf",
"attribute_type": "ArtifactSpecification[]"
}

@ -1,8 +0,0 @@
{
"uri": "assemblies",
"name": "assemblies",
"type": "attribute_definition",
"description": "CAMP 1.1 standard assemblies attribute definition",
"documentation": "http://docs.oasis-open.org/camp/camp-spec/v1.1/camp-spec-v1.1.pdf",
"attribute_type": "Link[]"
}

@ -1,8 +0,0 @@
{
"uri": "assemblies_uri",
"name": "assemblies_uri",
"type": "attribute_definition",
"description": "CAMP 1.1 standard assemblies_uri attribute definition",
"documentation": "http://docs.oasis-open.org/camp/camp-spec/v1.1/camp-spec-v1.1.pdf",
"attribute_type": "URI"
}

@ -1,8 +0,0 @@
{
"uri": "assembly_links",
"name": "assembly_links",
"type": "attribute_definition",
"description": "CAMP 1.1 standard assembly_links attribute definition",
"documentation": "http://docs.oasis-open.org/camp/camp-spec/v1.1/camp-spec-v1.1.pdf",
"attribute_type": "Link[]"
}

@ -1,8 +0,0 @@
{
"uri": "attribute_definition_links",
"name": "attribute_definition_links",
"type": "attribute_definition",
"description": "CAMP 1.1 standard attribute_definition_links attribute definition",
"documentation": "http://docs.oasis-open.org/camp/camp-spec/v1.1/camp-spec-v1.1.pdf",
"attribute_type": "AttributeLink[]"
}

@ -1,8 +0,0 @@
{
"uri": "attribute_type",
"name": "attribute_type",
"type": "attribute_definition",
"description": "CAMP 1.1 standard attribute_type attribute definition",
"documentation": "http://docs.oasis-open.org/camp/camp-spec/v1.1/camp-spec-v1.1.pdf",
"attribute_type": "String"
}

@ -1,8 +0,0 @@
{
"uri": "auth_scheme",
"name": "auth_scheme",
"type": "attribute_definition",
"description": "CAMP 1.1 standard auth_scheme attribute definition",
"documentation": "http://docs.oasis-open.org/camp/camp-spec/v1.1/camp-spec-v1.1.pdf",
"attribute_type": "String"
}

@ -1,8 +0,0 @@
{
"uri": "backward_compatible_implementation_versions",
"name": "backward_compatible_implementation_versions",
"type": "attribute_definition",
"description": "CAMP 1.1 standard backward_compatible_implementation_versions attribute definition",
"documentation": "http://docs.oasis-open.org/camp/camp-spec/v1.1/camp-spec-v1.1.pdf",
"attribute_type": "String[]"
}

@ -1,8 +0,0 @@
{
"uri": "backward_compatible_specification_versions",
"name": "backward_compatible_specification_versions",
"type": "attribute_definition",
"description": "CAMP 1.1 standard backward_compatible_specification_versions attribute definition",
"documentation": "http://docs.oasis-open.org/camp/camp-spec/v1.1/camp-spec-v1.1.pdf",
"attribute_type": "String[]"
}

Some files were not shown because too many files have changed in this diff.