moving to devstack first phase

This patch gets rid of all ansible code
as well as the container vs host installations
and does all install in the devstack way.

Changes summary:
1. Add devstack/plugin.sh
2. modify s2aio.sh to:
  - Get rid of all docker / host installation code
  - Get rid of all ansible related code mainly stuff
    that make sure root and user can ssh without a
    password - ansible requirements
3. modify tox.ini and .gitignore accordingly
4. update docs accordingly

Once phase 1 is completed the next phase needs to:
1. Make plugin.sh more devstack friendly. specifically,
   use devstack funcs as much as possible (e.g. not
   running python setup install directly.)
2. Offer a patch to devstack that would call
   devstack/plugins.sh with appropriate gate

Change-Id: I3818b38a62e264267ce04fa789f1bca07111397d
This commit is contained in:
Eran Rom 2017-02-06 12:37:28 +02:00
parent 96027448c0
commit 571becce17
61 changed files with 569 additions and 2285 deletions
.gitignore
devstack
doc/source
install
s2aio.shsdeploy.sh
src/java
storlets
gateway/gateways/docker
tools
tests
tox.ini

11
.gitignore vendored

@ -25,15 +25,8 @@ lib64
# Installer logs
pip-log.txt
# auto created for deploy
install/storlets/prepare_host
install/storlets/deploy/
install/storlets/prepare_vars.yml
install/swift/vars.yml
install/swift/hosts
cluster_config.json
install/swift/hosts
Engine/SMScripts/bin/
# auto created for func test
test.conf
# auto created on build
src/java/dependencies/

16
devstack/localrc.sample Normal file

@ -0,0 +1,16 @@
ENABLE_HTTPD_MOD_WSGI_SERVICES=False
ENABLED_SERVICES=key,swift,mysql
HOST_IP=127.0.0.1
ADMIN_PASSWORD=admin
MYSQL_PASSWORD=$ADMIN_PASSWORD
RABBIT_PASSWORD=$ADMIN_PASSWORD
SERVICE_PASSWORD=$ADMIN_PASSWORD
SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-http}
SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080}
# service local host is used for ring building
SWIFT_SERVICE_LOCAL_HOST=$HOST_IP
# service listen address for proxy
SWIFT_SERVICE_LISTEN_ADDRESS=$HOST_IP
SWIFT_LOOPBACK_DISK_SIZE=20G
SWIFT_MAX_FILE_SIZE=5368709122
SWIFT_HASH=1234567890

387
devstack/plugin.sh Normal file

@ -0,0 +1,387 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Devstack plugin entry points for installing storlets on top of Swift.
# Functions to control the configuration and operation of the **Swift** service
# Dependencies:
#
# - ``functions`` file
# - ``functions-common`` file
# - ``STACK_USER`` must be defined
# - ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined
# - ``lib/swift`` file
# - ``lib/keystone`` file
#
# - install_storlets
# TODO(eranr):
# Add clean_storlets

# Save trace setting
_XTRACE_LIB_SWIFT=$(set +o | grep xtrace)
set +o xtrace

# Defaults
# --------

# General devstack install tunables.
# NOTE(review): these mirror the localrc.sample defaults — the Keystone
# admin credentials devstack was stacked with; confirm against localrc.
ADMIN_USER=admin
ADMIN_PASSWORD=admin
ADMIN_PROJECT=admin
# Default project and users created for the storlets functional tests.
SWIFT_DEFAULT_PROJECT=test
SWIFT_DEFAULT_USER=tester
SWIFT_DEFAULT_USER_PWD=testing
SWIFT_MEMBER_USER=tester_member
SWIFT_MEMBER_USER_PWD=member

# Storlets install tunables
STORLETS_DEFAULT_USER_DOMAIN_ID=${STORLETS_DEFAULT_USER_DOMAIN_ID:-default}
STORLETS_DEFAULT_PROJECT_DOMAIN_ID=${STORLETS_DEFAULT_PROJECT_DOMAIN_ID:-default}
STORLET_MANAGEMENT_USER=${STORLET_MANAGEMENT_USER:-$USER}
# Root of everything storlets puts on disk: docker storage, helper
# scripts, per-scope storlets/logs/cache/pipes (see *_DIR below).
STORLETS_DOCKER_DEVICE=${STORLETS_DOCKER_DEVICE:-/home/docker_device}
STORLETS_DOCKER_BASE_IMG=${STORLETS_DOCKER_BASE_IMG:-ubuntu:16.04}
STORLETS_DOCKER_BASE_IMG_NAME=${STORLETS_DOCKER_BASE_IMG_NAME:-ubuntu_16.04}
# uid/gid of the in-container "swift" user that runs the storlet daemons.
STORLETS_DOCKER_SWIFT_GROUP_ID=${STORLETS_DOCKER_SWIFT_GROUP_ID:-1003}
STORLETS_DOCKER_SWIFT_USER_ID=${STORLETS_DOCKER_SWIFT_USER_ID:-1003}
STORLETS_SWIFT_RUNTIME_USER=${STORLETS_SWIFT_RUNTIME_USER:-$USER}
STORLETS_SWIFT_RUNTIME_GROUP=${STORLETS_SWIFT_RUNTIME_GROUP:-$USER}
STORLETS_MIDDLEWARE_NAME=storlet_handler
STORLETS_STORLET_CONTAINER_NAME=${STORLETS_STORLET_CONTAINER_NAME:-storlet}
STORLETS_DEPENDENCY_CONTAINER_NAME=${STORLETS_DEPENDENCY_CONTAINER_NAME:-dependency}
# NOTE: the "CONTAIER" misspelling is kept for compatibility with
# existing configuration that already uses this variable name.
STORLETS_LOG_CONTAIER_NAME=${STORLETS_LOG_CONTAIER_NAME:-log}
STORLETS_GATEWAY_MODULE=${STORLETS_GATEWAY_MODULE:-docker}
STORLETS_GATEWAY_CONF_FILE=${STORLETS_GATEWAY_CONF_FILE:-/etc/swift/storlet_docker_gateway.conf}
STORLETS_PROXY_EXECUTION_ONLY=${STORLETS_PROXY_EXECUTION_ONLY:-false}
STORLETS_SCRIPTS_DIR=${STORLETS_SCRIPTS_DIR:-"$STORLETS_DOCKER_DEVICE"/scripts}
STORLETS_STORLETS_DIR=${STORLETS_STORLETS_DIR:-"$STORLETS_DOCKER_DEVICE"/storlets/scopes}
STORLETS_LOGS_DIR=${STORLETS_LOGS_DIR:-"$STORLETS_DOCKER_DEVICE"/logs/scopes}
STORLETS_CACHE_DIR=${STORLETS_CACHE_DIR:-"$STORLETS_DOCKER_DEVICE"/cache/scopes}
STORLETS_PIPES_DIR=${STORLETS_PIPES_DIR:-"$STORLETS_DOCKER_DEVICE"/pipes/scopes}
STORLETS_RESTART_CONTAINER_TIMEOUT=${STORLETS_RESTART_CONTAINER_TIMEOUT:-3}
STORLETS_RUNTIME_TIMEOUT=${STORLETS_RUNTIME_TIMEOUT:-40}
# Scratch area where Dockerfiles are staged before 'docker build'.
TMP_REGISTRY_PREFIX=/tmp/registry

# Functions
# ---------
_storlets_swift_start() {
    # Start all Swift services from the devstack run dir.  Failures are
    # tolerated (services may already be running).
    # Quote the expansion so a run dir containing spaces cannot word-split.
    swift-init --run-dir="${SWIFT_DATA_DIR}/run" all start || true
}
_storlets_swift_stop() {
    # Stop all Swift services; tolerate services that are not running.
    # Quote the expansion so a run dir containing spaces cannot word-split.
    swift-init --run-dir="${SWIFT_DATA_DIR}/run" all stop || true
}
_storlets_swift_restart() {
    # Restart all Swift services so they pick up the storlets middleware.
    # Quote the expansion so a run dir containing spaces cannot word-split.
    swift-init --run-dir="${SWIFT_DATA_DIR}/run" all restart || true
}
_export_os_vars() {
    # Export the OpenStack CLI settings shared by both the keystone-admin
    # and swift-user credential helpers below.
    export OS_IDENTITY_API_VERSION=3
    export OS_AUTH_URL="http://${KEYSTONE_IP}/identity/v3"
    export OS_REGION_NAME=RegionOne
}
_export_keystone_os_vars() {
    # Export Keystone admin credentials for the openstack CLI.
    # BUG FIX: OS_PROJECT_NAME was previously set to $ADMIN_USER, which
    # only worked because both default to "admin"; the project name must
    # come from ADMIN_PROJECT.
    _export_os_vars
    export OS_USERNAME=$ADMIN_USER
    export OS_USER_DOMAIN_ID=$STORLETS_DEFAULT_USER_DOMAIN_ID
    export OS_PASSWORD=$ADMIN_PASSWORD
    export OS_PROJECT_NAME=$ADMIN_PROJECT
    export OS_PROJECT_DOMAIN_ID=$STORLETS_DEFAULT_PROJECT_DOMAIN_ID
}
_export_swift_os_vars() {
    # Export the credentials of the default storlets-enabled project user,
    # used when creating containers and setting ACLs.
    _export_os_vars
    export OS_USERNAME="$SWIFT_DEFAULT_USER"
    export OS_PASSWORD="$SWIFT_DEFAULT_USER_PWD"
    export OS_PROJECT_NAME="$SWIFT_DEFAULT_PROJECT"
    export OS_USER_DOMAIN_ID="$STORLETS_DEFAULT_USER_DOMAIN_ID"
    export OS_PROJECT_DOMAIN_ID="$STORLETS_DEFAULT_PROJECT_DOMAIN_ID"
}
configure_swift_and_keystone_for_storlets() {
    # Create the default storlets project and users in Keystone
    # (idempotently, so re-running the plugin is safe), patch the Swift
    # configuration for the storlets middleware, and create the
    # storlet/dependency/log containers with their ACLs.
    _export_keystone_os_vars

    # Add project and users to Keystone, skipping anything already there.
    # 'grep -qw' replaces the original 'grep -w | wc -l' + numeric test.
    if ! openstack project list | grep -qw "$SWIFT_DEFAULT_PROJECT"; then
        openstack project create "$SWIFT_DEFAULT_PROJECT"
    fi
    if ! openstack user list | grep -qw "$SWIFT_DEFAULT_USER"; then
        openstack user create --project "$SWIFT_DEFAULT_PROJECT" --password "$SWIFT_DEFAULT_USER_PWD" "$SWIFT_DEFAULT_USER"
        openstack role add --user "$SWIFT_DEFAULT_USER" --project "$SWIFT_DEFAULT_PROJECT" admin
    fi
    if ! openstack user list | grep -qw "$SWIFT_MEMBER_USER"; then
        openstack user create --project "$SWIFT_DEFAULT_PROJECT" --password "$SWIFT_MEMBER_USER_PWD" "$SWIFT_MEMBER_USER"
        openstack role add --user "$SWIFT_MEMBER_USER" --project "$SWIFT_DEFAULT_PROJECT" _member_
    fi

    # Modify relevant Swift configuration files, then drop the temp confs.
    _generate_swift_middleware_conf
    _generate_storlet-docker-gateway
    sudo python devstack/swift_config.py install /tmp/swift_middleware_conf "$STORLETS_SWIFT_RUNTIME_USER"
    rm /tmp/swift_middleware_conf
    rm /tmp/storlet-docker-gateway.conf

    # Create storlet related containers and set ACLs.  The member user
    # only gets read access to the storlet and dependency containers.
    _storlets_swift_start
    _export_swift_os_vars
    openstack object store account set --property Storlet-Enabled=True
    swift post --read-acl "$SWIFT_DEFAULT_PROJECT:$SWIFT_MEMBER_USER" "$STORLETS_STORLET_CONTAINER_NAME"
    swift post --read-acl "$SWIFT_DEFAULT_PROJECT:$SWIFT_MEMBER_USER" "$STORLETS_DEPENDENCY_CONTAINER_NAME"
    swift post "$STORLETS_LOG_CONTAIER_NAME"
}
_install_docker() {
    # Install docker via the upstream convenience script, relocate its
    # storage under the storlets device, and wait for the daemon socket.
    # TODO: Add support for other distros; this is geared towards Ubuntu.
    # See other projects that install docker.
    DOCKER_UNIX_SOCKET=/var/run/docker.sock
    DOCKER_SERVICE_TIMEOUT=5

    install_package socat
    wget http://get.docker.com -O install_docker.sh
    chmod 755 install_docker.sh
    sudo bash -x install_docker.sh
    sudo rm install_docker.sh

    sudo killall docker || true

    # Point docker's storage at the storlets device and enable debug
    # logging.  BUG FIX: the original 'sed' had no -i, so it printed the
    # edited file to stdout and never actually changed /etc/default/docker.
    # The -g path now follows STORLETS_DOCKER_DEVICE instead of being
    # hardcoded to /home/docker_device.
    sudo sed -r -i "s#^.*DOCKER_OPTS=.*\$#DOCKER_OPTS=\"--debug -g ${STORLETS_DOCKER_DEVICE}/docker --storage-opt dm.override_udev_sync_check=true\"#" /etc/default/docker
    sudo cat /etc/default/docker

    # Start the daemon - restart just in case the package ever auto-starts...
    restart_service docker

    echo "Waiting for docker daemon to start..."
    DOCKER_GROUP=$(groups | cut -d' ' -f1)
    CONFIGURE_CMD="while ! /bin/echo -e 'GET /version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET 2>/dev/null | grep -q '200 OK'; do
    # Set the right group on docker unix socket before retrying
    sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET
    sudo chmod g+rw $DOCKER_UNIX_SOCKET
    sleep 1
done"
    if ! timeout $DOCKER_SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then
        die $LINENO "docker did not start"
    fi
}
prepare_storlets_install() {
    # Create the docker storage area and install the build/runtime
    # prerequisites: docker itself, JDK 8 (from the openjdk PPA), ant
    # and python with setuptools.
    sudo mkdir -p "${STORLETS_DOCKER_DEVICE}/docker"
    sudo chmod 777 "$STORLETS_DOCKER_DEVICE"
    _install_docker
    sudo add-apt-repository -y ppa:openjdk-r/ppa
    sudo apt-get update
    local pkg
    for pkg in openjdk-8-jdk-headless ant python python-setuptools; do
        sudo apt-get install -y "$pkg"
    done
}
_generate_jre_dockerfile() {
    # Emit the Dockerfile for the generic JRE-8 base image under the
    # temporary registry staging area.
    local df_dir="${TMP_REGISTRY_PREFIX}/repositories/${STORLETS_DOCKER_BASE_IMG_NAME}_jre8"
    cat >"${df_dir}/Dockerfile" <<EOF
FROM $STORLETS_DOCKER_BASE_IMG
MAINTAINER root
RUN apt-get update && \
apt-get install python -y && \
apt-get install git -y && \
apt-get update && \
apt-get install openjdk-8-jre-headless -y && \
apt-get clean
EOF
}
create_base_jre_image() {
    # Build the generic JRE-8 image on top of the configured base OS image.
    echo "Create base jre image"
    docker pull "$STORLETS_DOCKER_BASE_IMG"
    local build_dir="${TMP_REGISTRY_PREFIX}/repositories/${STORLETS_DOCKER_BASE_IMG_NAME}_jre8"
    mkdir -p "$build_dir"
    _generate_jre_dockerfile
    # Guard the cd: building in the wrong directory would pick up a stale
    # or missing Dockerfile.
    cd "$build_dir" || return 1
    docker build -q -t "${STORLETS_DOCKER_BASE_IMG_NAME}_jre8" .
    cd -
}
_generate_logback_xml() {
    # Emit the logback configuration baked into the storlet engine image;
    # it rolls /tmp/SDaemon.log daily into /srv/logs (30 days history).
    local target_dir="${TMP_REGISTRY_PREFIX}/repositories/${STORLETS_DOCKER_BASE_IMG_NAME}_jre8_storlets"
    cat >"${target_dir}/logback.xml" <<EOF
<configuration>
    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>/tmp/SDaemon.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- daily rollover. Make sure the path matches the one in the file element or else
            the rollover logs are placed in the working directory. -->
            <fileNamePattern>/srv/logs/application_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>1MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <!-- keep 30 days' worth of history -->
            <maxHistory>30</maxHistory>
        </rollingPolicy>
        <encoder>
            <pattern>%-4relative [%thread] %-5level %logger{35} - %msg%n</pattern>
        </encoder>
    </appender>
    <root level="TRACE">
        <appender-ref ref="FILE" />
    </root>
</configuration>
EOF
}
_generate_jre_storlet_dockerfile() {
    # Emit the Dockerfile for the storlet engine image: adds the
    # in-container swift user/group and the logback config, and sets the
    # daemon-factory entry point.
    local df_dir="${TMP_REGISTRY_PREFIX}/repositories/${STORLETS_DOCKER_BASE_IMG_NAME}_jre8_storlets"
    cat >"${df_dir}/Dockerfile" <<EOF
FROM ${STORLETS_DOCKER_BASE_IMG_NAME}_jre8
MAINTAINER root
RUN [ "groupadd", "-g", "$STORLETS_DOCKER_SWIFT_GROUP_ID", "swift" ]
RUN [ "useradd", "-u" , "$STORLETS_DOCKER_SWIFT_USER_ID", "-g", "$STORLETS_DOCKER_SWIFT_GROUP_ID", "swift" ]

# Copy files
COPY ["logback.xml", "/usr/local/lib/storlets/"]
RUN ["chmod", "0744", "/usr/local/lib/storlets/logback.xml"]
CMD ["prod", "/mnt/channels/factory_pipe", "DEBUG"]
ENTRYPOINT ["/usr/local/libexec/storlets/init_container.sh"]
EOF
}
create_storlet_engine_image() {
    # Build the storlet engine image (jre8 base + swift user + logback +
    # daemon factory entry point).
    echo "Create Storlet engine image"
    local build_dir="${TMP_REGISTRY_PREFIX}/repositories/${STORLETS_DOCKER_BASE_IMG_NAME}_jre8_storlets"
    mkdir -p "$build_dir"
    _generate_logback_xml
    _generate_jre_storlet_dockerfile
    # Guard the cd so we never run 'docker build' from the wrong directory.
    cd "$build_dir" || return 1
    docker build -q -t "${STORLETS_DOCKER_BASE_IMG_NAME}_jre8_storlets" .
    cd -
}
install_storlets_code() {
    # Install the storlets native libs and python package, then stage the
    # privileged helper scripts on the docker device.
    echo "Installing storlets"
    cd "$REPO_DIR" || return 1
    sudo ./install_libs.sh
    sudo pip install -r requirements.txt
    sudo python setup.py install
    sudo chown -R "${STORLETS_SWIFT_RUNTIME_USER}" storlets.egg-info*

    sudo mkdir -p "$STORLETS_DOCKER_DEVICE"/scripts
    sudo chown "$STORLETS_SWIFT_RUNTIME_USER":"$STORLETS_SWIFT_RUNTIME_GROUP" "$STORLETS_DOCKER_DEVICE"/scripts
    sudo chmod 0755 "$STORLETS_DOCKER_DEVICE"/scripts

    # Both helpers are setuid-root so the swift runtime user can restart
    # storlet containers / halt daemon factories.
    # BUG FIX: chown must run BEFORE chmod — changing ownership of an
    # executable can clear the setuid bit, so the original
    # chmod-then-chown order could leave the scripts without setuid.
    local helper
    for helper in restart_docker_container send_halt_cmd_to_daemon_factory.py; do
        sudo cp "scripts/$helper" "$STORLETS_DOCKER_DEVICE"/scripts/
        sudo chown root:root "$STORLETS_DOCKER_DEVICE/scripts/$helper"
        sudo chmod 04755 "$STORLETS_DOCKER_DEVICE/scripts/$helper"
    done
    cd -
}
_generate_swift_middleware_conf() {
    # Write the temporary config consumed by devstack/swift_config.py:
    # which Swift conf files to patch and the storlets middleware settings
    # to inject into them.
    local out=/tmp/swift_middleware_conf
    cat >"$out" <<EOF
[proxy-confs]
proxy_server_conf_file = /etc/swift/proxy-server.conf
storlet_proxy_server_conf_file = /etc/swift/storlet-proxy-server.conf
[object-confs]
object_server_conf_files = /etc/swift/object-server/1.conf
#object_server_conf_files = /etc/swift/object-server/1.conf, /etc/swift/object-server/2.conf, /etc/swift/object-server/3.conf, /etc/swift/object-server/4.conf
#object_server_conf_files = /etc/swift/object-server.conf
[common-confs]
storlet_middleware = $STORLETS_MIDDLEWARE_NAME
storlet_container = $STORLETS_STORLET_CONTAINER_NAME
storlet_dependency = $STORLETS_DEPENDENCY_CONTAINER_NAME
#storlet_log = $STORLETS_LOG_CONTAIER_NAME
storlet_gateway_module = $STORLETS_GATEWAY_MODULE
storlet_gateway_conf = $STORLETS_GATEWAY_CONF_FILE
storlet_proxy_execution = $STORLETS_PROXY_EXECUTION_ONLY
EOF
}
_generate_storlet-docker-gateway() {
    # Write the temporary docker-gateway config; swift_config.py installs
    # it as STORLETS_GATEWAY_CONF_FILE for the storlets middleware.
    local out=/tmp/storlet-docker-gateway.conf
    cat >"$out" <<EOF
[DEFAULT]
storlet_logcontainer = $STORLETS_LOG_CONTAIER_NAME
cache_dir = $STORLETS_CACHE_DIR
log_dir = $STORLETS_LOGS_DIR
script_dir = $STORLETS_SCRIPTS_DIR
storlets_dir = $STORLETS_STORLETS_DIR
pipes_dir = $STORLETS_PIPES_DIR
restart_linux_container_timeout = $STORLETS_RESTART_CONTAINER_TIMEOUT
storlet_timeout = $STORLETS_RUNTIME_TIMEOUT
EOF
}
_generate_default_tenant_dockerfile() {
    # The default project image is just the engine image re-tagged under a
    # project-id based name; the Dockerfile only needs a FROM line.
    # Relies on SWIFT_DEFAULT_PROJECT_ID being set by the caller.
    local df_dir="${TMP_REGISTRY_PREFIX}/repositories/${SWIFT_DEFAULT_PROJECT_ID}"
    cat >"${df_dir}/Dockerfile" <<EOF
FROM ${STORLETS_DOCKER_BASE_IMG_NAME}_jre8_storlets
MAINTAINER root
EOF
}
create_default_tenant_image() {
    # Build the per-project image for the default test project.  The image
    # tag is the first 13 chars of the Keystone project UUID — presumably
    # what the docker gateway looks up at runtime; confirm against the
    # gateway code.
    # Modernized: $( ) instead of backticks; guarded cd.
    SWIFT_DEFAULT_PROJECT_ID=$(openstack project list | grep -w "$SWIFT_DEFAULT_PROJECT" | awk '{ print $2 }')
    local build_dir="${TMP_REGISTRY_PREFIX}/repositories/${SWIFT_DEFAULT_PROJECT_ID}"
    mkdir -p "$build_dir"
    _generate_default_tenant_dockerfile
    cd "$build_dir" || return 1
    docker build -q -t "${SWIFT_DEFAULT_PROJECT_ID:0:13}" .
    cd -
}
create_test_config_file() {
    # Write the functional-test configuration into the repo root using
    # devstack's iniset helper.
    local testfile=${REPO_DIR}/test.conf
    iniset ${testfile} general keystone_default_domain $STORLETS_DEFAULT_PROJECT_DOMAIN_ID
    iniset ${testfile} general keystone_public_url $KEYSTONE_PUBLIC_URL
    iniset ${testfile} general storlets_default_project_name $SWIFT_DEFAULT_PROJECT
    iniset ${testfile} general storlets_default_project_user_name $SWIFT_DEFAULT_USER
    iniset ${testfile} general storlets_default_project_user_password $SWIFT_DEFAULT_USER_PWD
    iniset ${testfile} general storlets_default_project_member_user $SWIFT_MEMBER_USER
    iniset ${testfile} general storlets_default_project_member_password $SWIFT_MEMBER_USER_PWD
    # BUG FIX: the region option was written with no value.  Use the
    # region exported by _export_os_vars, falling back to devstack's
    # default RegionOne.
    iniset ${testfile} general region "${OS_REGION_NAME:-RegionOne}"
}
install_storlets() {
    # Top-level devstack entry point: install prerequisites and the
    # storlets code, wire storlets into Swift/Keystone, build the Docker
    # images, write the functional-test config, then restart Swift so the
    # servers load the storlet middleware.
    echo "Install storlets dependencies"
    prepare_storlets_install
    echo "Install storlets code"
    install_storlets_code
    echo "Configure swift and keystone for storlets"
    configure_swift_and_keystone_for_storlets
    # Image build order matters: base jre -> engine -> per-project image.
    echo "Create Docker images"
    create_base_jre_image
    create_storlet_engine_image
    create_default_tenant_image
    echo "Create test configuration file"
    create_test_config_file
    echo "restart swift"
    _storlets_swift_restart
}
uninstall_storlets() {
    # Stop docker, restore the default docker storage options and wipe
    # everything storlets put on the docker device.
    sudo service docker stop
    # BUG FIX: the original 'sed' had no -i, so /etc/default/docker was
    # printed to stdout but never actually restored.
    sudo sed -r -i 's#^.*DOCKER_OPTS=.*$#DOCKER_OPTS="--debug --storage-opt dm.override_udev_sync_check=true"#' /etc/default/docker

    echo "Cleaning all storlets runtime stuff..."
    # ':?' aborts instead of running 'rm -fr /' if the variable is ever
    # empty or unset.
    sudo rm -fr "${STORLETS_DOCKER_DEVICE:?}"
}

@ -1,286 +0,0 @@
=================================================
Deploying storlets over an existing Swift cluster
=================================================
This guide describes how to install the storlet engine over an existing Swift with Keystone
cluster. This guide follows an example where the Swift cluster has one proxy and 3 object nodes.
In addition the guide assumes a management machine from which the installation takes place.
The management machine in the example also acts as a Keystone server as well as a Docker
repository for the Docker images created during the deployment.
------------------------
The installation process
------------------------
We bring here the installation process high level steps so as to make
more sense of the various installation configuration parameters described
below. Feel free to skip if you wish to get on with the installation.
- Install a private Docker registry. This is where the various Docker
images are kept before being deployed across the cluster.
- Building the Docker image for running storlets. The process starts with
a generic image containing Java and the storlets stuff, and proceeds
with creating an image for a default tenant over which the tests can
be executed.
- Deploy the storlet engine python code that runs within Swift, including
swift configuration changes to incorporate the storlets middleware.
- Create a default tenant that is enabled for storlets.
- Install the storlets management code on the management host. This code
allows to create new tenants that can use storlets as well as deploy
Docker images. The installation of this code requires Keystone credentials
for the creation of a storlet management swift account that keeps the cluster
configuration.
-----------------------------
The assumed Swift cluster IPs
-----------------------------
The guide uses the following addresses:
::
management / keystone / docker repository: 192.168.56.200
proxy 192.168.56.210
object1 192.168.56.220
object2 192.168.56.230
object3 192.168.56.240
.. note::
The Ansible installation scripts used throughout the deployment assume that the user root
can ssh from the management machine all other machines without a password. This includes
root ssh from the machine to itself, either through 127.0.0.1 or the management address
(192.168.56.200) in our example
-----------------------------
Clone the storlets repository
-----------------------------
To clone the storlets repository use:
::
sudo apt-get install git
git clone https://github.com/openstack/storlets.git
.. note::
The rest of this guide assumes that everything is
being executed as root. Specifically, it assumes that
the checkout is done under /root
--------------------------------
Create the preparation host file
--------------------------------
Create the file '/install/storlets/deploy/prepare_host'
and make sure that the address appearing there
is the addressed configured for root passwordless
ssh to the machine itself. e.g.
::
[s2aio]
192.168.56.200
------------------------------------------
Create the installation configuration file
------------------------------------------
Create the file 'install/storlets/deploy/installation_vars.yml'
with the following variables definitions. The below reflects our
deployment example.
::
ansible_ssh_user: root
repo_root: /root/storlets/
mgmt_group: [ "192.168.56.200" ]
proxy_group: [ "192.168.56.210" ]
storage_group: [ "192.168.56.220", "192.168.56.230", "192.168.56.240" ]
docker_group: [ "192.168.56.200" ]
storlets_management_user: root
storlet_management_account: "storlet_management"
storlet_management_admin_username: "storlet_manager"
storlet_manager_admin_password: "storlet_manager"
storlets_default_tenant_name: "test"
storlets_default_tenant_user_name: "tester"
storlets_default_tenant_user_password: "testing"
keystone_endpoint_host: 192.168.56.200
keystone_admin_user: admin
keystone_admin_password: admin
keystone_admin_project: admin
keystone_default_domain: default
swift_endpoint_host: 192.168.56.210
swift_endpoint_port: 80
swift_run_time_user: swift
swift_run_time_group: swift
docker_device: /srv/node/sdb
This file is used for creating the cluster_config.json which is
used by the Ansible installation. We give a full description of
the file below.
------------------------
Running the installation
------------------------
If no further tuning is required above the definitions in
'install/storlets/deploy/installation_vars.yml'
then just run 'sdeploy.sh' from the repository root.
.. note::
You must run sdeploy.sh as root.
If further tuning is required, edit 'sdeploy.sh', remark out the last line:
'install/storlets/install_storlets.sh' deploy and execute the script.
Once it is done, edit 'install/storlets/deploy/cluster_config.json' as required
and then run 'install/storlets/install_storlets.sh' from the repository root
We give below all the variables used in the installation, as they appear
in 'install/storlets/deploy/cluster_config.json'
Cluster config
==============
Below is the full set of variables being used by the storlets installation.
Please refer to the installation instructions below for controlling variables
that do not appear in the above configurable installation_vars.yml
.. note::
The variables that are controlled using the above installation_vars.yml
appear below within double curly braces.
::
{
"groups" : {
"storlet-mgmt": [ "192.168.56.200" ],
"storlet-proxy": [ "192.168.56.210" ],
"storlet-storage": [ "192.168.56.220", "192.168.56.230", "192.168.56.240" ],
"docker": [ "192.168.56.200" ]
},
"all" : {
"docker_device": "{{ docker_device }}",
"storlet_source_dir": "~/storlets",
"python_dist_packages_dir": "usr/local/lib/python2.7/dist-packages",
"storlet_gateway_conf_file": "/etc/swift/storlet_docker_gateway.conf",
"keystone_endpoint_host": "{{ keystone_endpoint_host }}",
"keystone_public_url": "http://{{ keystone_endpoint_host }}:5000/v3",
"keystone_admin_password": "{{ keystone_admin_password }}",
"keystone_admin_user": "{{keystone_admin_user}}",
"keystone_admin_project": "{{keystone_admin_project}}",
"keystone_default_domain": "{{keystone_default_domain}}",
"swift_endpoint_host": "{{ swift_endpoint_host }}",
"swift_public_url": "http://{{ swift_endpoint_host }}:{{ swift_endpoint_port }}/v1",
"swift_run_time_user" : "{{ swift_run_time_user }}",
"swift_run_time_group" : "{{ swift_run_time_group }}",
"swift_run_time_dir": "{{ swift_run_time_dir }}",
"storlets_management_user": "{{ storlets_management_user }}",
"storlet_management_account": "{{ storlet_management_account }}",
"storlet_management_admin_username": "{{ storlet_management_admin_username }}",
"storlet_manager_admin_password": "{{ storlet_manager_admin_password }}",
"storlet_management_swift_topology_container": "swift_cluster",
"storlet_management_swift_topology_object": "cluster_config.json",
"storlet_management_ansible_dir": "/opt/ibm/ansible/playbook",
"storlet_management_install_dir": "/opt/ibm",
"storlets_enabled_attribute_name": "storlet-enabled",
"docker_registry_random_string": "ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ1234",
"docker_registry_port": "5001",
"base_image_maintainer": "root",
"base_os_image": "ubuntu_14.04",
"storlets_image_name_suffix": "ubuntu_14.04_jre8_storlets",
"swift_user_id": "1003",
"swift_group_id": "1003",
"storlets_default_project_name": "{{ storlets_default_tenant_name }}",
"storlets_default_project_user_name": "{{ storlets_default_tenant_user_name }}",
"storlets_default_project_user_password": "{{ storlets_default_tenant_user_password }}",
"storlets_default_project_member_user" : "tester_member",
"storlets_default_project_member_password" : "member",
"storlet_middleware": "storlet_handler",
"storlet_container": "storlet",
"storlet_dependency": "dependency",
"storlet_log": "storletlog",
"storlet_images": "docker_images",
"storlet_timeout": "40",
"storlet_gateway_module": "docker",
"storlet_execute_on_proxy_only": "false",
"restart_linux_container_timeout": "3"
}
}
- The "groups" entry is a standard Ansible entry for the hosts taking part in the installation.
As mentioned, in our example we have a management host, acting as a docker repository host,
as well as a proxy and 3 object hosts. The IPs in each group represent management IPs through
which root can ssh.
- For each IP in the group entry, we have an entry that specifies the user Ansible will use to ssh to
that IP. In this guide we use root, and assume that a passwordless ssh has been set up for root to ssh
to all hosts from the management host.
- The "all" entry lists all the variables Ansible uses in the installation:
- "lxc-device". A directory within each swift host (proxy or storage) where the storlet run time stuff is to be
placed. This includes the docker images, the storlets code being downloaded locally, the storlets logs, etc.
It might be a good idea to dedicate a device for this. Note that all hosts must use the same location.
The value of this entry is an Ansible variable, which is specified in 'install/storlets/deploy/installation_vars.yml'
- "storlet_source_dir". A full path of the directory where the storlets repository is checked out.
- "python_dist_packages_dir". The destination where to install the storlet generated python packages. This serves
both for the host side code as well as the container side code.
- "storlet_gateway_conf_file". The location where the storlet gateway plugin configuration file is to be placed.
- Keystone related variables:
- "keystone_endpoint_host": The host where keystone is installed. The value of this entry is an Ansible variable,
which is specified in 'install/storlets/deploy/installation_vars.yml'
- "keystone_public_url": The Keystone public url. This entry makes use of the keystone endpoint host defined above.
- "keystone_admin_user": The Keystone administration user
- "keystone_admin_password": Currently not used. Serves for future alternative to the token.
- Swift related variables:
- "swift_endpoint_host". The proxy host. The value of this entry is an Ansible variable,
which is specified in 'install/storlets/deploy/installation_vars.yml'
- "swift_public_url". The Swift public url. This entry makes use of the swift endpoint host defined above.
- "swift_run_time_user", "swift_run_time_group". The user and group under which Swift runs. The value of these entries is an
Ansible variable, which is specified in 'install/storlets/deploy/installation_vars.yml'
- Storlet management related variables:
- "storlets_management_user". The management code makes use of Ansible. The user specified here is the user that
ansible would use to ssh to the various hosts when activated from the management code.. The value of this entry is an
Ansible variable, which is specified in 'install/storlets/deploy/installation_vars.yml'
- "storlet_management_account". The Swift account used by the storlet manager.
- "storlet_management_admin_username", "storlet_manager_admin_password". The Swift credentials of the user that acts as the
storlet engine manager.
- "storlet_management_swift_topology_container", "storlet_management_swift_topology_object". The Swift path were the cluster config is kept in Swift.
- "storlet_management_ansible_dir", "storlet_management_install_dir". The directories where to place the storlet engine management code and the
Ansible playbooks.
- Docker private registry variables:
- "docker_registry_random_string". A random string required by the registry installation.
- "docker_registry_port". The port the registry daemon listens on. Note that this is different form
the default port which is used by Keystone.
- Docker images variables
- "base_image_maintainer". The maintainer of the docker images. Note that the user specified is a user within
the Linux container user namespace.
- "base_os_image". The base OS image used for the Docker images. Serves as a prefix for the generic images created
by the process.
- "storlets_image_name_suffix". The suffix used for the base image that contains the storlets stuff.
- "swift_user_id", "swift_group_id". The user and group id of a Docker container user that is used to run the storlets daemons.
- The default swift project parameters created by the installation process:
- "storlets_default_project_name", "storlets_default_project_user_name", "storlets_default_project_user_password"
- The config parameters of the storlet middleware:
- "storlet_middleware". The name of the storlet middleware to appear in the swift config files.
- "storlet_container". The name of the container where storlets are uploaded to.
- "storlet_dependency". The name of the container where dependencies are uploaded to.
- "storlet_log". Currently not in use. Placeholder for future log upload feature.
- "storlet_images". The name of the container for uploading user tailored images.
- "storlet_gateway_module". The class implementing the storlets plugin used. Currently, we have only one
such plugin.
- "storlet_execute_on_proxy_only". Controls whether storlets will run only on the proxy servers.
- The config parameters of the storlet gateway plugin
- "storlet_timeout". The time Swift gives a storlet to start producing output.
- "restart_linux_container_timeout": The number of times the middleware tries to spawn a Docker container
before giving up.

@ -1,8 +1,8 @@
Installing a Development Environment
====================================
This guide gives a step by step installation instructions that are simpler
then what the s2aio.sh script does (e.g. it does not involve a docker registry
installation and configuration).
This guide gives a step by step installation instructions that are equivalent
to what s2aio.sh does. The intention is to make the reader more familiar with
what is involved in installing Storlets on top of Swift
The below steps must be executed using a passwordless sudoer user.
@ -20,13 +20,10 @@ Create a localrc file under the devstack repository root directory:
::
ENABLE_HTTPD_MOD_WSGI_SERVICES=False
KEYSTONE_IP=127.0.0.1
SWIFT_IP=127.0.0.1
ENABLED_SERVICES=key,swift,mysql
ADMIN_USER=admin
ADMIN_PASSWORD=$ADMIN_USER
ADMIN_PROJECT=ADMIN_USER
DATABASE_PASSWORD=admin
HOST_IP=127.0.0.1
ADMIN_PASSWORD=admin
MYSQL_PASSWORD=$ADMIN_PASSWORD
RABBIT_PASSWORD=$ADMIN_PASSWORD
SERVICE_PASSWORD=$ADMIN_PASSWORD
@ -40,18 +37,21 @@ Create a localrc file under the devstack repository root directory:
OS_REGION_NAME=RegionOne
SERVICE_HOST=$SWIFT_IP
SWIFT_SERVICE_PROTOCOL=http
SWIFT_DEFAULT_BIND_PORT=8080
SWIFT_SERVICE_LOCAL_HOST=$SERVICE_HOST
SWIFT_SERVICE_LISTEN_ADDRESS=$SERVICE_HOST
# Use minimum 2GB for running the storlets tests
SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-http}
SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080}
# service local host is used for ring building
SWIFT_SERVICE_LOCAL_HOST=$HOST_IP
# service listen address for proxy
SWIFT_SERVICE_LISTEN_ADDRESS=$HOST_IP
SWIFT_LOOPBACK_DISK_SIZE=20G
SWIFT_MAX_FILE_SIZE=5368709122
SWIFT_HASH=1234567890
IDENTITY_API_VERSION=3
Run the stack.sh script.
Before proceeding, we need to stop the
swift instances that were executed by the
stack.sh. From the same directory do:
stack.sh script. From the same directory do:
::
@ -83,13 +83,15 @@ defining some environment variables:
export OS_PROJECT_DOMAIN_ID=default
export OS_REGION_NAME=RegionOne
We now create the project and user:
We now create the project and users with Keystone.
::
openstack project create test
openstack user create --project test --password testing tester
openstack role add --user tester --project test admin
openstack user create --project test --password member tester_member
openstack role add --user tester --project test _member_
We now test that the setup by having the user 'tester' to stat the account 'test'. We use the Swift client cli.
A convenient way to do so is to edit the user's .bashrc adding the lines:
@ -141,7 +143,6 @@ Get and install the storlets code
cd storlets
sudo ./install_libs.sh
sudo python setup.py install
tar czf /tmp/storlets.tar.gz .
cd -
.. note:: You don't need sudo for 'python setup.py install' when installing the storlets package into your python virtualenv.
@ -153,20 +154,20 @@ Step 1: Create a working space for building the docker images, e.g.
::
mkdir -p $HOME/docker_repos
sudo docker pull ubuntu:14_04
sudo docker pull ubuntu:16.04
Step 2: Create a Docker image with Java
::
mkdir -p $HOME/docker_repos/ubuntu_14.04_jre8
mkdir -p $HOME/docker_repos/ubuntu_16.04_jre8
Create the file: $HOME/docker_repos/ubuntu_14.04_jre8/Dockerfile
Create the file: $HOME/docker_repos/ubuntu_16.04_jre8/Dockerfile
with the following content:
::
FROM ubuntu:14.04
FROM ubuntu:16.04
MAINTAINER root
# The following operations should be defined in one line
@ -183,8 +184,8 @@ Build the image
::
cd $HOME/docker_repos/ubuntu_14.04_jre8
sudo docker build -q -t ubuntu_14.04_jre8 .
cd $HOME/docker_repos/ubuntu_16.04_jre8
sudo docker build -q -t ubuntu_16.04_jre8 .
cd -
@ -192,16 +193,16 @@ Step 3: Augment the above created image with the storlets stuff
::
mkdir -p $HOME/docker_repos/ubuntu_14.04_jre8_storlets
mkdir -p $HOME/docker_repos/ubuntu_16.04_jre8_storlets
cp $HOME/storlets/install/storlets/roles/docker_storlet_engine_image/files/logback.xml .
cd -
Create the file: $HOME/docker_repos/ubuntu_14.04_jre8_storlets/Dockerfile
Create the file: $HOME/docker_repos/ubuntu_16.04_jre8_storlets/Dockerfile
with the following content:
::
FROM ubuntu_14.04_jre8
FROM ubuntu_16.04_jre8
MAINTAINER root
@ -221,8 +222,8 @@ Build the image
::
cd $HOME/docker_repos/ubuntu_14.04_jre8_storlets
sudo docker build -q -t ubuntu_14.04_jre8_storlets .
cd $HOME/docker_repos/ubuntu_16.04_jre8_storlets
sudo docker build -q -t ubuntu_16.04_jre8_storlets .
cd -
Step 4: Create a tenant specific image. The engine looks for images
@ -241,22 +242,20 @@ The response from the above contains the account line, e.g.:
The account id is the number following the 'AUTH\_' prefix.
Next create the file $HOME/docker_repos/ubuntu_14.04_jre8_storlets_<account id>/Dockerfile
Next create the file $HOME/docker_repos/ubuntu_16.04_jre8_storlets_<account id>/Dockerfile
with the following content:
::
FROM ubuntu_14.04_jre8_storlets
FROM ubuntu_16.04_jre8_storlets
MAINTAINER root
RUN apt-get install vim
Build the image
::
cd $HOME/docker_repos/ubuntu_14.04_jre8_storlets_<account id>
cd $HOME/docker_repos/ubuntu_16.04_jre8_storlets_<account id>
sudo docker build -q -t <account id> .
cd -
@ -402,6 +401,7 @@ We use the swift cli as follows:
--os-password=testing \
--os-project-name=test \
--os-project-domain-name default \
--read-acl test:tester_member \
storlet
swift post \
@ -410,6 +410,7 @@ We use the swift cli as follows:
--os-password=testing \
--os-project-name=test \
--os-project-domain-name default \
--read-acl test:tester_member \
dependency
swift post \
@ -430,26 +431,20 @@ The functional tests are designed to run over a clustered installation
(that is not an all in one install). Hence, running the tests require
a cluster configuration file.
Step 1: Create the file $HOME/storlets/cluster_config.json with the below
Step 1: Create the file $HOME/storlets/test.conf with the below
content.
::
{
"all" : {
"docker_device": "/home/docker_device",
"storlet_source_dir": "~/storlets",
"keystone_public_url": "http://127.0.0.1/identity/v3",
"swift_endpoint_host": "127.0.0.1",
"swift_public_url": "http://127.0.0.1:8080/v1",
"storlets_enabled_attribute_name": "storlet-enabled",
"storlets_default_project_name": "test",
"storlets_default_project_user_name": "tester",
"storlets_default_project_user_password": "testing",
"storlets_default_project_member_user": "tester_member",
"storlets_default_project_member_password": "member",
}
}
[general]
region = RegionOne
storlets_default_project_member_password = member
storlets_default_project_member_user = tester_member
storlets_default_project_user_password = testing
storlets_default_project_user_name = tester
storlets_default_project_name = test
keystone_public_url = http://127.0.0.1/identity/v3
keystone_default_domain = default
Step 2: Run the functional tests

@ -11,36 +11,22 @@ the development environment installation instructions_ or in the getting started
Building
--------
The storlets repository consists of code written in Python, Java and C. We have chose ant to serve as a 'make' tool for all of the code.
The main build task in build.xml is dependent on two other build tasks:
The storlets repository consists of code written in Python, Java and C.
The C and Java code reside under the 'src/' directory. The C code is built and
installed using Makefiles, and the Java code is built and installed using ant
build.xml files. Refer to the install_libs.sh script under the repo root directory
for the exact procedure of building and installing the C and Java code.
#. build_storlets task. This task builds all the sample storlets used in the system tests.
#. build engine task. This task builds/packages the following components:
The python code resides under the storlets directory and is installed using the usual
setup.py script.
#. The storlet middleware and the "storlet docker gateway" python code. These are built as two packages in a single 'storlets' egg:
In addition there are several storlet examples written in both Python and Java under the
StorletSamples directory. This directory has a build.xml script used to build the samples.
To build the storlets cd to the StorletSamples/java directory and run:
* storlet_middleware
* storlet_gateway
::
#. The SBus code. This is the communication module between the gateway and the Docker container. It has a transport layer written in "C" with
'bindings' to both Java and Python.
#. The Python written storlet_factory_daemon, which is packaged for installation in a Docker image
#. The Java SDaemon code, which is the daemon code that loads the storlets in run time. This code is compiled to a .jar that is later installed
in the Docker image.
#. The Java SCommon code, which has the storlet interface declaration, as well as the accompanying classes appearing in the interface. This code
is compiled to a .jar that is required both in the Docker image as well as for building storlets.
Deploying
---------
Two additional tasks of interest in our build.xml are the deploy_host_engine and deploy_container_engine.
These tasks are based on the Ansible installation scripts and do the following:
#. deploy_host_engine would get all the code that is relevant to the host side
(python middleware and SBus) and deploy it on the hosts as descrined in the
cluster_config.json file
#. deploy_container_engine, would create an updated image of the tenant defined
in the cluster_config.json and distribute it to all nodes as defined in
the configuration.
ant build
Running the Tests
-----------------

@ -75,19 +75,6 @@ Storlets Developers
writing_and_deploying_java_storlets
writing_and_deploying_python_storlets
Storlets Deployers
==================
.. toctree::
:maxdepth: 1
deployer_installation
deployer_guide
Storlets Management
===================
Coming Soon!
Indices and tables
==================

@ -87,7 +87,7 @@ storlets with EC and encryption. Valid values are true / false
# Keystone access information
keystone_endpoint_host: 127.0.0.1
keystone_admin_url: http://{{ keystone_endpoint_host }}:35357/v3
keystone_public_url: http://{{ keystone_endpoint_host }}:5000/v3
keystone_public_url: http://{{ keystone_endpoint_host }}/identity/v3
keystone_admin_token: ADMIN
keystone_admin_password: passw0rd

@ -23,7 +23,7 @@ default storlets development environment installation (`s2aio <http://storlets.r
import os
os.environ['OS_AUTH_VERSION'] = '3'
os.environ['OS_AUTH_URL'] = 'http://127.0.0.1:5000/v3'
os.environ['OS_AUTH_URL'] = 'http://127.0.0.1/identity/v3'
os.environ['OS_USERNAME'] = 'tester'
os.environ['OS_PASSWORD'] = 'testing'
os.environ['OS_USER_DOMAIN_NAME'] = 'default'

@ -1,7 +1,7 @@
s2aio
=====
s2aio is a script that installs Swift (with Keystone) and Storlets all on one.
s2aio is a script that installs Swift and Storlets all in one.
The script allows to do the installation either on the host
where it is invoked or in a Docker container.

@ -7,4 +7,4 @@ With that user just do:
sudo apt-get install python-tox python-nose git
git clone https://github.com/openstack/storlets.git
cd storlets
./s2aio.sh install dev host
./s2aio.sh install

@ -1,22 +0,0 @@
# Install Ansible. Current scripts rely on
# features which are not in ubuntu repo Ansible
# (Ansible >= 2 is required; when the packaged version is too old,
# the upstream ppa:ansible/ansible repository is used instead).
ANSIBLE_EXISTS=1
# Probe for an installed ansible binary; a failing probe marks it absent.
ansible --version >/dev/null 2>&1 || { ANSIBLE_EXISTS=0; }
if [ "$ANSIBLE_EXISTS" -eq 1 ]
then
# Extract the major version from the "ansible X.Y.Z" banner:
# split the banner on spaces, then split the version field on dots.
VERSION_LINE=$(ansible --version)
IFS=' ' read -ra VERSION_TUPLE <<< "$VERSION_LINE" &> /dev/null
IFS='.' read -ra VERSION <<< "${VERSION_TUPLE[1]}" &> /dev/null
if [ "${VERSION[0]}" -lt 2 ]
then
# Too old - treat as not installed so it gets (re)installed below.
ANSIBLE_EXISTS=0
fi
fi
if [ "$ANSIBLE_EXISTS" -eq 0 ]
then
# Install Ansible >= 2 from the upstream PPA.
sudo apt-get install -y software-properties-common
sudo apt-add-repository -y ppa:ansible/ansible
sudo apt-get update
sudo apt-get install -y ansible
fi

@ -1,22 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: storlet-proxy:storlet-storage:docker
roles:
- role: test_os_version
tasks:
- name: create docker_device directory
file: path={{ docker_device }} state=directory mode=0777 recurse=yes

@ -1,20 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: docker
roles:
- role: docker_storlet_engine_image
- role: deploy_default_project

@ -1,20 +0,0 @@
# Copyright (c) 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: docker
roles:
- role: docker_client
- role: docker_base_jre_image
- role: docker_storlet_engine_image

@ -1,21 +0,0 @@
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: storlet-mgmt
roles:
- role: enable_default_swift_account_for_storlets
- role: create_default_project_docker_image
project_image_name: "{{ base_os_image }}_jre8_storlets"
project_name: "{{ storlets_default_project_name }}"

@ -1,29 +0,0 @@
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: storlet-proxy[0]
tasks:
- fetch: src=/etc/swift/proxy-server.conf dest=/tmp/ flat=yes
- hosts: storlet-storage
tasks:
- copy: src=/tmp/proxy-server.conf dest=/etc/swift/proxy-server.conf
- hosts: storlet-proxy:storlet-storage
roles:
- role: docker_client
- role: host_storlet_engine_install
- role: host_storlet_engine_configure
engine_install_dir: /tmp/swift/

@ -1,17 +0,0 @@
#!/bin/bash
# Driver that installs the storlets engine on a cluster by running the
# Ansible playbooks in order: cluster sanity checks, Docker setup,
# host-side engine install, and enabling storlets for the default project.
# The inventory is produced dynamically by storlets_dynamic_inventory.py.
set -eu
echo "Running hosts cluster_check playbook"
# NOTE: the first two playbooks run with -s (become root); the last two
# run as the invoking user.
ansible-playbook -s -i storlets_dynamic_inventory.py cluster_check.yml
echo "Running hosts docker_cluster playbook"
ansible-playbook -s -i storlets_dynamic_inventory.py docker_cluster.yml
echo "Running the host_side_storlet_engine playbook"
ansible-playbook -i storlets_dynamic_inventory.py host_side_storlet_engine.yml
echo "Running the enable_storlets_for_default_project playbook"
ansible-playbook -i storlets_dynamic_inventory.py enable_storlets_for_default_project.yml
# TODO(eranr): Get back to the ant dev playbooks!!!
set +eu

@ -1,40 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ansible_ssh_user: <ANSIBLE_USER>
repo_root: <STORLETS_REPO_ROOT>
mgmt_group: [ "127.0.0.1" ]
proxy_group: [ "127.0.0.1" ]
storage_group: [ "127.0.0.1" ]
docker_group: [ "127.0.0.1" ]
storlets_management_user: <MGMT_USER>
storlet_management_account: "storlet_management"
storlet_management_admin_username: "storlet_manager"
storlet_manager_admin_password: "storlet_manager"
storlets_default_project_name: "test"
storlets_default_project_user_name: "tester"
storlets_default_project_user_password: "testing"
keystone_endpoint_host: 127.0.0.1
keystone_admin_user: admin
keystone_admin_password: admin
keystone_admin_project: admin
keystone_default_domain: default
swift_endpoint_host: 127.0.0.1
swift_endpoint_port: 8080
swift_run_time_user: <SWIFT_RUNTIME_USER>
swift_run_time_group: <SWIFT_RUNTIME_GROUP>
swift_run_time_dir: <SWIFT_RUNTIME_DIR>
docker_device: /home/docker_device

@ -1,17 +0,0 @@
#---------------------------------------------------------------------------
# Copyright (c) 2010-2016 OpenStack Foundation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# Limitations under the License.
#---------------------------------------------------------------------------
[s2aio]
<PREPARING_HOST>

@ -1,88 +0,0 @@
#!/bin/bash
# Prepare a host for an Ansible-driven storlets installation:
# validates the flavor/target arguments, generates the Ansible variable
# files from their samples, and runs the prepare_storlets_install.yml
# playbook against the prepared inventory.
#
# The storlets installation has 3 flavors:
# 1. Jenkins job installation, for running the functional tests.
# 2. Developer installation.
# 3. Deployment over existing Swift cluster
# There are two possible targets:
# 1. host (localhost)
# 2. docker
if [ "$#" -ne 2 ]; then
echo "Usage: prepare_storlets_install.sh <flavor> <target>"
echo "flavor = jenkins | dev | deploy"
echo "target = host | docker"
exit 1
fi
TARGET="$2"
if [ "$TARGET" != "host" ] && [ "$TARGET" != "docker" ]; then
echo "target must be either \"host\" or \"docker\""
exit 1
fi
if [ "$TARGET" == "host" ]; then
export SWIFT_IP='127.0.0.1'
else
# For the docker target, Swift runs inside the "s2aio" container;
# discover its IP address from the container's network config.
export SWIFT_IP=`sudo docker exec s2aio ifconfig | grep "inet addr" | head -1 | awk '{print $2}' | awk -F":" '{print $2}'`
fi
FLAVOR="$1"
if [ "$FLAVOR" != "jenkins" ] && [ "$FLAVOR" != "dev" ] && [ "$FLAVOR" != "deploy" ]; then
echo "flavor must be either \"jenkins\" or \"dev\" or \"deploy\""
exit 1
fi
# Install Storlets build prerequisite
sudo add-apt-repository -y ppa:openjdk-r/ppa
sudo apt-get update
sudo apt-get install -y openjdk-8-jdk-headless
sudo apt-get install -y ant
sudo apt-get install -y python
sudo apt-get install -y python-setuptools
sudo ./install_libs.sh
# Note(takashi): Currently we need to use tar ball to storlet repo
# files, to avoid slow performance of copy module
# when dealing with a directory
tar czf /tmp/storlets.tar.gz .
# The rest of the operations are all relative to
# install/storlets/
cd install/storlets
if [ ! -d deploy ]; then
mkdir deploy
fi
if [ "$FLAVOR" == "deploy" ]; then
# Deploy flavor: the operator supplies the variables file themselves.
if [ ! -e deploy/installation_vars.yml ]; then
echo "deploy installation must have deploy/installation_vars.yml in place"
exit 1
fi
else
# jenkins/dev flavors: generate deploy/installation_vars.yml from the
# sample, substituting the placeholders for the current environment.
cp installation_vars.yml-sample deploy/installation_vars.yml
if [ $TARGET == 'host' ]; then
sed -i 's/<ANSIBLE_USER>/'$USER'/g' deploy/installation_vars.yml
sed -i 's/<MGMT_USER>/'$USER'/g' deploy/installation_vars.yml
else
# Inside the container everything runs as root, and all 127.0.0.1
# references are rewritten to the container's IP.
sed -i 's/<ANSIBLE_USER>/root/g' deploy/installation_vars.yml
sed -i 's/<MGMT_USER>/root/g' deploy/installation_vars.yml
sed -i 's/127.0.0.1/'$SWIFT_IP'/g' deploy/installation_vars.yml
fi
sed -i 's/<SWIFT_RUNTIME_USER>/'$USER'/g' deploy/installation_vars.yml
sed -i 's/<SWIFT_RUNTIME_GROUP>/'$USER'/g' deploy/installation_vars.yml
sed -i 's/<SWIFT_RUNTIME_DIR>/\/opt\/stack\/data\/swift\/run/g' deploy/installation_vars.yml
if [ "$FLAVOR" == "jenkins" ]; then
# Jenkins gate jobs check the repo out under a distro-specific
# workspace path; build that path from /etc/lsb-release.
source /etc/lsb-release
sed -i 's/<STORLETS_REPO_ROOT>/\/home\/'$USER'\/workspace\/gate-storlets-functional-'${DISTRIB_ID,,}'-'$DISTRIB_CODENAME'\//g' deploy/installation_vars.yml
else
sed -i 's/<STORLETS_REPO_ROOT>/~\/storlets\//g' deploy/installation_vars.yml
fi
cp prepare_host-sample deploy/prepare_host
sed -i 's/<PREPARING_HOST>/'$SWIFT_IP'/g' deploy/prepare_host
fi
ansible-playbook -s -i deploy/prepare_host prepare_storlets_install.yml
cd -

@ -1,27 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: cluster_config.json from template
hosts: localhost
vars_files:
- [deploy/installation_vars.yml]
tasks:
- stat: path="{{ playbook_dir }}/deploy/cluster_config.json"
register: st0
- template:
src: templates/cluster_config
dest: "{{ playbook_dir }}/deploy/cluster_config.json"
when: not st0.stat.exists

@ -1,11 +0,0 @@
[DEFAULT]
storlet_logcontainer = {{ storlet_log }}
lxc_root = {{ docker_device }}/scopes
cache_dir = {{ docker_device }}/cache/scopes
log_dir = {{ docker_device }}/logs/scopes
script_dir = {{ docker_device }}/scripts
storlets_dir = {{ docker_device }}/storlets/scopes
pipes_dir = {{ docker_device }}/pipes/scopes
docker_repo = ""
restart_linux_container_timeout = {{ restart_linux_container_timeout }}
storlet_timeout = {{ storlet_timeout }}

@ -1,16 +0,0 @@
[proxy-confs]
proxy_server_conf_file = /etc/swift/proxy-server.conf
storlet_proxy_server_conf_file = /etc/swift/storlet-proxy-server.conf
[object-confs]
object_server_conf_files = /etc/swift/object-server/1.conf, /etc/swift/object-server/2.conf, /etc/swift/object-server/3.conf, /etc/swift/object-server/4.conf
#object_server_conf_files = /etc/swift/object-server.conf
[common-confs]
storlet_middleware = {{ storlet_middleware }}
storlet_container = {{ storlet_container }}
storlet_dependency = {{ storlet_dependency }}
#storlet_log = {{ storlet_log }}
storlet_gateway_module = {{ storlet_gateway_module }}
storlet_gateway_conf = {{ storlet_gateway_conf_file }}
storlet_proxy_execution = {{ storlet_execute_on_proxy_only }}

@ -1,48 +0,0 @@
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(eranr): Change the below to use Ansible os_project_facts
- name: Get the project id from Keystone
shell: openstack --os-auth-url {{ keystone_public_url }} --os-identity-api-version {{ keystone_auth_version }} --os-username {{ keystone_admin_user }} --os-password {{ keystone_admin_password }} --os-project-name {{ keystone_admin_project }} --os-default-domain-name {{ keystone_default_domain }} project list | grep -w {{ project_name }} | awk '{print $2}'
register: project_id
- name: Print the project id whose container we are processing
debug:
msg: "{{ project_id.stdout_lines[0] }}"
- name: create the project specific docker image step 1 - create repo dir
file:
state: directory
dest: "/data/registry/repositories/{{ project_id.stdout_lines[0] }}"
owner: root
mode: 0755
become: true
- name: create the project specific docker image step 2 - create Docker file
template:
src: default_project_image_Dockerfile
dest: "/data/registry/repositories/{{ project_id.stdout_lines[0] }}/Dockerfile"
owner: root
mode: 0755
become: true
# To match the gateway runtime scope we need to use only the first 13 chars from project_id.stdout_lines[0]
- name: "Build the image {{ project_id.stdout_lines[0][0:13] }}"
command: "docker build -q -t {{ project_id.stdout_lines[0][0:13] }} ."
args:
chdir: "/data/registry/repositories/{{ project_id.stdout_lines[0] }}"
register: command_result
become: true
failed_when: "'sha256:' not in command_result.stdout"

@ -1,18 +0,0 @@
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM {{ project_image_name }}
MAINTAINER root

@ -1,112 +0,0 @@
#---------------------------------------------------------------------------
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# Limitations under the License.
#---------------------------------------------------------------------------
# TODO(eranr): Change the below to use Ansible os_project_facts module
- name: Get the default project id from Keystone
shell: openstack --os-auth-url {{ keystone_public_url }} --os-username {{ keystone_admin_user }} --os-password {{ keystone_admin_password }} --os-project-name {{ keystone_admin_project }} --os-default-domain-name {{ keystone_default_domain }} project list | grep -w {{ storlets_default_project_name }} | awk '{print $2}'
register: project_id
- name: Print the project id whose container we are processing
debug:
msg: "{{ project_id.stdout_lines[0] }}"
- name: Enable the default project for storlets
command: swift
--os-auth-url="{{ keystone_public_url }}"
--os-username="{{ storlets_default_project_user_name }}"
--os-password="{{ storlets_default_project_user_password }}"
--os-project-name="{{ storlets_default_project_name }}"
--os-project-domain-name="{{ keystone_default_domain }}"
post
--meta "Storlet-Enabled:True"
- name: Create Repository for building the image
file:
path: /data/registry/repositories/default_project_image
state: directory
mode: 0755
owner: root
- name: Create default_project_image Docker file
stat: path=/data/registry/repositories/default_project_image/Dockerfile
register: st0
- template:
src: "default_project_image_Dockerfile"
dest: "/data/registry/repositories/default_project_image/Dockerfile"
owner: root
mode: 0644
when: not st0.stat.exists
- name: Find the default project image id
shell: docker images | grep "{{project_id.stdout_lines[0]}}" | awk '{print $3}'
register: image_id
- name: Image id found is
debug: msg="{{image_id}}"
- name: Find the default project image id
shell: docker images | grep "{{project_id.stdout_lines[0]}}" | wc -l
register: image_exists
- name: Image exists is
debug: msg="{{image_exists}}"
# If the image id exists, set project_container_running
# according to the docker ps output
- name: Test if the default project container is running
shell: docker ps | grep "{{image_id.stdout_lines[0]}}" | wc -l
register: project_container_running
when: "'1' in image_exists.stdout_lines[0]"
- name: Container running state is
debug: msg="{{project_container_running}}"
# If the image id exists, set project_container_exists
# according to the docker ps -a output
- name: Test if the default project container exists
shell: docker ps -a | grep "{{image_id.stdout_lines[0]}}" | wc -l
register: project_container_exists
when: "'1' in image_exists.stdout_lines[0]"
- name: Container existence state is
debug: msg="{{project_container_exists}}"
# If project_container_exists get its id
- name: Get the container id
shell: docker ps -a | grep "{{image_id.stdout}}" | awk '{print $2}'
register: container_id
when: "project_container_exists.stdout is defined and '1' in project_container_exists.stdout"
- name: Container ID is
debug: msg="{{container_id}}"
- name: Stop the default project container
command: docker stop -t 1 "{{ container_id.stdout }}"
when: "project_container_running.stdout is defined and '1' in project_container_running.stdout"
- name: Remove the default project container
command: docker rm "{{ container_id.stdout }}"
when: "project_container_exists.stdout is defined and '1' in project_container_exists.stdout"
- name: Remove the default project container image
command: "docker rmi {{image_id.stdout}}"
when: "'1' in image_exists.stdout"
# To match the gateway runtime scope we need to use only the first 13 chars from project_id.stdout_lines[0]
- name: Build the new default project container image
command: "docker build -t {{ project_id.stdout_lines[0][0:13] }} ."
args:
chdir: "/data/registry/repositories/default_project_image"
register: command_result
failed_when: "'Successfully built' not in command_result.stdout"

@ -1,21 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM {{base_os_image}}_jre8_storlets
MAINTAINER root
RUN ["apt-get", "install", "-y", "--force-yes", "vim"]

@ -1,29 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Create {{ dockerfile_prefix }} Docker file
template:
src: "{{ dockerfile_prefix }}_Dockerfile"
dest: "/data/registry/repositories/{{ base_os_image }}_{{ layer_suffix }}/Dockerfile"
owner: root
mode: 0644
- name: Build the image {{ layer_suffix }}
command: "docker build -q -t {{ base_os_image }}_{{ layer_suffix }} ."
args:
chdir: "/data/registry/repositories/{{ base_os_image }}_{{ layer_suffix }}"
register: command_result
failed_when: "'sha256:' not in command_result.stdout"

@ -1,18 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Include here task file for various base images, e.g., centosXXX-jre8.yml
- include: ubuntu_16.04_jre8.yml

@ -1,38 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Tag the Ubuntu 16.04 image
shell: docker images | grep ubuntu_16.04 | wc -l
register: ubuntu_image
- command: "{{ item }}"
with_items:
- "docker pull ubuntu:16.04"
- "docker tag ubuntu:16.04 ubuntu_16.04"
when: "'0' in ubuntu_image.stdout_lines[0]"
- name: Create Repository for building the image
stat: path=/data/registry/repositories/ubuntu_16.04_jre8
register: st0
- file:
path: /data/registry/repositories/ubuntu_16.04_jre8
state: directory
mode: 0755
owner: root
when: not st0.stat.exists
- include: create_layer.yml dockerfile_prefix={{ base_os_image}}_jre8 layer_suffix=jre8

@ -1,28 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:16.04
MAINTAINER {{ base_image_maintainer }}
# Install all runtime dependencies in a single RUN instruction so the
# apt package cache never ends up baked into an intermediate layer.
# NOTE: the original chain ran 'apt-get update' twice and issued three
# separate 'apt-get install' calls; a single update + install is
# sufficient and produces the same package set.
RUN apt-get update && \
    apt-get install -y python git openjdk-8-jre-headless && \
    apt-get clean

@ -1,154 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Test for missing packages
shell: dpkg --get-selections | grep aufs-tools | wc -l
register: aufs
become: yes
- shell: dpkg --get-selections | grep linux-image-extra | wc -l
register: linux_image_extra
become: yes
- shell: dpkg --get-selections | grep docker-engine | awk {'print $2'} | grep -w install | wc -l
register: docker_engine
become: yes
when: "ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'trusty'"
- shell: dpkg --get-selections | grep docker.io | awk {'print $2'} | grep -w install | wc -l
register: docker_engine
become: yes
when: "ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'xenial'"
- command: uname -r
register: uname
- name: install necessary linux-image-extra
apt:
name: linux-image-generic-lts-xenial
update-cache: yes
force: yes
when: "'0' in linux_image_extra.stdout_lines[0]"
- name: install aufs-tools
apt:
name: aufs-tools=1:3.2+20130722-1.1
update-cache: yes
force: yes
when: "'0' in aufs.stdout_lines[0] and ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'trusty'"
become: yes
- name: install aufs-tools
apt:
name: aufs-tools
update-cache: yes
force: yes
when: "'0' in aufs.stdout_lines[0] and ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'xenial'"
become: yes
- name: Set Docker Ubuntu Repo Key
apt_key:
keyserver: hkp://p80.pool.sks-keyservers.net:80
id: 58118E89F3A912897C070ADBF76221572C52609D
state: present
ignore_errors: True
when: "'0' in docker_engine.stdout_lines[0]"
become: yes
- name: Install Apt Transport HTTPS
apt: name=apt-transport-https state=present update-cache=yes force=yes
when: "'0' in docker_engine.stdout_lines[0]"
become: yes
- name: Add Docker Apt repository
apt_repository:
repo: 'deb https://apt.dockerproject.org/repo ubuntu-trusty main'
state: present
when: "'0' in docker_engine.stdout_lines[0] and ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'trusty'"
become: yes
# Register the Docker upstream apt repository for Ubuntu Xenial.
# BUG FIX: this task previously gated on ansible_distribution_release ==
# 'trusty', so the xenial repository was never added on Xenial hosts
# (the sibling trusty task already handles trusty). Gate on 'xenial' to
# match the 'ubuntu-xenial' repo line and the xenial install task below.
- name: Add Docker Apt repository
  apt_repository:
    repo: 'deb https://apt.dockerproject.org/repo ubuntu-xenial main'
    state: present
  when: "'0' in docker_engine.stdout_lines[0] and ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'xenial'"
  become: yes
- name: Install Docker Client
apt:
name: docker-engine
state: present
update-cache: yes
force: yes
when: "'0' in docker_engine.stdout_lines[0] and ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'trusty'"
become: yes
- name: Install Docker Client
apt:
name: docker.io
state: present
update-cache: yes
force: yes
when: "'0' in docker_engine.stdout_lines[0] and ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'xenial'"
become: yes
# There seems to be a bug in the installation where config files are not installed.
# Might be a bug in the un-install where docker thinks it is upgrading.
# the below will overcome this as follows:
# If config files are installed we only change the default dir for docker data
# Otherwise, install the conf files from templates.
- stat: path=/etc/default/docker
register: st1
- shell: grep {{ docker_device }} /etc/default/docker | wc -l
register: edited
- name: Configure the Docker client to use the LXC block device.
lineinfile:
dest: /etc/default/docker
regexp: "^DOCKER_OPTS.*"
line: DOCKER_OPTS="-g {{ docker_device }}/docker"
backup: yes
state: present
when: st1.stat.exists and '0' in edited.stdout
# Hack to overcome installation problem where no conf files are installed
- stat: path=/etc/default/docker
register: st2
- name: Install docker defaults from template
template: src=etc_default_docker dest=/etc/default/docker owner=root mode=0644
when: not st2.stat.exists
# Hack to overcome installation problem where no conf files are installed
- stat: path=/etc/init/docker.conf
register: st3
- name: Install service config file from template
template: src=etc_init_docker.conf dest=/etc/init/docker.conf owner=root mode=0755
when: not st3.stat.exists
# Hack to overcome installation problem where no conf files are installed
- stat: path=/etc/init.d/docker
register: st4
- name: Install initd config from template
template: src=etc_init.d_docker dest=/etc/init.d/docker owner=root mode=0755
when: not st4.stat.exists
- name: Stop Docker Daemon
shell: bash -c "/usr/sbin/service docker stop"
become: yes
ignore_errors: yes
- name: Restart Docker Daemon
shell: bash -c "/usr/sbin/service docker start"
become: yes

@ -1,13 +0,0 @@
# Docker Upstart and SysVinit configuration file
# Customize location of Docker binary (especially for development testing).
#DOCKER="/usr/local/bin/docker"
# Use DOCKER_OPTS to modify the daemon startup options.
DOCKER_OPTS="-g {{ docker_device }}/docker"
# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy="http://127.0.0.1:3128/"
# This is also a handy place to tweak where Docker's temporary files go.
#export TMPDIR="/mnt/bigdrive/docker-tmp"

@ -1,140 +0,0 @@
#!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: docker
# Required-Start: $syslog $remote_fs
# Required-Stop: $syslog $remote_fs
# Should-Start: cgroupfs-mount cgroup-lite
# Should-Stop: cgroupfs-mount cgroup-lite
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Create lightweight, portable, self-sufficient containers.
# Description:
# Docker is an open-source project to easily create lightweight, portable,
# self-sufficient containers from any application. The same container that a
# developer builds and tests on a laptop can run at scale, in production, on
# VMs, bare metal, OpenStack clusters, public clouds and more.
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/docker)
DOCKER=/usr/bin/$BASE
# This is the pid file managed by docker itself
DOCKER_PIDFILE=/var/run/$BASE.pid
# This is the pid file created/managed by start-stop-daemon
DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid
DOCKER_LOGFILE=/var/log/$BASE.log
DOCKER_OPTS=
DOCKER_DESC="Docker"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
# Check docker is present
if [ ! -x $DOCKER ]; then
log_failure_msg "$DOCKER not present or not executable"
exit 1
fi
fail_unless_root() {
	# Refuse to continue unless the effective user is root (uid 0).
	if [ "$(id -u)" -ne 0 ]; then
		log_failure_msg "$DOCKER_DESC must be run as root"
		exit 1
	fi
}
cgroupfs_mount() {
	# Mount the cgroup hierarchies the docker daemon needs.
	# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
	# Skip entirely when cgroups are already managed via fstab, the
	# kernel has no cgroup support, or /sys/fs/cgroup is absent.
	if grep -v '^#' /etc/fstab | grep -q cgroup \
		|| [ ! -e /proc/cgroups ] \
		|| [ ! -d /sys/fs/cgroup ]; then
		return
	fi
	# Mount the tmpfs that hosts the per-subsystem cgroup mount points.
	if ! mountpoint -q /sys/fs/cgroup; then
		mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
	fi
	# Subshell so the cd does not leak into the caller's working dir.
	(
		cd /sys/fs/cgroup
		# Mount each enabled subsystem listed in /proc/cgroups
		# (column 4 == 1 means the subsystem is enabled).
		for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
			mkdir -p $sys
			if ! mountpoint -q $sys; then
				if ! mount -n -t cgroup -o $sys cgroup $sys; then
					# Mount failed: remove the empty mount point.
					rmdir $sys || true
				fi
			fi
		done
	)
}
case "$1" in
start)
fail_unless_root
cgroupfs_mount
touch "$DOCKER_LOGFILE"
chgrp docker "$DOCKER_LOGFILE"
ulimit -n 1048576
if [ "$BASH" ]; then
ulimit -u 1048576
else
ulimit -p 1048576
fi
log_begin_msg "Starting $DOCKER_DESC: $BASE"
start-stop-daemon --start --background \
--no-close \
--exec "$DOCKER" \
--pidfile "$DOCKER_SSD_PIDFILE" \
--make-pidfile \
-- \
-d -p "$DOCKER_PIDFILE" \
$DOCKER_OPTS \
>> "$DOCKER_LOGFILE" 2>&1
log_end_msg $?
;;
stop)
fail_unless_root
log_begin_msg "Stopping $DOCKER_DESC: $BASE"
start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE"
log_end_msg $?
;;
restart)
fail_unless_root
docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null`
[ -n "$docker_pid" ] \
&& ps -p $docker_pid > /dev/null 2>&1 \
&& $0 stop
$0 start
;;
force-reload)
fail_unless_root
$0 restart
;;
status)
status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC"
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac

@ -1,41 +0,0 @@
description "Docker daemon"
start on (local-filesystems and net-device-up IFACE!=lo)
stop on runlevel [!2345]
limit nofile 524288 1048576
limit nproc 524288 1048576
respawn
pre-start script
# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
if grep -v '^#' /etc/fstab | grep -q cgroup \
|| [ ! -e /proc/cgroups ] \
|| [ ! -d /sys/fs/cgroup ]; then
exit 0
fi
if ! mountpoint -q /sys/fs/cgroup; then
mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
fi
(
cd /sys/fs/cgroup
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
mkdir -p $sys
if ! mountpoint -q $sys; then
if ! mount -n -t cgroup -o $sys cgroup $sys; then
rmdir $sys || true
fi
fi
done
)
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
DOCKER=/usr/bin/$UPSTART_JOB
DOCKER_OPTS=
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$DOCKER" -d $DOCKER_OPTS
end script

@ -1,3 +0,0 @@
# hide docker's loopback devices from udisks, and thus from user desktops
SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"

@ -1,42 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Create docker-registry service
stat: path=/etc/init.d/docker-registry
register: st0
- template:
src: etc/init.d/docker-registry
dest: /etc/init.d/docker-registry
owner: root
mode: 0755
when: not st0.stat.exists
- name: Start Docker repository service
shell: bash -c "/usr/sbin/service docker-registry start"
- name: Test repository
shell: docker images | grep my_busybox | wc -l
register: busy_box
- command: docker pull busybox
when: "'0' in busy_box.stdout_lines[0]"
- command: docker tag busybox "{{ inventory_hostname }}:{{ docker_registry_port }}/my_busybox"
when: "'0' in busy_box.stdout_lines[0]"
- command: docker push "{{ inventory_hostname }}:{{ docker_registry_port }}/my_busybox"
when: "'0' in busy_box.stdout_lines[0]"

@ -1,23 +0,0 @@
<configuration>
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>/tmp/SDaemon.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- daily rollover. Make sure the path matches the one in the file element or else
the rollover logs are placed in the working directory. -->
<fileNamePattern>/srv/logs/application_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>1MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- keep 30 days' worth of history -->
<maxHistory>30</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%-4relative [%thread] %-5level %logger{35} - %msg%n</pattern>
</encoder>
</appender>
<root level="TRACE">
<appender-ref ref="FILE" />
</root>
</configuration>

@ -1,29 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Create {{ dockerfile_prefix }} Docker file
template:
src: "{{ dockerfile_prefix }}_Dockerfile"
dest: "/data/registry/repositories/{{ base_os_image }}_{{ layer_suffix }}/Dockerfile"
owner: root
mode: 0644
- name: Build the image {{ layer_suffix }}
command: "docker build -q -t {{ base_os_image }}_{{ layer_suffix }} ."
args:
chdir: "/data/registry/repositories/{{ base_os_image }}_{{ layer_suffix }}"
register: command_result
failed_when: "'sha256:' not in command_result.stdout"

@ -1,31 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Create Repository for building the image
file:
path: "/data/registry/repositories/{{ base_os_image }}_jre8_storlets"
state: directory
mode: 0755
owner: root
- name: Copy logback.xml
copy:
src: "{{item}}"
dest: "/data/registry/repositories/{{ base_os_image }}_jre8_storlets/"
with_items:
- logback.xml
- include: create_layer.yml dockerfile_prefix={{ base_os_image }}_jre8_storlets layer_suffix=jre8_storlets

@ -1,32 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM {{base_os_image}}_jre8
MAINTAINER {{ base_image_maintainer }}
# Create swift user/group
RUN [ "groupadd", "-g", "{{ swift_group_id }}", "swift" ]
RUN [ "useradd", "-u" , "{{ swift_user_id }}", "-g", "{{ swift_group_id }}", "swift" ]
# Copy files
COPY ["logback.xml", "/usr/local/lib/storlets/"]
RUN ["chmod", "0744", "/usr/local/lib/storlets/logback.xml"]
CMD ["prod", "/mnt/channels/factory_pipe", "DEBUG"]
ENTRYPOINT ["/usr/local/libexec/storlets/init_container.sh"]

@ -1,79 +0,0 @@
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(eran): Get the enabled flag name from config file
- name: Set account metadata in swift -- enable storlets
command: swift post
--os-auth-url="{{ keystone_public_url }}"
--os-username="{{ storlets_default_project_user_name }}"
--os-password="{{ storlets_default_project_user_password }}"
--os-project-name="{{ storlets_default_project_name }}"
--os-project-domain-name="{{ keystone_default_domain }}"
--meta "Storlet-Enabled:True"
# TODO(eran): Get the log container name from config file
- name: create the Swift log container
command: swift post
--os-auth-url="{{ keystone_public_url }}"
--os-username="{{ storlets_default_project_user_name }}"
--os-password="{{ storlets_default_project_user_password }}"
--os-project-name="{{ storlets_default_project_name }}"
--os-project-domain-name="{{ keystone_default_domain }}"
log
# TODO(eran): Get the storlet container name from config file
- name: create the Swift storlet container
command: swift post
--os-auth-url="{{ keystone_public_url }}"
--os-username="{{ storlets_default_project_user_name }}"
--os-password="{{ storlets_default_project_user_password }}"
--os-project-name="{{ storlets_default_project_name }}"
--os-project-domain-name="{{ keystone_default_domain }}"
--read-acl "{{ storlets_default_project_name }}:{{ storlets_default_project_member_user }}"
storlet
# TODO(eran): Get the dependency container name from config file
- name: create the Swift dependency container
command: swift post
--os-auth-url="{{ keystone_public_url }}"
--os-username="{{ storlets_default_project_user_name }}"
--os-password="{{ storlets_default_project_user_password }}"
--os-project-name="{{ storlets_default_project_name }}"
--os-project-domain-name="{{ keystone_default_domain }}"
dependency
# TODO(eran): Get the docker_images container name from config file
# This is for future use when we bring back the management stuff
# Specifically note the acls given to the storlet manager
- name: create the Swift docker_images container
command: swift post
--os-auth-url="{{ keystone_public_url }}"
--os-username="{{ storlets_default_project_user_name }}"
--os-password="{{ storlets_default_project_user_password }}"
--os-project-name="{{ storlets_default_project_name }}"
--os-project-domain-name="{{ keystone_default_domain }}"
--read-acl "{{ storlet_management_account }}:{{ storlet_management_admin_username }}"
--write-acl "{{ storlet_management_account }}:{{ storlet_management_admin_username }}"
docker_images
#- name: test if need to create member user
# shell: openstack --os-auth-url {{ keystone_admin_url }} --os-identity-api-version {{ keystone_auth_version }} --os-username {{ keystone_admin_user }} --os-password {{ keystone_admin_password }} --os-project-name {{ keystone_admin_project }} --os-default-domain-name {{ keystone_default_domain }} user list --project {{ storlets_default_project_name }} | grep {{ storlets_default_project_member_user }} | wc -l
# register: member_user
#- debug: msg={{ member_user }}
#- name: Create member user
# shell: openstack --os-auth-url {{ keystone_admin_url }} --os-identity-api-version {{ keystone_auth_version }} --os-username {{ keystone_admin_user }} --os-password {{ keystone_admin_password }} --os-project-name {{ keystone_admin_project }} --os-default-domain-name {{ keystone_default_domain }} user create --project {{ storlets_default_project_name }} --password {{ storlets_default_project_member_password }} --enable {{ storlets_default_project_member_user }}
# when: "'0' in member_user.stdout"

@ -1,48 +0,0 @@
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: create installation directory
file: path={{ engine_install_dir }} state=directory recurse=yes
- name: Copy Swift middleware conf template
template:
src: ../../common_templates/swift_middleware_conf
dest: "{{ engine_install_dir }}"
owner: "{{ swift_run_time_user }}"
group: "{{ swift_run_time_group }}"
mode: 0644
- name: copy install script
copy:
src: "{{ item }}"
dest: "{{ engine_install_dir }}"
with_items:
- ../../common_files/swift_config.py
- name: Copy gateway conf template
template:
src: ../../common_templates/storlet-docker-gateway.conf-sample
dest: "{{ storlet_gateway_conf_file }}"
owner: "{{ swift_run_time_user }}"
group: "{{ swift_run_time_group }}"
mode: 0644
- name: configure swift
shell: chdir={{ engine_install_dir }}
python swift_config.py install swift_middleware_conf "{{ swift_run_time_user }}"
- name: restart swift
shell: swift-init --run-dir="{{swift_run_time_dir}}" all restart
ignore_errors: yes

@ -1,21 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: restart swift
shell: swift-init --run-dir="{{swift_run_time_dir}}" all {{ item }}
with_items:
- stop
- start

@ -1,79 +0,0 @@
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Copy python code tar ball
copy:
src: "/tmp/storlets.tar.gz"
dest: "/tmp"
- name: Create temp directory for python code
file:
path: "/tmp/storlets_src"
state: directory
- name: Unarchive python code tar ball
unarchive:
src: "/tmp/storlets.tar.gz"
dest: "/tmp/storlets_src/"
- name: Change permission of install scripts
file:
path: "/tmp/storlets_src/install_libs.sh"
mode: "0755"
- name: Install C/Java codes on remote host
shell: "/tmp/storlets_src/install_libs.sh"
args:
chdir: "/tmp/storlets_src"
become: true
- name: Install python requirements
shell: "pip install -r /tmp/storlets_src/requirements.txt"
args:
chdir: "/tmp/storlets_src"
become: true
- name: Install Python codes on remote host
command: "python setup.py install"
args:
chdir: "/tmp/storlets_src"
become: true
- name: Create scripts directory
file:
path: "{{ docker_device }}/scripts/"
state: directory
owner: "{{ swift_run_time_user }}"
group: "{{ swift_run_time_group }}"
mode: 0755
become: true
- name: install runtime scripts used by swift middleware
copy:
src: "{{ storlet_source_dir }}/{{ item }}"
dest: "{{ docker_device }}/scripts/"
mode: "04755"
owner: root
group: root
with_items:
- "scripts/restart_docker_container"
- "scripts/send_halt_cmd_to_daemon_factory.py"
become: true
- name: Remove code repository from remote host
file:
path: "/tmp/storlets_src"
state: absent
become: true

@ -1,37 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: create installation directory
shell: mkdir -p {{ engine_install_dir }} creates={{ engine_install_dir }}
- name: copy install script
copy: src="{{ item }}" dest={{ engine_install_dir }}
with_items:
- ../../common_files/swift_config.py
- name: Copy middleware conf template
template:
src: ../../common_templates/swift_middleware_conf
dest: "{{ engine_install_dir }}"
owner: root
mode: 0644
- name: configure swift
shell: chdir={{ engine_install_dir }}
python swift_config.py remove swift_middleware_conf "{{ swift_run_time_user }}"
- name: restart swift
shell: swift-init --run-dir="{{swift_run_time_dir}}" all restart

@ -1,21 +0,0 @@
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: test os version
assert:
that:
- "ansible_distribution == 'Ubuntu'"
- "ansible_distribution_version >= '14.04'"

@ -1,66 +0,0 @@
#! /usr/bin/python
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
class Inventory(object):
    """Dynamic Ansible inventory built from a JSON cluster config file."""

    def __init__(self, fname):
        self.__load_config__(fname)

    def __load_config__(self, name):
        # Read and parse the JSON configuration into self.conf.
        with open(name) as fp:
            self.conf = json.loads(fp.read())

    def show_list(self):
        """Return the full inventory: hosts per group plus shared vars.

        Each group gets its host list from conf['groups'] and a private
        copy of the global conf['all'] variables.
        """
        inventory = {}
        for group in ('storlet-mgmt', 'storlet-proxy', 'storlet-storage',
                      'docker'):
            inventory[group] = {
                'hosts': self.conf['groups'][group],
                'vars': dict(self.conf['all']),
            }
        return inventory

    def show_host(self, name):
        """Return per-host variables (only the SSH user is exposed)."""
        return {'ansible_ssh_user': self.conf['all']['ansible_ssh_user']}
def main():
    """Entry point: answer Ansible's --list / --host inventory queries."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--list', action='store_true')
    parser.add_argument('--host')
    opts = parser.parse_args()

    # The config path is fixed relative to the playbook working dir.
    inventory = Inventory('deploy/cluster_config.json')

    result = {}
    if opts.list:
        result = inventory.show_list()
    if opts.host:
        # --host takes precedence when both flags are supplied.
        result = inventory.show_host(opts.host)
    print(json.dumps(result))


if __name__ == '__main__':
    main()

@ -1,57 +0,0 @@
{
"groups" : {
"storlet-mgmt": {{ mgmt_group | to_json }},
"storlet-proxy": {{ proxy_group | to_json }},
"storlet-storage": {{ storage_group | to_json }},
"docker": {{ docker_group | to_json }}
},
"all" : {
"ansible_ssh_user" : "{{ ansible_ssh_user }}",
"docker_device": "{{ docker_device }}",
"storlet_source_dir": "{{ repo_root }}",
"python_dist_packages_dir": "usr/local/lib/python2.7/dist-packages",
"storlet_gateway_conf_file": "/etc/swift/storlet_docker_gateway.conf",
"keystone_endpoint_host": "{{ keystone_endpoint_host }}",
"keystone_public_url": "http://{{ keystone_endpoint_host }}/identity/v3",
"keystone_admin_url": "http://{{ keystone_endpoint_host }}/identity_admin",
"keystone_admin_password": "{{ keystone_admin_password }}",
"keystone_admin_user": "{{keystone_admin_user}}",
"keystone_admin_project": "{{keystone_admin_project}}",
"keystone_default_domain": "{{keystone_default_domain}}",
"keystone_auth_version": "3",
"swift_endpoint_host": "{{ swift_endpoint_host }}",
"swift_run_time_user" : "{{ swift_run_time_user }}",
"swift_run_time_group" : "{{ swift_run_time_group }}",
"swift_run_time_dir": "{{ swift_run_time_dir }}",
"storlets_management_user": "{{ storlets_management_user }}",
"storlet_management_account": "{{ storlet_management_account }}",
"storlet_management_admin_username": "{{ storlet_management_admin_username }}",
"storlet_manager_admin_password": "{{ storlet_manager_admin_password }}",
"storlet_management_swift_topology_container": "swift_cluster",
"storlet_management_swift_topology_object": "cluster_config.json",
"storlet_management_ansible_dir": "/opt/ibm/ansible/playbook",
"storlet_management_install_dir": "/opt/ibm",
"storlets_enabled_attribute_name": "storlet-enabled",
"docker_registry_random_string": "ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ1234",
"docker_registry_port": "5001",
"storlets_default_project_name": "{{ storlets_default_project_name }}",
"storlets_default_project_user_name": "{{ storlets_default_project_user_name }}",
"storlets_default_project_user_password": "{{ storlets_default_project_user_password }}",
"storlets_default_project_member_user" : "tester_member",
"storlets_default_project_member_password" : "member",
"base_image_maintainer": "root",
"base_os_image": "ubuntu_16.04",
"storlets_image_name_suffix": "ubuntu_16.04_jre8_storlets",
"swift_user_id": "1003",
"swift_group_id": "1003",
"storlet_middleware": "storlet_handler",
"storlet_container": "storlet",
"storlet_dependency": "dependency",
"storlet_log": "storletlog",
"storlet_images": "docker_images",
"storlet_timeout": "40",
"storlet_gateway_module": "docker",
"storlet_execute_on_proxy_only": "false",
"restart_linux_container_timeout": "3"
}
}

@ -1,83 +0,0 @@
#!/bin/bash
set -eu
# Invokes a devstack install that consists of
# keystone and swift.
if [ "$#" -ne 2 ]; then
echo "Usage: $0 [target] [ip]"
echo "target = host | docker"
exit
fi
TARGET=$1
if [ "$TARGET" != "host" ] && [ "$TARGET" != "docker" ]; then
echo "target must be either \"host\" or \"docker\""
exit 1
fi
SWIFT_IP=$2
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
DEVSTACK_DIR=~/devstack
# checkout devstack, run it and add fstab entry
if [ ! -e $DEVSTACK_DIR ]; then
git clone git://github.com/openstack-dev/devstack.git $DEVSTACK_DIR
cp $DIR/localrc.sample $DEVSTACK_DIR/localrc
sed -i 's/<set ip!>/'$SWIFT_IP'/g' $DEVSTACK_DIR/localrc
sed -i 's/<set db password!>/admin/g' $DEVSTACK_DIR/localrc
fi
# run devstack
cd $DEVSTACK_DIR
# This is an ugly hack to overcome
# devstack installation problem in docker
# TODO(eranr): address this after
# adding a devstack plugin to storlets!
if [ "$TARGET" == "docker" ]; then
set +e
./stack.sh
sudo service mysql start
set -e
fi
./stack.sh
# stack.sh starts swift in a non-standard manner
# we thus stop it before continuing
set +u
source functions
source lib/swift
stop_swift
set -u
cd -
# Make sure all keystone services are up.
/usr/local/bin/uwsgi /etc/keystone/keystone-uwsgi-public.ini &> /dev/null &
/usr/local/bin/uwsgi /etc/keystone/keystone-uwsgi-admin.ini &> /dev/null &
# add tester, testing, test which is admin
source $DEVSTACK_DIR/localrc
project_test_created=$(openstack project list | grep -w $SWIFT_DEFAULT_PROJECT | wc -l)
if [ $project_test_created -eq 0 ]; then
openstack project create $SWIFT_DEFAULT_PROJECT
fi
user_tester_created=$(openstack user list | grep -w $SWIFT_DEFAULT_USER | wc -l)
if [ $user_tester_created -eq 0 ]; then
openstack user create --project $SWIFT_DEFAULT_PROJECT --password $SWIFT_DEFAULT_USER_PWD $SWIFT_DEFAULT_USER
openstack role add --user $SWIFT_DEFAULT_USER --project $SWIFT_DEFAULT_PROJECT admin
fi
user_member_created=$(openstack user list | grep -w $SWIFT_MEMBER_USER | wc -l)
if [ $user_member_created -eq 0 ]; then
openstack user create --project $SWIFT_DEFAULT_PROJECT --password $SWIFT_MEMBER_USER_PWD $SWIFT_MEMBER_USER
openstack role add --user $SWIFT_MEMBER_USER --project $SWIFT_DEFAULT_PROJECT _member_
fi
# add entry to fstab
mount_added=$(grep swift.img /etc/fstab | wc -l)
if [ $mount_added -eq 0 ]; then
sudo sh -c 'echo "/opt/stack/data/swift/drives/images/swift.img /opt/stack/data/swift/drives/sdb1 xfs loop" >> /etc/fstab'
fi
set +eu

@ -1,35 +0,0 @@
ENABLE_HTTPD_MOD_WSGI_SERVICES=False
KEYSTONE_IP=<set ip!>
SWIFT_IP=<set ip!>
ENABLED_SERVICES=key,swift,mysql
ADMIN_USER=admin
ADMIN_PASSWORD=admin
ADMIN_PROJECT=admin
MYSQL_PASSWORD=<set db password!>
RABBIT_PASSWORD=$ADMIN_PASSWORD
SERVICE_PASSWORD=$ADMIN_PASSWORD
export OS_IDENTITY_API_VERSION=3
export OS_AUTH_URL="http://$KEYSTONE_IP/identity_admin"
export OS_USERNAME=$ADMIN_USER
export OS_USER_DOMAIN_ID=default
export OS_PASSWORD=$ADMIN_PASSWORD
export OS_PROJECT_NAME=$ADMIN_USER
export OS_PROJECT_DOMAIN_ID=default
export OS_REGION_NAME=RegionOne
HOST_IP=$SWIFT_IP
SWIFT_SERVICE_PROTOCOL=http
SWIFT_DEFAULT_BIND_PORT=8080
# service local host is used for ring building
SWIFT_SERVICE_LOCAL_HOST=$HOST_IP
# service listen address for proxy
SWIFT_SERVICE_LISTEN_ADDRESS=$HOST_IP
SWIFT_LOOPBACK_DISK_SIZE=20G
SWIFT_MAX_FILE_SIZE=5368709122
SWIFT_HASH=1234567890
SWIFT_DEFAULT_PROJECT=test
SWIFT_DEFAULT_USER=tester
SWIFT_DEFAULT_USER_PWD=testing
SWIFT_MEMBER_USER=tester_member
SWIFT_MEMBER_USER_PWD=member

234
s2aio.sh

@ -1,182 +1,124 @@
#!/bin/bash
# s2aio controls an all in one installation of swift, keystone and storlets
# s2aio has 3 sub commands: install, start and stop
# install would install from scratch an all in one swift with the storlet engine.
# the installation has two flavors:
# 1. Jenkins job installation, for running the functional tests.
# 2. Developer installation.
# start and stop are currently supported only for the host flavor.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -e
REPO_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
DEVSTACK_DIR=~/devstack
SWIFT_IP=127.0.0.1
KEYSTONE_IP=$SWIFT_IP
KEYSTONE_PROTOCOL=http
KEYSTONE_PUBLIC_URL=${KEYSTONE_PROTOCOL}://${KEYSTONE_IP}/identity/v3
IDENTITY_API_VERSION=3
SWIFT_DATA_DIR=/opt/stack/data/swift
usage() {
echo "Usage: s2aio.sh install <flavor> <target>"
echo "Usage: s2aio.sh install"
echo " s2aio.sh start"
echo " s2aio.sh stop"
echo "flavor = jenkins | dev"
echo "target = host | docker"
exit 1
}
_prepare_devstack_env() {
# Checkout devstack
if [ ! -e $DEVSTACK_DIR ]; then
git clone git://github.com/openstack-dev/devstack.git $DEVSTACK_DIR
cp devstack/localrc.sample $DEVSTACK_DIR/localrc
fi
source $DEVSTACK_DIR/functions
source $DEVSTACK_DIR/functions-common
source $DEVSTACK_DIR/lib/swift
source devstack/plugin.sh
}
start_s2aio() {
set -e
swift-init --run-dir /opt/stack/data/swift/run/ all start
sudo mkdir -p /var/run/uwsgi
sudo chown ${USER}:`id -g -n ${USER}` /var/run/uwsgi
swift-init --run-dir ${SWIFT_DATA_DIR}/run/ all start
/usr/local/bin/uwsgi /etc/keystone/keystone-uwsgi-public.ini &> /dev/null &
/usr/local/bin/uwsgi /etc/keystone/keystone-uwsgi-admin.ini &> /dev/null &
exit 0
}
stop_s2aio() {
sh -c 'swift-init --run-dir /opt/stack/data/swift/run/ all stop'
_stop_s2aio() {
set +e
swift-init --run-dir ${SWIFT_DATA_DIR}/run/ all stop
sh -c 'ps aux | pgrep uwsgi | xargs kill -9'
set -e
}
stop_s2aio() {
_stop_s2aio
exit 0
}
install_docker() {
sudo apt-get install apt-transport-https aufs-tools linux-image-generic-lts-xenial -y --force-yes
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo sh -c "echo deb https://apt.dockerproject.org/repo ubuntu-xenial main > /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install docker-engine -y --force-yes
sudo sh -c "echo DOCKER_OPTS=\"--storage-driver=vfs\" >> /etc/default/docker"
sudo service docker restart
}
install_swift_on_container() {
# run the swift docker container
S2AIO_RUNNING=`sudo docker ps | grep -c s2aio`
S2AIO_EXISTS=`sudo docker ps -a | grep -c s2aio`
if [ "$S2AIO_RUNNING" == "0" ]; then
if [ "$S2AIO_EXISTS" == "1" ]; then
sudo docker rm s2aio
fi
sudo docker run -i -d --privileged=true --name s2aio -t ubuntu:14.04
fi
export S2AIO_IP=`sudo docker exec s2aio ifconfig | grep "inet addr" | head -1 | awk '{print $2}' | awk -F":" '{print $2}'`
# Take care of host key verification
touch ~/.ssh/known_hosts
ssh-keygen -R $S2AIO_IP -f ~/.ssh/known_hosts
ssh-keyscan -H $S2AIO_IP >> ~/.ssh/known_hosts
sudo docker exec s2aio sh -c "echo deb http://us.archive.ubuntu.com/ubuntu/ xenial-backports main restricted universe multiverse >> /etc/apt/sources.list"
sudo docker exec s2aio apt-get update
sudo docker exec s2aio apt-get install software-properties-common -y --force-yes
sudo docker exec s2aio apt-add-repository -y ppa:ansible/ansible
sudo docker exec s2aio apt-get update
sudo docker exec s2aio apt-get install openssh-server git ansible -y --force-yes
sudo docker exec s2aio service ssh start
# Add the key to the user's authorized keys
sudo docker exec s2aio mkdir -p /root/.ssh
sudo docker exec s2aio bash -c "echo `cat ~/.ssh/id_rsa.pub` > /root/.ssh/authorized_keys"
sudo docker exec s2aio useradd stack
sudo docker exec s2aio mkdir /home/stack
sudo docker exec s2aio bash -c 'grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||\
echo "#includedir /etc/sudoers.d" >> /etc/sudoers'
sudo docker exec s2aio bash -c '( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" >\
/etc/sudoers.d/50_stack_sh )'
sudo docker cp install/swift/install_swift.sh s2aio:/home/stack/install_swift.sh
sudo docker cp install/swift/localrc.sample s2aio:/home/stack/localrc.sample
sudo docker exec s2aio chown -R stack:stack /home/stack
sudo docker exec --user stack s2aio chmod -R 0755 /home/stack
sudo docker exec --user stack s2aio /home/stack/install_swift.sh docker $S2AIO_IP
sudo docker exec s2aio service rsyslog restart
}
install_swift_on_host() {
export S2AIO_IP='127.0.0.1'
# Add the key to the user's authorized keys
grep -s -F ${USER} ~/.ssh/authorized_keys || cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
# Take care of host key verification for the current user
if [ -f ~/.ssh/known_hosts ]; then
ssh-keygen -R localhost -f ~/.ssh/known_hosts
fi
ssh-keyscan -H localhost >> ~/.ssh/known_hosts
ssh-keyscan -H 127.0.0.1 >> ~/.ssh/known_hosts
# Allow Ansible to ssh locally as root without a password
sudo mkdir -p /root/.ssh
sudo grep -s -F ${USER} /root/.ssh/authorized_keys || sudo sh -c 'cat ~/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys'
sudo sh -c 'echo "" >> /etc/ssh/sshd_config'
sudo sh -c 'echo "# allow ansible connections from local host" >> /etc/ssh/sshd_config'
sudo sh -c 'echo "Match Address 127.0.0.1" >> /etc/ssh/sshd_config'
sudo sh -c 'echo "\tPermitRootLogin without-password" >> /etc/ssh/sshd_config'
sudo service ssh restart
# Install Swift
cd install/swift
./install_swift.sh host $S2AIO_IP
cd -
}
install_storlets() {
install/storlets/prepare_storlets_install.sh "$FLAVOR" "$TARGET"
# Install Storlets
cd install/storlets
./install_storlets.sh
install_swift_using_devstack() {
cd $DEVSTACK_DIR
./stack.sh
stop_swift
cd -
# TODO: this is for tests. Deal accordingly.
cp install/storlets/deploy/cluster_config.json .
sudo chown $USER:$USER cluster_config.json
# add entry to fstab
mount_added=$(grep swift.img /etc/fstab | wc -l)
if [ $mount_added -eq 0 ]; then
sudo sh -c 'echo "/opt/stack/data/swift/drives/images/swift.img /opt/stack/data/swift/drives/sdb1 xfs loop" >> /etc/fstab'
fi
}
install_s2aio() {
# Make sure hostname is resolvable
grep -q -F ${HOSTNAME} /etc/hosts || sudo sed -i '1i127.0.0.1\t'"$HOSTNAME"'' /etc/hosts
install/install_ansible.sh
# Allow Ansible to ssh as the current user without a password
# While at it, take care of host key verification.
# This involves:
# 1. Generate an rsa key for the current user if necessary
if [ ! -f ~/.ssh/id_rsa.pub ]; then
ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""
fi
if [ "$TARGET" == "docker" ]; then
install_docker
install_swift_on_container
else
install_swift_on_host
fi
_prepare_devstack_env
install_swift_using_devstack
install_storlets
echo "export OS_IDENTITY_API_VERSION=3" >> ~/.bashrc
echo "export OS_USERNAME=tester; export OS_PASSWORD=testing" >> ~/.bashrc
echo "export OS_PROJECT_NAME=test; OS_DEFAULT_DOMAIN=default" >> ~/.bashrc
echo "export OS_AUTH_URL=http://"$S2AIO_IP":5000/v3" >> ~/.bashrc
echo "export OS_IDENTITY_API_VERSION=$KEYSTONE_IDENTITY_API_VERSION" >> ~/.bashrc
echo "export OS_USERNAME=$SWIFT_DEFAULT_USER; export OS_PASSWORD=$SWIFT_DEFAULT_USER_PWD" >> ~/.bashrc
echo "export OS_PROJECT_NAME=$SWIFT_DEFAULT_PROJECT; export OS_DEFAULT_DOMAIN=default" >> ~/.bashrc
echo "export OS_AUTH_URL=$KEYSTONE_PUBLIC_URL" >> ~/.bashrc
}
set -eu
if [ "$#" -ne 1 ] && [ "$#" -ne 3 ]; then
usage
fi
uninstall_swift_using_devstack() {
_stop_s2aio
cd $DEVSTACK_DIR
./unstack.sh
cd -
echo "Removing swift device mount, creating /etc/fstab.bak"
sudo sed -i.bak '/swift.img/d' /etc/fstab
}
uninstall_s2aio() {
_prepare_devstack_env
echo "Removing all storlets run time data"
uninstall_storlets
echo "Uninstalling Swift"
uninstall_swift_using_devstack
}
COMMAND="$1"
case $COMMAND in
"install" )
if [ "$#" -ne 3 ]; then
usage
fi
FLAVOR="$2"
if [ "$FLAVOR" != "jenkins" ] && [ "$FLAVOR" != "dev" ]; then
echo "flavor must be either \"jenkins\" or \"dev\""
exit 1
fi
TARGET="$3"
if [ "$TARGET" != "host" ] && [ "$TARGET" != "docker" ]; then
echo "target must be either \"host\" or \"docker\""
exit 1
fi
install_s2aio
;;
"uninstall" )
uninstall_s2aio
;;
"start" )
start_s2aio
;;
@ -188,8 +130,4 @@ case $COMMAND in
usage
esac
echo "export OS_IDENTITY_API_VERSION=3" >> ~/.bashrc
echo "export OS_USERNAME=tester; export OS_PASSWORD=testing" >> ~/.bashrc
echo "export OS_PROJECT_NAME=test; OS_DEFAULT_DOMAIN=default" >> ~/.bashrc
echo "export OS_AUTH_URL=http://"$S2AIO_IP"/identity/v3" >> ~/.bashrc
set +eu
set +e

@ -1,14 +0,0 @@
#! /bin/bash
# This script deploys storlets over existing swift cluster.
# Make sure we have ansible
install/install_ansible.sh
# Prepare the storlets installation
install/storlets/prepare_storlets_install.sh deploy
# Deploy storlets
cd install/storlets
./install_storlets.sh deploy
cd -

@ -34,8 +34,6 @@
dest="dependencies/slf4j-api-1.7.7.jar"
verbose="true"
usetimestamp="true"/>
<copy file="../../install/storlets/roles/docker_storlet_engine_image/files/logback.xml"
tofile="dependencies/logback.xml"/>
</target>
<!-- Install build Dependencies-->
<target name="install_dependencies" depends="dependencies">

@ -234,7 +234,7 @@ class RunTimeSandbox(object):
self.sandbox_wait_timeout = \
int(conf.get('restart_linux_container_timeout', 3))
self.docker_repo = conf.get('docker_repo', 'localhost:5001')
self.docker_repo = conf.get('docker_repo', '')
self.docker_image_name_prefix = 'tenant'
# TODO(add line in conf)

@ -14,25 +14,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
'''
*VERY* initial cluster configuration file
The intention is to have this as the single source
for all cluster information needs such as:
- Swift Install
- Storlets Install
- Tests
- Deploy storlets tools
- etc.
'''
import json
import ConfigParser
class ClusterConfig(object):
def __init__(self, config_path):
with open(config_path, 'r') as f:
conf_string = f.read()
self.conf = json.loads(conf_string)
config = ConfigParser.ConfigParser()
config.read(config_path)
options = config.options('general')
self.conf = {}
for option in options:
self.conf[option] = config.get('general', option)
self._auth_version = '3'
# TODO(eran) get rid of returning raw conf
@ -41,35 +34,35 @@ class ClusterConfig(object):
@property
def domain_name(self):
return self.conf['all']['keystone_default_domain']
return self.conf['keystone_default_domain']
@property
def auth_uri(self):
return self.conf['all']['keystone_public_url']
return self.conf['keystone_public_url']
@property
def project_name(self):
return self.conf['all']['storlets_default_project_name']
return self.conf['storlets_default_project_name']
@property
def admin_user(self):
return self.conf['all']['storlets_default_project_user_name']
return self.conf['storlets_default_project_user_name']
@property
def admin_password(self):
return self.conf['all']['storlets_default_project_user_password']
return self.conf['storlets_default_project_user_password']
@property
def member_user(self):
return self.conf['all']['storlets_default_project_member_user']
return self.conf['storlets_default_project_member_user']
@property
def member_password(self):
return self.conf['all']['storlets_default_project_member_password']
return self.conf['storlets_default_project_member_password']
@property
def region(self):
return self.conf['all'].get('region', '')
return self.conf.get('region', '')
# TODO(eranr) move to cluster_config
@property

@ -23,7 +23,7 @@ from storlets.tools.utils import deploy_storlet, get_admin_auth, put_local_file
import os
CONFIG_DIR = os.environ.get('CLUSTER_CONF_DIR', os.getcwd())
CONFIG_FILE = os.path.join(CONFIG_DIR, 'cluster_config.json')
CONFIG_FILE = os.path.join(CONFIG_DIR, 'test.conf')
PATH_TO_STORLETS = os.environ.get(
'STORLET_SAMPLE_PATH',
# assuming, current working dir is at top of storlet repo

@ -1,2 +1,2 @@
#!/bin/bash
./s2aio.sh install jenkins host
./s2aio.sh install

13
tox.ini

@ -72,18 +72,6 @@ commands =
--exclude-dir .git \
{toxinidir} | xargs bashate --error . --verbose --ignore=E003,E006"
[testenv:ansible-lint]
# Perform an Ansible lint check
# Ignores the following rules:
# ANSIBLE0011: All tasks should be named
# ANSIBLE0012: Commands should not change things if nothing needs doing
# ANSIBLE0013: Use shell only when shell functionality is required
commands =
bash -c "ansible-lint -x ANSIBLE0011,ANSIBLE0012,ANSIBLE0013 \
{toxinidir}/install/*/*.yml"
passenv =
HOME
[flake8]
# E123, E125 skipped as they are invalid PEP-8.
# H301: one import per line
@ -100,6 +88,5 @@ basepython = python2.7
commands =
{[testenv:pep8python]commands}
{[testenv:bashate]commands}
{[testenv:ansible-lint]commands}
passenv =
HOME