Towards new installation: Deploy/playbook -> install/storlets

Change-Id: Ic549c94453da3325e4dce37a9dff6b5fb671792f
This commit is contained in:
Eran Rom 2016-04-03 14:01:15 +03:00
parent f266cbb20b
commit 6dcd6c511f
68 changed files with 93 additions and 16 deletions

View File

@@ -100,7 +100,7 @@
(2) hosts file configured
-->
<target name="deploy_host_engine" depends="build_engine">
<exec executable="ansible-playbook" dir="Deploy/playbook" failonerror="true">
<exec executable="ansible-playbook" dir="install/storlets" failonerror="true">
<arg value="-s"/>
<arg value="-i"/>
<arg value="deploy/hosts"/>
@@ -109,7 +109,7 @@
</target>
<target name="deploy_container_engine" depends="build_engine">
<exec executable="ansible-playbook" dir="Deploy/playbook" failonerror="true">
<exec executable="ansible-playbook" dir="install/storlets" failonerror="true">
<arg value="-s"/>
<arg value="-i"/>
<arg value="deploy/hosts"/>

View File

@@ -34,8 +34,8 @@ Deploying
Two additional tasks of interest in our build.xml are the deploy_host_engine and deploy_container_engine. These tasks are based on the Ansible installation scripts and do the following:
#. deploy_host_engine would get all the code that is relevant to the host side (python middleware and SBus) and deploy it on the hosts, as described in Deploy/playbook/hosts file
#. deploy_container_engine, would create an updated image of the tenant defined in Deploy/playbook/common.yml and distribute it to all nodes as defined in Deploy/playbook/hosts. Typically, the hosts file will describe an all-in-one type of installation.
#. deploy_host_engine would get all the code that is relevant to the host side (python middleware and SBus) and deploy it on the hosts, as described in install/storlets/hosts file
#. deploy_container_engine, would create an updated image of the tenant defined in install/storlets/common.yml and distribute it to all nodes as defined in install/storlets/hosts. Typically, the hosts file will describe an all-in-one type of installation.
Running the Tests
=================

View File

@@ -25,7 +25,7 @@ The installation scripts take two input files:
#. storlet-storage. The list of the Swift cluster object servers
#. root or a sudoer credentials Ansible can use to ssh the machines. In the below example we assume all nodes have the same credentials.
#. An Ansible var file with various inputs, such as the Keystone IP and credentials, the Storlet management account information, etc. The file is located in Deploy/playbook/common.yml, and we give below the entries of interest that may need editing.
#. An Ansible var file with various inputs, such as the Keystone IP and credentials, the Storlet management account information, etc. The file is located in install/storlets/common.yml, and we give below the entries of interest that may need editing.
At a high level the installation consists of the following steps:
@@ -113,9 +113,9 @@ Install
to perform the installation follow these steps:
#. Create a hosts file as described above
#. Edit the file Deploy/playbook/common.yml according to the above
#. Edit the file install/storlets/common.yml according to the above
#. Under the root dir of the repo run 'ant build'
#. Under Deploy/playbook/ run 'ansible-playbook -i <hosts file> storlet.yml'
#. Under install/storlets run 'ansible-playbook -i <hosts file> storlet.yml'
in case the hosts file has credentials of a sudoer user, you will need to run: 'ansible-playbook -s -i <hosts file> storlet.yml'
Tip: you might want to "export ANSIBLE_HOST_KEY_CHECKING=False" before running the playbook in case the hosts are not in known_hosts.

View File

@@ -0,0 +1,75 @@
{
"groups" : {
"keystone": [ "127.0.0.1" ],
"swift-proxy": [ "127.0.0.1" ],
"swift-md": [ "127.0.0.1" ],
"swift-object": [ "10.20.0.1" , "10.20.0.3" ],
"swift-ring-builder": [ "127.0.0.1" ]
},
"127.0.0.1" : {
"rings_info": {
"ip" : "127.0.0.1",
"zone": "1",
"region": "1"
},
"swift_devices": {
"object_devices": [
{ "name" : "sda", "weight": "100", "fs":"xfs", "port": "6000" }
],
"container_devices": [
{ "name" : "sda", "weight": "100", "fs":"xfs", "port": "6001" }
],
"account_devices" : [
{ "name" : "sda", "weight": "100", "fs":"xfs", "port": "6002" }
]
},
"ansible_ssh_user" : "root",
"internal_ip" : "127.0.0.1"
},
"keystone" : {
"vars": {
"keystone_endpoint_host": "127.0.0.1",
"keystone_internal_url": "http://127.0.0.1:5000/v2.0",
"keystone_admin_url": "http://127.0.0.1:35357/v2.0",
"keystone_public_url": "http://127.0.0.1:5000/v2.0",
"keystone_admin_token": "ADMIN",
"swift_identity_password": "passw0rd",
"openstack_region" : "1",
"swift_public_endpoint": "127.0.0.1",
"swift_management_endpoint": "127.0.0.1",
"swift_internal_endpoint": "127.0.0.1",
"proxy_port": "80"
}
},
"swift": {
"vars": {
"swift_hash_path_prefix": "d55ca1881f1e09b1",
"swift_hash_path_suffix": "a3f3c381c916a198",
"log_swift_statsd": true,
"openstack_version": "liberty",
"installation_source": "git",
"swift_git": "https://github.com/openstack/swift.git",
"swift_git_dir": "/tmp/git/swift",
"swift_git_tag": "2.7.0"
}
},
"swift-ring-builder": {
"ring_builder" : {
"account": {
"min_part_hours": "1",
"part_power": "18",
"replicas": "1"
},
"object": {
"min_part_hours": "1",
"part_power": "18",
"replicas": "1"
},
"container": {
"min_part_hours": "1",
"part_power": "18",
"replicas": "1"
}
}
}
}

View File

@@ -21,29 +21,31 @@ ant build
ssh-keygen -q -t rsa -f /home/$USER/.ssh/id_rsa -N ""
cp /home/$USER/.ssh/id_rsa.pub /home/$USER/.ssh/authorized_keys
# Install Swift
ansible-playbook -s -i tests/swift_install/hosts tests/swift_install/swift_install.yml
cd /tmp/swift_install/swift-install
sudo sed -i 's/<Set Me!>/'$USER'/g' localhost_config.json
ansible-playbook -s -i inventory/vagrant/localhost_dynamic_inventory.py main-install.yml
# Install Storlets
cd -
sudo mkdir Deploy/playbook/deploy
sudo mkdir install/storlets/deploy
echo "Copying vars and hosts file to deploy directory"
sudo cp Deploy/playbook/common.yml-sample Deploy/playbook/deploy/common.yml
sudo cp Deploy/playbook/hosts-sample Deploy/playbook/deploy/hosts
sudo chown -R $USER:$USER Deploy/playbook/deploy
sed -i 's/<Set Me!>/127.0.0.1/g' Deploy/playbook/deploy/common.yml
sed -i 's/<Set Me!>/'$USER'/g' Deploy/playbook/deploy/hosts
sed -i '/ansible_ssh_pass/d' Deploy/playbook/deploy/hosts
sudo cp install/storlets/common.yml-sample install/storlets/deploy/common.yml
sudo cp install/storlets/hosts-sample install/storlets/deploy/hosts
sudo chown -R $USER:$USER install/storlets/deploy
sed -i 's/<Set Me!>/127.0.0.1/g' install/storlets/deploy/common.yml
sed -i 's/<Set Me!>/'$USER'/g' install/storlets/deploy/hosts
sed -i '/ansible_ssh_pass/d' install/storlets/deploy/hosts
# If no arguments are supplied, assume we are under jenkins job, and
# we need to edit common.yml to set the appropriate source dir
if [ -z "$1" ]
then
sed -i 's/~\/storlets/\/home\/'$USER'\/workspace\/gate-storlets-functional\//g' Deploy/playbook/deploy/common.yml
sed -i 's/~\/storlets/\/home\/'$USER'\/workspace\/gate-storlets-functional\//g' install/storlets/deploy/common.yml
fi
cd Deploy/playbook
cd install/storlets
echo "Running hosts cluster_check playbook"
ansible-playbook -s -i deploy/hosts cluster_check.yml
echo "Running docker_repository playbook"