Add tests; fix Eternus volume bug; fix README

marco 2016-01-25 23:49:50 +01:00
parent 017204cb52
commit daa52fa325
17 changed files with 545 additions and 10 deletions

4  .gitignore vendored Normal file

@@ -0,0 +1,4 @@
tests/build/
*.swp
*.pyc
.ropeproject


@@ -202,7 +202,7 @@ Cinder setup with Hitachi VPS
enabled: true
backend:
hus100_backend:
name: HUS100
type_name: HUS100
backend: hus100_backend
engine: hitachi_vsp
connection: FC
@@ -265,6 +265,7 @@ Cinder setup with Fujitsu Eternus
user: username
password: pass
connection: FC/iSCSI
name: 10kThinPro
10k_SAS:
type_name: 10k_SAS
pool: SAS10K
@@ -274,6 +275,7 @@ Cinder setup with Fujitsu Eternus
user: username
password: pass
connection: FC/iSCSI
name: 10k_SAS
Cinder setup with IBM GPFS filesystem
@@ -287,7 +289,7 @@ Cinder setup with IBM GPFS filesystem
type_name: GPFS-GOLD
engine: gpfs
mount_point: '/mnt/gpfs-openstack/cinder/gold'
GPFS-SILVER
GPFS-SILVER:
type_name: GPFS-SILVER
engine: gpfs
mount_point: '/mnt/gpfs-openstack/cinder/silver'
@@ -315,7 +317,7 @@ Extra parameters for HP LeftHand
cinder type-key normal-storage set hplh:data_pl=r-10-2 hplh:provisioning=full
Cinder setup with HP LeftHand
Cinder setup with Solidfire
.. code-block:: yaml
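
The type-key example quoted above assumes the volume type already exists; a minimal sketch of creating it by hand with the python-cinderclient CLI (the type name normal-storage and backend name HP-LeftHand are taken from the examples in this README, and tying the type to the backend via volume_backend_name is standard Cinder practice rather than something this formula performs here):

    cinder type-create normal-storage
    cinder type-key normal-storage set volume_backend_name=HP-LeftHand
    cinder type-key normal-storage set hplh:data_pl=r-10-2 hplh:provisioning=full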


@@ -2,4 +2,4 @@
[{{ backend_name }}]
volume_backend_name={{ backend_name }}
volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ backend.backend }}.xml
cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_{{ backend_name }}.xml
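
For reference, rendering this template for one of the backends defined in the new Eternus test pillar below (backend_name 10kThinPro) now yields a section whose XML path is derived from the backend name itself:

    [10kThinPro]
    volume_backend_name=10kThinPro
    volume_driver=cinder.volume.drivers.fujitsu.fujitsu_eternus_dx_fc.FJDXFCDriver
    cinder_eternus_config_file=/etc/cinder/cinder_fujitsu_eternus_dx_10kThinPro.xml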


@@ -109,13 +109,11 @@ hp3parclient:
{%- if backend.engine == 'fujitsu' %}
cinder_driver_fujitsu:
cinder_driver_fujitsu_{{ loop.index }}:
pkg.latest:
- name: cinder-driver-fujitsu
{%- for backend_name, backend in volume.get('backend', {}).iteritems() %}
/etc/cinder/cinder_fujitsu_eternus_dx_{{ backend.name }}.xml:
/etc/cinder/cinder_fujitsu_eternus_dx_{{ backend_name }}.xml:
file.managed:
- source: salt://cinder/files/{{ volume.version }}/cinder_fujitsu_eternus_dx.xml
- template: jinja
@@ -124,8 +122,6 @@ cinder_driver_fujitsu:
- require:
- pkg: cinder-driver-fujitsu
{%- endfor %}
{%- endif %}
{%- endfor %}
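
With the state ID keyed on loop.index and the XML path on backend_name, the Eternus test pillar added below (two fujitsu backends, 10kThinPro and 10k_SAS) should render roughly the following states — a sketch of the expected output, not a verbatim dump:

    cinder_driver_fujitsu_1:
      pkg.latest:
        - name: cinder-driver-fujitsu
    /etc/cinder/cinder_fujitsu_eternus_dx_10kThinPro.xml:
      file.managed:
        - source: salt://cinder/files/liberty/cinder_fujitsu_eternus_dx.xml
    cinder_driver_fujitsu_2:
      pkg.latest:
        - name: cinder-driver-fujitsu
    /etc/cinder/cinder_fujitsu_eternus_dx_10k_SAS.xml:
      file.managed:
        - source: salt://cinder/files/liberty/cinder_fujitsu_eternus_dx.xml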

3  metadata.yml Normal file

@@ -0,0 +1,3 @@
name: "cinder"
version: "0.2"
source: "https://github.com/tcpcloud/salt-formula-cinder"
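
tests/run_tests.sh below reads the name field from this file and, in install_dependencies, looks for an optional dependencies list whose source URLs it clones into the test build. A hypothetical stanza (the formula name and URL are purely illustrative; only source is actually consumed by the script):

    dependencies:
      - name: keystone
        source: "https://github.com/tcpcloud/salt-formula-keystone"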


@@ -0,0 +1,25 @@
cinder:
  controller:
    enabled: true
    version: liberty
    backend:
      ceph_backend:
        type_name: standard-iops
        backend: ceph_backend
        pool: volumes
        engine: ceph
        user: cinder
        secret_uuid: password
        client_cinder_key: password
  volume:
    enabled: true
    version: liberty
    backend:
      ceph_backend:
        type_name: standard-iops
        backend: ceph_backend
        pool: volumes
        engine: ceph
        user: cinder
        secret_uuid: password
        client_cinder_key: password
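
For context, a ceph backend defined like this is expected to end up as an RBD section in cinder.conf. The option names below are the stock Cinder RBD driver options; whether the formula's template emits exactly this set is an assumption:

    [ceph_backend]
    volume_backend_name=ceph_backend
    volume_driver=cinder.volume.drivers.rbd.RBDDriver
    rbd_pool=volumes
    rbd_user=cinder
    rbd_secret_uuid=password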


@@ -0,0 +1,31 @@
cinder:
  controller:
    enabled: true
    version: liberty
    osapi:
      host: 127.0.0.1
    database:
      engine: mysql
      host: 127.0.0.1
      port: 3306
      name: cinder
      user: cinder
      password: password
    identity:
      engine: keystone
      host: 127.0.0.1
      port: 35357
      tenant: service
      user: cinder
      password: password
    glance:
      host: 127.0.0.1
      port: 9292
    message_queue:
      engine: rabbitmq
      host: 127.0.0.1
      port: 5672
      user: openstack
      password: password
      virtual_host: '/openstack'
      ha_queues: true


@@ -0,0 +1,31 @@
cinder:
  controller:
    enabled: true
    version: liberty
    osapi:
      host: 127.0.0.1
    database:
      engine: mysql
      host: localhost
      port: 3306
      name: cinder
      user: cinder
      password: password
    identity:
      engine: keystone
      host: 127.0.0.1
      port: 35357
      tenant: service
      user: cinder
      password: password
    glance:
      host: 127.0.0.1
      port: 9292
    message_queue:
      engine: rabbitmq
      host: 127.0.0.1
      port: 5672
      user: openstack
      password: password
      virtual_host: '/openstack'
      ha_queues: false


@@ -0,0 +1,49 @@
cinder:
  volume:
    enabled: true
    version: liberty
    backend:
      10kThinPro:
        type_name: 10kThinPro
        engine: fujitsu
        pool: 10kThinPro
        host: 127.0.0.1
        port: 5988
        user: username
        password: password
        connection: FC
        name: 10kThinPro
      10k_SAS:
        type_name: 10k_SAS
        pool: SAS10K
        engine: fujitsu
        host: 127.0.0.1
        port: 5988
        user: username
        password: password
        connection: FC
        name: 7k2RAID6
  controller:
    enabled: true
    version: liberty
    backend:
      10kThinPro:
        type_name: 10kThinPro
        engine: fujitsu
        pool: 10kThinPro
        host: 127.0.0.1
        port: 5988
        user: username
        password: password
        connection: FC
        name: 10kThinPro
      10k_SAS:
        type_name: 10k_SAS
        pool: SAS10K
        engine: fujitsu
        host: 127.0.0.1
        port: 5988
        user: username
        password: password
        connection: FC
        name: 7k2RAID6


@@ -0,0 +1,25 @@
cinder:
  volume:
    enabled: true
    version: liberty
    backend:
      GPFS-GOLD:
        type_name: GPFS-GOLD
        engine: gpfs
        mount_point: '/mnt/gpfs-openstack/cinder/gold'
      GPFS-SILVER:
        type_name: GPFS-SILVER
        engine: gpfs
        mount_point: '/mnt/gpfs-openstack/cinder/silver'
  controller:
    enabled: true
    version: liberty
    backend:
      GPFS-GOLD:
        type_name: GPFS-GOLD
        engine: gpfs
        mount_point: '/mnt/gpfs-openstack/cinder/gold'
      GPFS-SILVER:
        type_name: GPFS-SILVER
        engine: gpfs
        mount_point: '/mnt/gpfs-openstack/cinder/silver'


@@ -0,0 +1,34 @@
cinder:
  controller:
    enabled: true
    version: liberty
    backend:
      hp3par_backend:
        type_name: hp3par
        backend: hp3par_backend
        user: admin
        password: password
        url: http://localhost/api/v1
        cpg: OpenStackCPG
        host: localhost
        login: admin
        sanpassword: password
        debug: True
        snapcpg: OpenStackSNAPCPG
  volume:
    enabled: true
    version: liberty
    backend:
      hp3par_backend:
        type_name: hp3par
        backend: hp3par_backend
        user: admin
        password: password
        url: http://localhost/api/v1
        cpg: OpenStackCPG
        host: localhost
        login: admin
        sanpassword: password
        debug: True
        snapcpg: OpenStackSNAPCPG
        engine: hp3par


@@ -0,0 +1,25 @@
cinder:
  volume:
    enabled: true
    version: liberty
    backend:
      HP-LeftHand:
        type_name: normal-storage
        engine: hp_lefthand
        api_url: 'https://127.0.0.1:8081/lhos'
        username: username
        password: password
        clustername: cluster1
        iscsi_chap_enabled: false
  controller:
    enabled: true
    version: liberty
    backend:
      HP-LeftHand:
        type_name: normal-storage
        engine: hp_lefthand
        api_url: 'https://127.0.0.1:8081/lhos'
        username: username
        password: password
        clustername: cluster1
        iscsi_chap_enabled: false


@@ -0,0 +1,25 @@
cinder:
  volume:
    enabled: true
    version: liberty
    backend:
      solidfire:
        type_name: normal-storage
        engine: solidfire
        san_ip: 127.0.0.1
        san_login: username
        san_password: password
        clustername: cluster1
        sf_emulate_512: false
  controller:
    enabled: true
    version: liberty
    backend:
      solidfire:
        type_name: normal-storage
        engine: solidfire
        san_ip: 127.0.0.1
        san_login: username
        san_password: password
        clustername: cluster1
        sf_emulate_512: false


@@ -0,0 +1,75 @@
cinder:
  volume:
    enabled: true
    version: liberty
    backend:
      7k2_SAS:
        engine: storwize
        type_name: 7k2_SAS
        host: 127.0.0.1
        port: 22
        user: username
        password: password
        connection: FC
        multihost: true
        multipath: true
        pool: SAS7K2
      10k_SAS:
        engine: storwize
        type_name: 10k_SAS
        host: 127.0.0.1
        port: 22
        user: username
        password: password
        connection: FC
        multihost: true
        multipath: true
        pool: SAS10K
      15k_SAS:
        engine: storwize
        type_name: 15k_SAS
        host: 127.0.0.1
        port: 22
        user: username
        password: password
        connection: FC
        multihost: true
        multipath: true
        pool: SAS15K
  controller:
    enabled: true
    version: liberty
    backend:
      7k2_SAS:
        engine: storwize
        type_name: 7k2_SAS
        host: 127.0.0.1
        port: 22
        user: username
        password: password
        connection: FC
        multihost: true
        multipath: true
        pool: SAS7K2
      10k_SAS:
        engine: storwize
        type_name: 10k_SAS
        host: 127.0.0.1
        port: 22
        user: username
        password: password
        connection: FC
        multihost: true
        multipath: true
        pool: SAS10K
      15k_SAS:
        engine: storwize
        type_name: 15k_SAS
        host: 127.0.0.1
        port: 22
        user: username
        password: password
        connection: FC
        multihost: true
        multipath: true
        pool: SAS15K


@@ -0,0 +1,31 @@
cinder:
  volume:
    enabled: true
    version: liberty
    osapi:
      host: 127.0.0.1
    database:
      engine: mysql
      host: 127.0.0.1
      port: 3306
      name: cinder
      user: cinder
      password: password
    identity:
      engine: keystone
      host: 127.0.0.1
      port: 35357
      tenant: service
      user: cinder
      password: password
    glance:
      host: 127.0.0.1
      port: 9292
    message_queue:
      engine: rabbitmq
      host: 127.0.0.1
      port: 5672
      user: openstack
      password: password
      virtual_host: '/openstack'
      ha_queues: true


@@ -0,0 +1,19 @@
cinder:
  controller:
    enabled: true
    version: liberty
    backend:
      hus100_backend:
        type_name: HUS100
        backend: hus100_backend
        engine: hitachi_vsp
        connection: FC
  volume:
    enabled: true
    version: liberty
    backend:
      hus100_backend:
        type_name: HUS100
        backend: hus100_backend
        engine: hitachi_vsp
        connection: FC
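
Each of these pillar fixtures is rendered (not applied) by tests/run_tests.sh below: for every *.sls under tests/pillar it calls state.show_sls on the formula, which for a single fixture amounts to a command like the following (<pillar_name> is a placeholder for the fixture's file name, used as the minion id):

    salt-call --retcode-passthrough --local -c tests/build/salt \
        --id=<pillar_name> state.show_sls cinder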

160  tests/run_tests.sh Executable file

@@ -0,0 +1,160 @@
#!/usr/bin/env bash

set -e
[ -n "$DEBUG" ] && set -x

CURDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
METADATA=${CURDIR}/../metadata.yml
FORMULA_NAME=$(cat $METADATA | python -c "import sys,yaml; print yaml.load(sys.stdin)['name']")

## Overrideable parameters
PILLARDIR=${PILLARDIR:-${CURDIR}/pillar}
BUILDDIR=${BUILDDIR:-${CURDIR}/build}
VENV_DIR=${VENV_DIR:-${BUILDDIR}/virtualenv}
DEPSDIR=${BUILDDIR}/deps

SALT_FILE_DIR=${SALT_FILE_DIR:-${BUILDDIR}/file_root}
SALT_PILLAR_DIR=${SALT_PILLAR_DIR:-${BUILDDIR}/pillar_root}
SALT_CONFIG_DIR=${SALT_CONFIG_DIR:-${BUILDDIR}/salt}
SALT_CACHE_DIR=${SALT_CACHE_DIR:-${SALT_CONFIG_DIR}/cache}

SALT_OPTS="${SALT_OPTS} --retcode-passthrough --local -c ${SALT_CONFIG_DIR}"

if [ "x${SALT_VERSION}" != "x" ]; then
    PIP_SALT_VERSION="==${SALT_VERSION}"
fi

## Functions

log_info() {
    echo "[INFO] $*"
}

log_err() {
    echo "[ERROR] $*" >&2
}

setup_virtualenv() {
    log_info "Setting up Python virtualenv"
    virtualenv $VENV_DIR
    source ${VENV_DIR}/bin/activate
    pip install salt${PIP_SALT_VERSION}
}

setup_pillar() {
    [ ! -d ${SALT_PILLAR_DIR} ] && mkdir -p ${SALT_PILLAR_DIR}
    echo "base:" > ${SALT_PILLAR_DIR}/top.sls
    for pillar in ${PILLARDIR}/*; do
        state_name=$(basename ${pillar%.sls})
        echo -e "  ${state_name}:\n    - ${state_name}" >> ${SALT_PILLAR_DIR}/top.sls
    done
}

setup_salt() {
    [ ! -d ${SALT_FILE_DIR} ] && mkdir -p ${SALT_FILE_DIR}
    [ ! -d ${SALT_CONFIG_DIR} ] && mkdir -p ${SALT_CONFIG_DIR}
    [ ! -d ${SALT_CACHE_DIR} ] && mkdir -p ${SALT_CACHE_DIR}

    echo "base:" > ${SALT_FILE_DIR}/top.sls
    for pillar in ${PILLARDIR}/*.sls; do
        state_name=$(basename ${pillar%.sls})
        echo -e "  ${state_name}:\n    - ${FORMULA_NAME}" >> ${SALT_FILE_DIR}/top.sls
    done

    cat << EOF > ${SALT_CONFIG_DIR}/minion
file_client: local
cachedir: ${SALT_CACHE_DIR}
verify_env: False

file_roots:
  base:
  - ${SALT_FILE_DIR}
  - ${CURDIR}/..

pillar_roots:
  base:
  - ${SALT_PILLAR_DIR}
  - ${PILLARDIR}
EOF
}

fetch_dependency() {
    dep_root="${DEPSDIR}/$(basename $1 .git)"
    dep_metadata="${dep_root}/metadata.yml"

    [ -d $dep_root ] && log_info "Dependency $1 already fetched" && return 0

    log_info "Fetching dependency $1"
    [ ! -d ${DEPSDIR} ] && mkdir -p ${DEPSDIR}
    git clone $1 ${DEPSDIR}/$(basename $1 .git)
    dep_name=$(cat $dep_metadata | python -c "import sys,yaml; print yaml.load(sys.stdin)['name']")
    ln -s ${dep_root}/${dep_name} ${SALT_FILE_DIR}/${dep_name}

    METADATA="${dep_metadata}" install_dependencies
}

install_dependencies() {
    grep -E "^dependencies:" ${METADATA} >/dev/null || return 0
    (python - | while read dep; do fetch_dependency "$dep"; done) << EOF
import sys,yaml
for dep in yaml.load(open('${METADATA}', 'r'))['dependencies']:
    print dep["source"]
EOF
}

clean() {
    log_info "Cleaning up ${BUILDDIR}"
    [ -d ${BUILDDIR} ] && rm -rf ${BUILDDIR} || exit 0
}

salt_run() {
    source ${VENV_DIR}/bin/activate
    salt-call ${SALT_OPTS} $*
}

prepare() {
    [ ! -d ${BUILDDIR} ] && mkdir -p ${BUILDDIR}

    setup_virtualenv
    setup_pillar
    setup_salt
    install_dependencies
}

run() {
    for pillar in ${PILLARDIR}/*.sls; do
        state_name=$(basename ${pillar%.sls})
        salt_run --id=${state_name} state.show_sls ${FORMULA_NAME} || (log_err "Execution of ${FORMULA_NAME}.${state_name} failed"; exit 1)
    done
}

_atexit() {
    RETVAL=$?
    trap true INT TERM EXIT

    if [ $RETVAL -ne 0 ]; then
        log_err "Execution failed"
    else
        log_info "Execution successful"
    fi

    return $RETVAL
}

## Main
trap _atexit INT TERM EXIT

case $1 in
    clean)
        clean
        ;;
    prepare)
        prepare
        ;;
    run)
        run
        ;;
    *)
        prepare
        run
        ;;
esac
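
A typical invocation, assuming virtualenv, git and PyYAML are available on the host (SALT_VERSION and the other variables at the top of the script are optional overrides):

    # build tests/build/ with a virtualenv, pillar_root and file_root, pinning Salt
    SALT_VERSION=2015.8.3 ./tests/run_tests.sh prepare
    # render every pillar fixture via state.show_sls (no argument runs prepare + run)
    ./tests/run_tests.sh run
    # remove tests/build/ again
    ./tests/run_tests.sh clean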