Merge "Fix pep8 errors"
@@ -1,50 +1,49 @@
 import os
-import sys, getopt
-import socket
+import sys
 import time
-import ConfigParser
 from novaclient.v1_1 import client as nc
-import requests
-from random import randint
 from keystoneclient.v2_0 import client as kc
 from heatclient import client as hc
-from heatclient import exc as hc_exc
 from cinderclient import client as cc
 import re
 
 CONF = dict()
-CONF_FILE = '/etc/jenkins_jobs/credentials.conf'
+keys = ["os_username", "os_password", "os_auth_url", "os_tenant_name", "os_image_endpoint"]
 
 
 def load_conf():
-    # load credentials and configs
-    config = ConfigParser.ConfigParser()
-    config.readfp(open(CONF_FILE))
-    for key, val in config.items("default"):
-        CONF[key] = val
-
-    for env_item in os.environ:
-        CONF[env_item] = os.environ[env_item]
+    for item in keys:
+        CONF[item] = os.environ[item]
 
 
 def get_nova_client():
-    return nc.Client(username = CONF["os_username"],
-                     api_key = CONF["os_password"],
-                     auth_url = CONF["os_auth_url"],
-                     project_id = CONF["os_tenant_name"]
-                     )
+    return nc.Client(username=CONF["os_username"],
+                     api_key=CONF["os_password"],
+                     auth_url=CONF["os_auth_url"],
+                     project_id=CONF["os_tenant_name"])
 
 
 def get_auth_token():
-    keystone = kc.Client(username = CONF["os_username"],
-                         password = CONF["os_password"],
-                         tenant_name = CONF["os_tenant_name"],
-                         auth_url = CONF["os_auth_url"]
-                         )
+    keystone = kc.Client(username=CONF["os_username"],
+                         password=CONF["os_password"],
+                         tenant_name=CONF["os_tenant_name"],
+                         auth_url=CONF["os_auth_url"])
     return keystone.auth_token
 
 
 def get_heat_client():
-    return hc.Client('1', endpoint=CONF["os_image_endpoint"], token=get_auth_token())
+    return hc.Client('1',
+                     endpoint=CONF["os_image_endpoint"],
+                     token=get_auth_token())
 
 
 def get_cinder_client():
-    return cc.Client('1', CONF["os_username"], CONF["os_password"], CONF["os_tenant_name"], CONF["os_auth_url"])
+    return cc.Client('1',
+                     CONF["os_username"],
+                     CONF["os_password"],
+                     CONF["os_tenant_name"],
+                     CONF["os_auth_url"])
 
 
 def cleanup_heat():
     current_name = sys.argv[2]
@@ -54,23 +53,24 @@ def cleanup_heat():
     deleted_stacks = []
 
     for stack in stacks:
-        if name_regex.match(stack.stack_name) :
+        if name_regex.match(stack.stack_name):
             deleted_stacks.append(stack.stack_name)
             print stack.stack_name
             client.stacks.delete(stack.stack_name)
-    if not deleted_stacks :
+    if not deleted_stacks:
         return
     else:
         # Let Heat delete stacks
         time.sleep(60)
 
     stacks = client.stacks.list()
     for stack in stacks:
-        if stack.stack_name in deleted_stacks :
-            #Resource cleanup is required
+        if stack.stack_name in deleted_stacks:
+            # Resource cleanup is required
             print "At least one stack wasn't deleted!"
             print "Performing resources cleanup..."
             cleanup()
 
 
 def cleanup():
     client = get_nova_client()
@@ -82,7 +82,7 @@ def cleanup():
     name_regex = re.compile(current_name)
 
     for server in servers:
-        if name_regex.match(server.name) :
+        if name_regex.match(server.name):
             print server.name
             fl_ips = client.floating_ips.findall(instance_id=server.id)
             for fl_ip in fl_ips:
@@ -91,14 +91,15 @@ def cleanup():
 
     time.sleep(20)
     for volume in volumes:
-        if name_regex.match(volume.display_name) :
+        if name_regex.match(volume.display_name):
             print volume.display_name
             volume.delete()
 
     for group in secgroups:
-        if name_regex.match(group.name) :
+        if name_regex.match(group.name):
             print group.name
             group.delete()
 
 
 def main(argv):
     load_conf()
@@ -1,4 +0,0 @@
-[default]
-OS_USERNAME=ci-user
-OS_TENANT_NAME=ci
-OS_PASSWORD=nova
@@ -6,6 +6,9 @@ sleep 20
 source $JENKINS_HOME/credentials
 set -x
 job_type=$(echo $PREV_JOB | awk -F '-' '{ print $1 }')
+export os_username=$os_username
+export os_password=$os_password
+export os_tenant_name=$os_tenant_name
 if [[ "$HOST_NAME" =~ neutron ]]; then
     export os_auth_url="http://$OPENSTACK_HOST_NEUTRON_LAB:5000/v2.0"
     export os_image_endpoint="http://$OPENSTACK_HOST_NEUTRON_LAB:8004/v1/$NEUTRON_LAB_TENANT_ID"
@@ -2,7 +2,6 @@
 
 source $JENKINS_HOME/credentials
 
-sudo su - jenkins -c "cat $WORKSPACE/slave-scripts/credentials.conf > /etc/jenkins_jobs/credentials.conf"
 sudo su - zuul -c "cat $WORKSPACE/config/zuul/zuul.conf > /etc/zuul/zuul.conf"
 sudo su - zuul -c "cat $WORKSPACE/config/zuul/gearman-logging.conf > /etc/zuul/gearman-logging.conf"
 sudo su - zuul -c "cat $WORKSPACE/config/zuul/layout.yaml > /etc/zuul/layout.yaml"