Make our utility scripts switch to the virtual env before running.

Change-Id: I76a214a81e72d08b3d00704b78e92b450943ddc4
This commit is contained in:
xiaodongwang 2014-09-04 14:23:49 -07:00
parent c70b608be3
commit 9a51828a87
11 changed files with 221 additions and 53 deletions

View File

@ -16,10 +16,19 @@
"""binary to deploy a cluster by compass client api."""
import logging
import os
import re
import requests
import site
import sys
import time
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
from compass.apiclient.restful import Client
from compass.utils import flags
from compass.utils import logsetting
@ -28,6 +37,12 @@ from compass.utils import logsetting
flags.add('compass_server',
help='compass server url',
default='http://127.0.0.1/api')
flags.add('compass_user_email',
help='compass user email',
default='admin@huawei.com')
flags.add('compass_user_password',
help='compass user password',
default='admin')
flags.add('switch_ips',
help='comma seperated switch ips',
default='')
@ -52,6 +67,9 @@ flags.add('adapter_os_name',
flags.add('adapter_target_system',
help='adapter target system name',
default='openstack')
flags.add('adapter_flavor',
help='adapter flavor name',
default='allinone')
flags.add('cluster_name',
help='cluster name',
default='cluster1')
@ -101,9 +119,28 @@ def _get_client():
return Client(flags.OPTIONS.compass_server)
def _login(client):
    """Log in to the compass server and return the api token.

    Args:
        client: api client exposing login(email, password) -> (status, token).

    Returns:
        the auth token returned by the server.

    Raises:
        Exception: if the server responds with an http status >= 400.
    """
    status, token = client.login(
        flags.OPTIONS.compass_user_email,
        flags.OPTIONS.compass_user_password
    )
    logging.info(
        'login status: %s, token: %s',
        status, token
    )
    if status >= 400:
        # BUG FIX: Exception() was being given printf-style arguments that
        # are never interpolated (the message would print as a tuple).
        # Format the message explicitly, matching the convention used by
        # the other helpers in this file (msg = ... ; raise Exception(msg)).
        msg = 'failed to login %s with user %s' % (
            flags.OPTIONS.compass_server,
            flags.OPTIONS.compass_user_email
        )
        raise Exception(msg)
    return token
def _get_machines(client):
"""get machines connected to the switch."""
status, resp = client.get_machines()
status, resp = client.list_machines()
logging.info(
'get all machines status: %s, resp: %s', status, resp)
if status >= 400:
@ -116,7 +153,7 @@ def _get_machines(client):
])
logging.info('machines to add: %s', list(machines_to_add))
machines = {}
for machine in resp['machines']:
for machine in resp:
mac = machine['mac']
if mac in machines_to_add:
machines[machine['id']] = mac
@ -134,14 +171,14 @@ def _get_machines(client):
def _poll_switches(client):
"""get all switches."""
status, resp = client.get_switches()
status, resp = client.list_switches()
logging.info('get all switches status: %s resp: %s', status, resp)
if status >= 400:
msg = 'failed to get switches'
raise Exception(msg)
all_switches = {}
for switch in resp['switches']:
for switch in resp:
all_switches[switch['ip']] = switch
# add a switch.
@ -163,7 +200,7 @@ def _poll_switches(client):
msg = 'failed to add switch %s' % switch_ip
raise Exception(msg)
all_switches[switch_ip] = resp['switch']
all_switches[switch_ip] = resp
else:
logging.info('switch %s is already added', switch_ip)
@ -185,7 +222,7 @@ def _poll_switches(client):
msg = 'failed to get switch %s' % switch_ip
raise Exception(msg)
switch = resp['switch']
switch = resp
all_switches[switch_ip] = switch
if switch['state'] == 'notsupported':
@ -202,8 +239,7 @@ def _poll_switches(client):
if remain_retries > 0:
for switch_ip, switch in all_switches.items():
status, resp = client.update_switch(
switch_id, switch_ip, **switch_credential)
status, resp = client.poll_switch(switch['id'])
if status >= 400:
msg = 'failed to update switch %s' % switch_ip
raise Exception(msg)
@ -216,51 +252,85 @@ def _poll_switches(client):
def _get_adapter(client):
"""get adapter."""
status, resp = client.get_adapters()
logging.info('get all adapters status: %s, resp: %s', status, resp)
status, resp = client.list_adapters()
logging.info(
'get all adapters status: %s, resp: %s',
status, resp
)
if status >= 400:
msg = 'failed to get adapters'
raise Exception(msg)
os_name_pattern = flags.OPTIONS.adapter_os_name
os_name_re = re.compile(os_name_pattern)
target_system = flags.OPTIONS.adapter_target_system
target_system_pattern = flags.OPTIONS.adapter_target_system
target_system_re = re.compile(target_system_pattern)
flavor_name_pattern = flags.OPTIONS.adapter_flavor
flavor_re = re.compile(flavor_name_pattern)
adapter_id = None
for adapter in resp['adapters']:
if (
os_name_re.match(adapter['os']) and
target_system == adapter['target_system']
):
os_id = None
flavor_id = None
adapter = None
for item in resp:
if target_system_re.match(item['distributed_system_name']):
adapter = item
adapter_id = adapter['id']
break
if not adapter_id:
msg = 'no adapter found for %s and %s' % (
os_name_pattern, target_system)
msg = 'no adapter found for %s' % target_system_pattern
raise Exception(msg)
for supported_os in adapter['supported_oses']:
if os_name_re.match(supported_os['name']):
os_id = supported_os['os_id']
break
if not os_id:
msg = 'no os found for %s' % os_name_pattern
raise Exception(msg)
for flavor in adapter['flavors']:
if flavor_re.match(flavor['name']):
flavor_id = flavor['id']
break
if not flavor_id:
msg = 'no flavor found for %s' % flavor_name_pattern
raise Exception(msg)
logging.info('adpater for deploying a cluster: %s', adapter_id)
return adapter_id
return (adapter_id, os_id, flavor_id)
def _add_cluster(client, adapter_id, machines):
def _add_subnet(client):
    """Placeholder for creating a subnet through the api client.

    Not implemented yet — intentionally does nothing.
    """
    pass
def _add_cluster(client, adapter_id, os_id, flavor_id, machines):
"""add a cluster."""
cluster_name = flags.OPTIONS.cluster_name
status, resp = client.add_cluster(
cluster_name=cluster_name, adapter_id=adapter_id)
cluster_name, adapter_id,
os_id, flavor_id)
logging.info('add cluster %s status: %s, resp: %s',
cluster_name, status, resp)
if status >= 400:
msg = 'failed to add cluster %s with adapter %s' % (
cluster_name, adapter_id)
msg = 'failed to add cluster %s with adapter %s os %s flavor %s' % (
cluster_name, adapter_id, os_id, flavor_id)
raise Exception(msg)
cluster = resp['cluster']
cluster = resp
cluster_id = cluster['id']
machines_dict = []
for machine_id in machines:
machines_dict.append({
'machine_id': machine_id
})
# add hosts to the cluster.
status, resp = client.add_hosts(
cluster_id=cluster_id,
machine_ids=machines.keys())
status, resp = client.add_hosts_to_cluster(
cluster_id,
machines_dict)
logging.info('add hosts to cluster %s status: %s, resp: %s',
cluster_id, status, resp)
if status >= 400:
@ -573,13 +643,14 @@ def main():
flags.init()
logsetting.init()
client = _get_client()
_login(client)
if flags.OPTIONS.poll_switches:
machines = _poll_switches(client)
else:
machines = _get_machines(client)
adapter_id = _get_adapter(client)
cluster_hosts = _add_cluster(client, adapter_id, machines)
adapter_id, os_id, flavor_id = _get_adapter(client)
cluster_hosts = _add_cluster(
client, adapter_id, os_id, flavor_id, machines)
_set_cluster_security(client, cluster_hosts)
_set_cluster_networking(client, cluster_hosts)
_set_cluster_partition(client, cluster_hosts)

View File

@ -1,6 +1,14 @@
#!/usr/bin/env python
# Entry-point wrapper for the compass CLI.
#
# NOTE(review): the '$PythonHome' placeholders below are apparently
# substituted at install time (install.sh runs sed with the virtualenv
# path over this file) — confirm against the install script.
import os
import site
import sys
# Activate the virtualenv in-process (Python 2 idiom; execfile is py2-only).
activate_this='$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
# Make the venv's site-packages and home directory importable.
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
# Redirect egg extraction to a writable location.
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
import compass.actions.cli as cli
sys.exit(cli.main())

43
bin/compassd Executable file
View File

@ -0,0 +1,43 @@
#!/bin/sh
# compassd — convenience wrapper that starts/stops/queries the two
# compass services (compass-celeryd and compass-progress-updated) as a
# single unit, propagating the first non-zero exit code.
RETVAL_CELERY=0
RETVAL_PROGRESS_UPDATE=0

start() {
    service compass-celeryd start
    RETVAL_CELERY=$?
    service compass-progress-updated start
    RETVAL_PROGRESS_UPDATE=$?
}

stop() {
    service compass-celeryd stop
    RETVAL_CELERY=$?
    service compass-progress-updated stop
    RETVAL_PROGRESS_UPDATE=$?
}

restart() {
    stop
    start
}

case "$1" in
    start|stop|restart)
        # $1 is validated by the pattern above, so calling it directly
        # dispatches to the matching function.
        $1
        ;;
    status)
        service compass-celeryd status
        RETVAL_CELERY=$?
        service compass-progress-updated status
        RETVAL_PROGRESS_UPDATE=$?
        ;;
    *)
        echo "Usage: $0 {start|stop|status|restart}"
        exit 1
        ;;
esac

# BUG FIX: the shebang is /bin/sh but the original used the bash-only
# [[ ]] test; on POSIX shells (dash, posh) that is a syntax error.
# Use the portable [ ] test instead.
if [ "$RETVAL_CELERY" -ne 0 ]; then
    exit $RETVAL_CELERY
fi
if [ "$RETVAL_PROGRESS_UPDATE" -ne 0 ]; then
    exit $RETVAL_PROGRESS_UPDATE
fi

View File

@ -17,8 +17,15 @@
"""utility binary to manage database."""
import os
import os.path
import site
import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
from flask.ext.script import Manager
from compass.actions import deploy

View File

@ -18,12 +18,23 @@
import functools
import lockfile
import logging
import os
import site
import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
from multiprocessing import Pool
from compass.actions import poll_switch
from compass.actions import util
from compass.db import database
from compass.db.api import database
from compass.db.api import switch as switch_api
from compass.db.api import user as user_api
from compass.tasks.client import celery
from compass.utils import daemonize
from compass.utils import flags
@ -37,47 +48,58 @@ flags.add('switch_ips',
flags.add_bool('async',
help='ryn in async mode',
default=True)
flags.add('thread_pool_size',
flags.add('thread_pool_size', type='int',
help='thread pool size when run in noasync mode',
default='4')
flags.add('run_interval',
default=4)
flags.add('run_interval', type='int',
help='run interval in seconds',
default=setting.POLLSWITCH_INTERVAL)
def pollswitches(switch_ips):
"""poll switch."""
poll_switch_ips = []
with database.session():
poll_switch_ips = util.update_switch_ips(switch_ips)
user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
poll_switches = []
all_switches = dict([
(switch['ip'], switch['credentials'])
for switch in switch_api.list_switches(user)
])
if switch_ips:
poll_switches = dict([
(switch_ip, all_switches[switch_ip])
for switch_ip in switch_ips
if switch_ip in all_switches
])
else:
poll_switches = all_switches
if flags.OPTIONS.async:
for poll_switch_ip in poll_switch_ips:
for switch_ip, switch_credentials in poll_switches.items():
celery.send_task(
'compass.tasks.pollswitch',
(poll_switch_ip,)
(user.email, switch_ip, switch_credentials)
)
else:
try:
pool = Pool(processes=int(flags.OPTIONS.thread_pool_size))
for poll_switch_ip in poll_switch_ips:
pool = Pool(processes=flags.OPTIONS.thread_pool_size)
for switch_ip, switch_credentials in poll_switches.items():
pool.apply_async(
poll_switch.poll_switch,
(poll_switch_ip,)
(user.email, switch_ip, switch_credentials)
)
pool.close()
pool.join()
except Exception as error:
logging.error('failed to poll switches %s',
poll_switch_ips)
poll_switches)
logging.exception(error)
if __name__ == '__main__':
flags.init()
logsetting.init()
database.init()
logging.info('run poll_switch')
daemonize.daemonize(
functools.partial(

View File

@ -18,6 +18,15 @@
import functools
import lockfile
import logging
import os
import site
import sys
activate_this = '$PythonHome/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
site.addsitedir('$PythonHome/lib/python2.6/site-packages')
sys.path.append('$PythonHome')
os.environ['PYTHON_EGG_CACHE'] = '/tmp/.egg'
from compass.actions import update_progress
from compass.db.api import database
@ -32,7 +41,7 @@ from compass.utils import util
flags.add_bool('async',
help='run in async mode',
default=True)
flags.add('run_interval',
flags.add('run_interval', type='int',
help='run interval in seconds',
default=setting.PROGRESS_UPDATE_INTERVAL)

View File

@ -1 +0,0 @@
CELERY_CONFIG_MODULE=compass.utils.celeryconfig_wrapper celeryd

View File

@ -88,7 +88,8 @@ RESP_MACHINES_HOSTS_FIELDS = [
'id', 'switch_id', 'switch_ip', 'machine_id', 'switch_machine_id',
'port', 'vlans', 'mac',
'ipmi_credentials', 'tag', 'location',
'name', 'os_name', 'clusters'
'name', 'os_name', 'clusters',
'created_at', 'updated_at'
]
RESP_CLUSTER_FIELDS = [
'name', 'id'

View File

@ -13,6 +13,7 @@ sudo cp -rf $COMPASSDIR/conf/* /etc/compass/
sudo cp -rf $COMPASSDIR/service/* /etc/init.d/
sudo cp -rf $COMPASSDIR/bin/*.py /opt/compass/bin/
sudo cp -rf $COMPASSDIR/bin/*.sh /opt/compass/bin/
sudo cp -rf $COMPASSDIR/bin/compassd /usr/bin/
sudo cp -rf $COMPASSDIR/bin/compass /usr/bin/
sudo cp -rf $COMPASSDIR/bin/chef/* /opt/compass/bin/
sudo cp -rf $WEB_HOME/public/* /var/www/compass_web/
@ -52,9 +53,13 @@ sudo sed -i "s/\$compass_ip/$ipaddr/g" /etc/compass/global_config
sudo sed -i "s/\$compass_hostname/$HOSTNAME/g" /etc/compass/global_config
sudo sed -i "s/\$compass_testmode/$TESTMODE/g" /etc/compass/global_config
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /var/www/compass/compass.wsgi
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /usr/bin/compass
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/poll_switch.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/progress_update.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/manage_db.py
sudo sed -e 's|$PythonHome|'$VIRTUAL_ENV'|' -i /opt/compass/bin/client.py
sudo sed -e 's|$Python|'$VIRTUAL_ENV/bin/python'|' -i /etc/init.d/compass-progress-updated
sudo sed -e 's|$CeleryPath|'$VIRTUAL_ENV/bin/celery'|' -i /etc/init.d/compass-celeryd
sudo sed -e 's|$Python|'$VIRTUAL_ENV/bin/python'|' -i /usr/bin/compassd
sudo sed -i "s/\$ipaddr/$ipaddr/g" /etc/compass/os_metadata/general.conf
sudo sed -i "s/\$hostname/$HOSTNAME/g" /etc/compass/os_metadata/general.conf
sed -i "s/\$gateway/$OPTION_ROUTER/g" /etc/compass/os_metadata/general.conf

View File

@ -80,11 +80,13 @@ case "$1" in
if [ -f $SUSE ]; then
checkproc -v -p /var/run/celery-worker.pid $CELERY
rc_status -v
RETVAL=$?
elif [ -f $DEBIAN ]; then
status_of_proc -p /var/run/celery-worker.pid $CELERY
RETVAL=$?
else
status -p /var/run/celery-worker.pid $CELERY
echo
RETVAL=$?
fi
;;
*)

View File

@ -48,7 +48,6 @@ start() {
RETVAL=$?
fi
echo
return $RETVAL
}
stop() {
@ -80,11 +79,13 @@ case "$1" in
if [ -f $SUSE ]; then
checkproc -v -p /var/run/progress_update.pid progress_updated
rc_status -v
RETVAL=$?
elif [ -f $DEBIAN ]; then
status_of_proc -p /var/run/progress_update.pid progress_updated
RETVAL=$?
else
status -p /var/run/progress_update.pid progress_updated
echo
RETVAL=$?
fi
;;
*)