enable remote-deploy

Change-Id: I44177e7f212a7f6f3c76ebed49574e4d537bcd8f
parent 25da6a69a4
commit 0b8659d790
@@ -1,13 +1,13 @@
 #!/bin/bash
 set -e
-systemctl restart mysql.service
-systemctl status mysql.service || exit $?
-/opt/compass/bin/manage_db.py createdb
+# systemctl restart mysql.service
+# systemctl status mysql.service || exit $?
+# /opt/compass/bin/manage_db.py createdb
 /opt/compass/bin/clean_installers.py --noasync
 /opt/compass/bin/clean_installation_logs.py
 rm -rf /var/ansible/run/*
-systemctl restart httpd.service
-systemctl status httpd.service || exit $?
+# systemctl restart httpd.service
+# systemctl status httpd.service || exit $?
 systemctl restart rsyslog.service
 systemctl status rsyslog.service || exit $?
 systemctl restart redis.service
@@ -17,6 +17,6 @@ systemctl restart cobblerd.service
 systemctl status cobblerd.service || exit $?
 systemctl restart compass-celeryd.service
 systemctl status compass-celeryd.service || exit $?
-systemctl restart compass-progress-updated.service
-systemctl status compass-progress-updated.service || exit $?
+# systemctl restart compass-progress-updated.service
+# systemctl status compass-progress-updated.service || exit $?
 
@@ -150,7 +150,8 @@ def poll_switch(poller_email, ip_addr, credentials,
 
     for switch in switches:
         for machine_dict in machine_dicts:
-            logging.debug('add machine: %s', machine_dict)
+            logging.info('add machine: %s', machine_dict)
+            machine_dict['owner_id'] = poller.id
             switch_api.add_switch_machine(
                 switch['id'], False, user=poller, **machine_dict
            )
@@ -393,6 +393,10 @@ def _login(use_cookie):
     )
     data['expire_timestamp'] = expire_timestamp
     user = auth_handler.authenticate_user(**data)
+    if not user.active:
+        raise exception_handler.UserDisabled(
+            '%s is not activated' % user.email
+        )
     if not login_user(user, remember=data.get('remember', False)):
         raise exception_handler.UserDisabled('failed to login: %s' % user)
 
@@ -415,6 +419,17 @@ def login():
     return _login(True)
 
 
+@app.route("/users/register", methods=['POST'])
+def register():
+    """register new user."""
+    data = _get_request_data()
+    data['is_admin'] = False
+    data['active'] = False
+    return utils.make_json_response(
+        200, user_api.add_user(**data)
+    )
+
+
 @app.route('/users/logout', methods=['POST'])
 @login_required
 def logout():
@ -480,7 +480,10 @@ def del_cluster(
|
|||||||
for clusterhost in cluster.clusterhosts
|
for clusterhost in cluster.clusterhosts
|
||||||
],
|
],
|
||||||
delete_underlying_host
|
delete_underlying_host
|
||||||
)
|
),
|
||||||
|
queue=user.email,
|
||||||
|
exchange=user.email,
|
||||||
|
routing_key=user.email
|
||||||
)
|
)
|
||||||
return {
|
return {
|
||||||
'status': 'delete action is sent',
|
'status': 'delete action is sent',
|
||||||
@ -1183,7 +1186,10 @@ def _del_cluster_host(
|
|||||||
(
|
(
|
||||||
user.email, clusterhost.cluster_id, clusterhost.host_id,
|
user.email, clusterhost.cluster_id, clusterhost.host_id,
|
||||||
delete_underlying_host
|
delete_underlying_host
|
||||||
)
|
),
|
||||||
|
queue=user.email,
|
||||||
|
exchange=user.email,
|
||||||
|
routing_key=user.email
|
||||||
)
|
)
|
||||||
return {
|
return {
|
||||||
'status': 'delete action sent',
|
'status': 'delete action sent',
|
||||||
@ -1854,7 +1860,10 @@ def deploy_cluster(
|
|||||||
(
|
(
|
||||||
user.email, cluster_id,
|
user.email, cluster_id,
|
||||||
[clusterhost.host_id for clusterhost in clusterhosts]
|
[clusterhost.host_id for clusterhost in clusterhosts]
|
||||||
)
|
),
|
||||||
|
queue=user.email,
|
||||||
|
exchange=user.email,
|
||||||
|
routing_key=user.email
|
||||||
)
|
)
|
||||||
return {
|
return {
|
||||||
'status': 'deploy action sent',
|
'status': 'deploy action sent',
|
||||||
@ -1918,7 +1927,10 @@ def redeploy_cluster(
|
|||||||
'compass.tasks.redeploy_cluster',
|
'compass.tasks.redeploy_cluster',
|
||||||
(
|
(
|
||||||
user.email, cluster_id
|
user.email, cluster_id
|
||||||
)
|
),
|
||||||
|
queue=user.email,
|
||||||
|
exchange=user.email,
|
||||||
|
routing_key=user.email
|
||||||
)
|
)
|
||||||
return {
|
return {
|
||||||
'status': 'redeploy action sent',
|
'status': 'redeploy action sent',
|
||||||
@ -1945,7 +1957,10 @@ def patch_cluster(cluster_id, user=None, session=None, **kwargs):
|
|||||||
'compass.tasks.patch_cluster',
|
'compass.tasks.patch_cluster',
|
||||||
(
|
(
|
||||||
user.email, cluster_id,
|
user.email, cluster_id,
|
||||||
)
|
),
|
||||||
|
queue=user.email,
|
||||||
|
exchange=user.email,
|
||||||
|
routing_key=user.email
|
||||||
)
|
)
|
||||||
return {
|
return {
|
||||||
'status': 'patch action sent',
|
'status': 'patch action sent',
|
||||||
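Note on the pattern above and in the following hunks: every celery_client.celery.send_task() call gains queue, exchange and routing_key keyword arguments set to the requesting user's email, so that user's deploy/delete tasks are routed to a dedicated queue on the remote broker. A minimal, self-contained sketch of the same Celery call, with a placeholder broker URL and illustrative task name and arguments (not Compass's actual configuration):

    from celery import Celery

    # Placeholder broker; Compass reads its own from /etc/compass/celeryconfig.
    celery_app = Celery(broker='amqp://guest:guest@localhost:5672//')

    def send_user_task(user_email, task_name, args):
        # Mirror the queue/exchange/routing_key kwargs added in this commit
        # so the task is only consumed by that user's worker.
        return celery_app.send_task(
            task_name,
            args,
            queue=user_email,
            exchange=user_email,
            routing_key=user_email
        )

    # e.g. send_user_task('admin@huawei.com', 'compass.tasks.deploy_cluster',
    #                     ('admin@huawei.com', 1, [1, 2]))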
@@ -2046,7 +2061,7 @@ def update_cluster_host_state(
 
 def _update_clusterhost_state(
         clusterhost, from_database_only=False,
-        session=None, **kwargs
+        session=None, user=None, **kwargs
 ):
     """Update clusterhost state.
 
@@ -2087,13 +2102,22 @@ def _update_clusterhost_state(
             utils.update_db_object(session, cluster.state, ready=False)
         status = '%s state is updated' % clusterhost.name
     else:
+        if not user:
+            user_id = cluster.creator_id
+            user_dict = user_api.get_user(user_id, session=session)
+            user_email = user_dict['email']
+        else:
+            user_email = user.email
         from compass.tasks import client as celery_client
         celery_client.celery.send_task(
             'compass.tasks.package_installed',
             (
                 clusterhost.cluster_id, clusterhost.host_id,
                 cluster_ready, host_ready
-            )
+            ),
+            queue=user_email,
+            exchange=user_email,
+            routing_key=user_email
         )
         status = '%s: cluster ready %s host ready %s' % (
            clusterhost.name, cluster_ready, host_ready
@@ -2126,7 +2150,7 @@ def update_cluster_host_state_internal(
     )
     return _update_clusterhost_state(
         clusterhost, from_database_only=from_database_only,
-        session=session, **kwargs
+        session=session, users=user, **kwargs
     )
 
 
@@ -2169,7 +2193,7 @@ def update_clusterhost_state_internal(
     clusterhost = _get_clusterhost(clusterhost_id, session=session)
     return _update_clusterhost_state(
         clusterhost, from_database_only=from_database_only,
-        session=session, **kwargs
+        session=session, user=user, **kwargs
     )
 
 
@@ -2240,10 +2264,19 @@ def update_cluster_state_internal(
         )
         status = '%s state is updated' % cluster.name
     else:
+        if not user:
+            user_id = cluster.creator_id
+            user_dict = user_api.get_user(user_id, session=session)
+            user_email = user_dict['email']
+        else:
+            user_email = user.email
         from compass.tasks import client as celery_client
         celery_client.celery.send_task(
             'compass.tasks.cluster_installed',
-            (clusterhost.cluster_id, clusterhost_ready)
+            (clusterhost.cluster_id, clusterhost_ready),
+            queue=user_email,
+            exchange=user_email,
+            routing_key=user_email
         )
         status = '%s installed action set clusterhost ready %s' % (
             cluster.name, clusterhost_ready
@@ -179,7 +179,10 @@ def start_check_cluster_health(cluster_id, send_report_url,
     from compass.tasks import client as celery_client
     celery_client.celery.send_task(
         'compass.tasks.cluster_health',
-        (cluster.id, send_report_url, user.email)
+        (cluster.id, send_report_url, user.email),
+        queue=user.email,
+        exchange=user.email,
+        routing_key=user.email
     )
     return {
         "cluster_id": cluster.id,
@@ -414,12 +414,21 @@ def del_host(
     logging.info(
         'send del host %s task to celery', host_id
     )
+    if not user:
+        user_id = host.creator_id
+        user_dict = user_api.get_user(user_id, session=session)
+        user_email = user_dict['email']
+    else:
+        user_email = user.email
     from compass.tasks import client as celery_client
     celery_client.celery.send_task(
         'compass.tasks.delete_host',
         (
             user.email, host.id, cluster_ids
-        )
+        ),
+        queue=user_email,
+        exchange=user_email,
+        routing_key=user_email
     )
     return {
         'status': 'delete action sent',
@@ -912,13 +921,22 @@ def update_host_state_internal(
         )
         status = '%s state is updated' % host.name
     else:
+        if not user:
+            user_id = host.creator_id
+            user_dict = user_api.get_user(user_id, session=session)
+            user_email = user_dict['email']
+        else:
+            user_email = user.email
         from compass.tasks import client as celery_client
         celery_client.celery.send_task(
             'compass.tasks.os_installed',
             (
                 host.id, clusterhosts_ready,
                 clusters_os_ready
-            )
+            ),
+            queue=user_email,
+            exchange=user_email,
+            routing_key=user_email
         )
         status = '%s: clusterhosts ready %s clusters os ready %s' % (
             host.name, clusterhosts_ready, clusters_os_ready
@@ -1011,9 +1029,18 @@ def poweron_host(
     from compass.tasks import client as celery_client
     host = _get_host(host_id, session=session)
     check_host_validated(host)
+    if not user:
+        user_id = host.creator_id
+        user_dict = user_api.get_user(user_id, session=session)
+        user_email = user_dict['email']
+    else:
+        user_email = user.email
     celery_client.celery.send_task(
         'compass.tasks.poweron_host',
-        (host.id,)
+        (host.id,),
+        queue=user_email,
+        exchange=user_email,
+        routing_key=user_email
     )
     return {
         'status': 'poweron %s action sent' % host.name,
@@ -1037,9 +1064,18 @@ def poweroff_host(
     from compass.tasks import client as celery_client
     host = _get_host(host_id, session=session)
     check_host_validated(host)
+    if not user:
+        user_id = host.creator_id
+        user_dict = user_api.get_user(user_id, session=session)
+        user_email = user_dict['email']
+    else:
+        user_email = user.email
     celery_client.celery.send_task(
         'compass.tasks.poweroff_host',
-        (host.id,)
+        (host.id,),
+        queue=user_email,
+        exchange=user_email,
+        routing_key=user_email
     )
     return {
         'status': 'poweroff %s action sent' % host.name,
@@ -1063,9 +1099,18 @@ def reset_host(
     from compass.tasks import client as celery_client
     host = _get_host(host_id, session=session)
     check_host_validated(host)
+    if not user:
+        user_id = host.creator_id
+        user_dict = user_api.get_user(user_id, session=session)
+        user_email = user_dict['email']
+    else:
+        user_email = user.email
     celery_client.celery.send_task(
         'compass.tasks.reset_host',
-        (host.id,)
+        (host.id,),
+        queue=user_email,
+        exchange=user_email,
+        routing_key=user_email
     )
     return {
         'status': 'reset %s action sent' % host.name,
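The "if not user" fallback repeated in the host hunks above and the machine hunks below resolves a routing email from the resource's creator or owner when no request user is available. A possible helper consolidating that block, assuming only the user_api.get_user(user_id, session=...) behaviour shown in this diff (hypothetical function, not part of the commit):

    def _task_routing_email(owner_id, user=None, session=None, get_user=None):
        """Return the email used as queue/exchange/routing_key for a task.

        get_user is expected to behave like user_api.get_user in this commit:
        given a user id and a session it returns a dict with an 'email' key.
        """
        if user is not None:
            return user.email
        user_dict = get_user(owner_id, session=session)
        return user_dict['email']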
@@ -226,9 +226,18 @@ def poweron_machine(
     machine = _get_machine(
         machine_id, session=session
     )
+    if not user:
+        user_id = machine.owner_id
+        user_dict = user_api.get_user(user_id, session=session)
+        user_email = user_dict['email']
+    else:
+        user_email = user.email
     celery_client.celery.send_task(
         'compass.tasks.poweron_machine',
-        (machine_id,)
+        (machine_id,),
+        queue=user_email,
+        exchange=user_email,
+        routing_key=user_email
     )
     return {
         'status': 'poweron %s action sent' % machine.mac,
@@ -253,9 +262,18 @@ def poweroff_machine(
     machine = _get_machine(
         machine_id, session=session
     )
+    if not user:
+        user_id = machine.owner_id
+        user_dict = user_api.get_user(user_id, session=session)
+        user_email = user_dict['email']
+    else:
+        user_email = user.email
     celery_client.celery.send_task(
         'compass.tasks.poweroff_machine',
-        (machine_id,)
+        (machine_id,),
+        queue=user_email,
+        exchange=user_email,
+        routing_key=user_email
     )
     return {
         'status': 'poweroff %s action sent' % machine.mac,
@@ -280,9 +298,18 @@ def reset_machine(
     machine = _get_machine(
         machine_id, session=session
     )
+    if not user:
+        user_id = machine.owner_id
+        user_dict = user_api.get_user(user_id, session=session)
+        user_email = user_dict['email']
+    else:
+        user_email = user.email
     celery_client.celery.send_task(
         'compass.tasks.reset_machine',
-        (machine_id,)
+        (machine_id,),
+        queue=user_email,
+        exchange=user_email,
+        routing_key=user_email
     )
     return {
         'status': 'reset %s action sent' % machine.mac,
@@ -30,10 +30,11 @@ from compass.utils import util
 SUPPORTED_FIELDS = ['ip_int', 'vendor', 'state']
 SUPPORTED_FILTER_FIELDS = ['ip_int', 'vendor', 'state']
 SUPPORTED_SWITCH_MACHINES_FIELDS = [
-    'switch_ip_int', 'port', 'vlans', 'mac', 'tag', 'location'
+    'switch_ip_int', 'port', 'vlans', 'mac', 'tag', 'location',
+    'owner_id'
 ]
 SUPPORTED_MACHINES_FIELDS = [
-    'port', 'vlans', 'mac', 'tag', 'location'
+    'port', 'vlans', 'mac', 'tag', 'location', 'owner_id'
 ]
 SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS = [
     'switch_ip_int', 'port', 'vlans', 'mac',
@@ -57,7 +58,7 @@ UPDATED_FILTERS_FIELDS = ['put_machine_filters']
 PATCHED_FILTERS_FIELDS = ['patched_machine_filters']
 ADDED_MACHINES_FIELDS = ['mac']
 OPTIONAL_ADDED_MACHINES_FIELDS = [
-    'ipmi_credentials', 'tag', 'location'
+    'ipmi_credentials', 'tag', 'location', 'owner_id'
 ]
 ADDED_SWITCH_MACHINES_FIELDS = ['port']
 OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS = ['vlans']
@@ -65,7 +66,7 @@ UPDATED_MACHINES_FIELDS = [
     'ipmi_credentials',
     'tag', 'location'
 ]
-UPDATED_SWITCH_MACHINES_FIELDS = ['port', 'vlans']
+UPDATED_SWITCH_MACHINES_FIELDS = ['port', 'vlans', 'owner_id']
 PATCHED_MACHINES_FIELDS = [
     'patched_ipmi_credentials',
     'patched_tag', 'patched_location'
@@ -83,7 +84,7 @@ RESP_ACTION_FIELDS = [
 ]
 RESP_MACHINES_FIELDS = [
     'id', 'switch_id', 'switch_ip', 'machine_id', 'switch_machine_id',
-    'port', 'vlans', 'mac',
+    'port', 'vlans', 'mac', 'owner_id',
     'ipmi_credentials', 'tag', 'location',
     'created_at', 'updated_at'
 ]
@@ -590,6 +591,8 @@ def list_switch_machines(
     switch_machines = utils.list_db_objects(
         session, models.SwitchMachine, switch_id=switch.id, **filters
     )
+    if not user.is_admin and len(switch_machines):
+        switch_machines = [m for m in switch_machines if m.machine.owner_id == user.id]
     return _filter_switch_machines(switch_machines)
 
 
@@ -676,13 +679,14 @@ def _add_machine_if_not_exist(mac=None, session=None, **kwargs):
 @utils.input_validates(vlans=_check_vlans)
 def _add_switch_machine_only(
         switch, machine, exception_when_existing=True,
-        session=None, port=None, **kwargs
+        session=None, owner_id=None, port=None, **kwargs
 ):
     """add a switch machine."""
     return utils.add_db_object(
         session, models.SwitchMachine,
         exception_when_existing,
         switch.id, machine.id, port=port,
+        owner_id=owner_id,
         **kwargs
     )
 
@@ -698,7 +702,7 @@ def _add_switch_machine_only(
 @utils.wrap_to_dict(RESP_MACHINES_FIELDS)
 def _add_switch_machine(
         switch_id, exception_when_existing=True,
-        mac=None, port=None, session=None, **kwargs
+        mac=None, port=None, session=None, owner_id=None, **kwargs
 ):
     """Add switch machine.
 
@@ -707,7 +711,7 @@ def _add_switch_machine(
     """
     switch = _get_switch(switch_id, session=session)
     machine = _add_machine_if_not_exist(
-        mac=mac, session=session, **kwargs
+        mac=mac, session=session, owner_id=owner_id, **kwargs
     )
     return _add_switch_machine_only(
         switch, machine,
@@ -722,13 +726,14 @@ def _add_switch_machine(
 )
 def add_switch_machine(
         switch_id, exception_when_existing=True,
-        mac=None, user=None, session=None, **kwargs
+        mac=None, user=None, session=None,
+        owner_id=None, **kwargs
 ):
     """Add switch machine to a switch."""
     return _add_switch_machine(
         switch_id,
         exception_when_existing=exception_when_existing,
-        mac=mac, session=session, **kwargs
+        mac=mac, session=session, owner_id=owner_id, **kwargs
     )
 
 
@@ -747,7 +752,7 @@ def add_switch_machine(
 )
 def add_switch_machines(
         exception_when_existing=False,
-        data=[], user=None, session=None
+        data=[], user=None, session=None, owner_id=None
 ):
     """Add switch machines."""
     switch_machines = []
@@ -817,7 +822,7 @@ def add_switch_machines(
             switch_machines.append(_add_switch_machine_only(
                 switch_object, machine_object,
                 exception_when_existing,
-                session=session, **machine
+                session=session, owner_id=owner_id, **machine
             ))
     return {
         'switches_machines': switch_machines,
@@ -838,7 +843,10 @@ def poll_switch(switch_id, user=None, session=None, **kwargs):
     switch = _get_switch(switch_id, session=session)
     celery_client.celery.send_task(
         'compass.tasks.pollswitch',
-        (user.email, switch.ip, switch.credentials)
+        (user.email, switch.ip, switch.credentials),
+        queue=user.email,
+        exchange=user.email,
+        routing_key=user.email
     )
     return {
         'status': 'action %s sent' % kwargs,
@@ -1116,7 +1124,8 @@ def _add_machine_to_switch(
         machine_id, session=session
     )
     _add_switch_machine_only(
-        switch, machine, False, **kwargs
+        switch, machine, False,
+        owner_id=machine.owner_id, **kwargs
    )
 
 
@@ -1357,6 +1357,7 @@ class SwitchMachine(BASE, HelperMixin, TimestampMixin):
         Integer,
         ForeignKey('machine.id', onupdate='CASCADE', ondelete='CASCADE')
     )
+    owner_id = Column(Integer, ForeignKey('user.id'))
     port = Column(String(80), nullable=True)
     vlans = Column(JSONEncoded, default=[])
     __table_args__ = (
@@ -2,8 +2,11 @@
 
 CELERY_RESULT_BACKEND ="amqp://"
 
-BROKER_URL = "amqp://guest:guest@localhost:5672//"
+BROKER_URL = "amqp://guest:guest@www.stack360.io:5672//"
 
 CELERY_IMPORTS=("compass.tasks.tasks",)
 CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']
 C_FORCE_ROOT = 1
+CELERY_DEFAULT_QUEUE = 'admin@huawei.com'
+CELERY_DEFAULT_EXCHANGE = 'admin@huawei.com'
+CELERY_DEFAULT_ROUTING_KEY = 'admin@huawei.com'
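With the broker moved to www.stack360.io and CELERY_DEFAULT_QUEUE/EXCHANGE/ROUTING_KEY pinned to a user email (install/register.sh, added later in this commit, rewrites the admin@huawei.com placeholders to the registered user), a local worker only needs to load this settings module to consume its own queue. A minimal sketch, assuming the file is importable as a module named celeryconfig:

    from celery import Celery

    celery_app = Celery('compass')
    # Pick up BROKER_URL and the CELERY_DEFAULT_* routing settings above.
    celery_app.config_from_object('celeryconfig')

    if __name__ == '__main__':
        print(celery_app.conf.get('CELERY_DEFAULT_QUEUE'))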
@@ -2,7 +2,7 @@ CONFIG_DIR = '/etc/compass'
 DATABASE_TYPE = 'mysql'
 DATABASE_USER = 'root'
 DATABASE_PASSWORD = 'root'
-DATABASE_SERVER = '127.0.0.1:3306'
+DATABASE_SERVER = 'www.stack360.io:3306'
 DATABASE_NAME = 'compass'
 SQLALCHEMY_DATABASE_URI = '%s://%s:%s@%s/%s' % (DATABASE_TYPE, DATABASE_USER, DATABASE_PASSWORD, DATABASE_SERVER, DATABASE_NAME)
 SQLALCHEMY_DATABASE_POOL_TYPE = 'instant'
@@ -111,8 +111,10 @@ ERLANG_TOKEN: YOWSJSJIGGAUFZTIBRAD
 #set neutron_pass = $console_credentials.network.password
 #set ceilometer_dbpass = $credentials.metering.password
 #set ceilometer_pass = $console_credentials.metering.password
-#set aodh_dbpass = $credentials.alarming.password
-#set aodh_pass = $console_credentials.alarming.password
+##set aodh_dbpass = $credentials.alarming.password
+#set aodh_dbpass = "alarming"
+##set aodh_pass = $console_credentials.alarming.password
+#set aodh_pass = "alarming"
 #set admin_pass = $console_credentials.admin.password
 #set demo_pass = $console_credentials.demo.password
 
@@ -13,7 +13,8 @@ fi
 source $DIR/install_func.sh
 
 echo "INstalling ansible related packages"
-sudo yum -y install ansible
+# sudo yum -y install ansible
+pip install ansible==1.9.2
 if [[ "$?" != "0" ]]; then
     echo "Failed to install ansible"
     exit 1
@@ -13,7 +13,7 @@ fi
 source $DIR/install_func.sh
 
 echo "Installing cobbler related packages"
-sudo yum -y install cobbler cobbler-web createrepo mkisofs python-cheetah python-simplejson python-urlgrabber PyYAML Django corosync pykickstart
+sudo yum --enablerepo=compass_install --nogpgcheck -y install cobbler cobbler-web createrepo mkisofs python-cheetah python-simplejson python-urlgrabber PyYAML Django corosync pykickstart
 sudo yum -y upgrade yum-utils
 if [[ "$?" != "0" ]]; then
     echo "failed to install cobbler related packages"
@@ -4,17 +4,18 @@ echo 'Installing Required packages for Compass...'
 sudo yum clean all
 sudo yum update -y --skip-broken
 if [ "$tempest" == "true" ]; then
-    sudo yum install -y virt-install libvirt qemu-kvm libxml2-devel libffi-devel libxslt-devel python-devel sshpass openssl-devel
+    sudo yum --enablerepo=compass_install install -y virt-install libvirt qemu-kvm libxml2-devel libffi-devel libxslt-devel python-devel sshpass openssl-devel --nogpgcheck
     if [[ "$?" != "0" ]]; then
         echo "failed to install tempest yum dependency"
         exit 1
     fi
 fi
 
-sudo yum install -y $MYSQL
-sudo yum install -y rsyslog logrotate ntp iproute openssh-clients python python-devel git wget syslinux amqp rabbitmq-server mod_wsgi httpd squid dhcp bind rsync yum-utils xinetd tftp-server gcc net-snmp-utils net-snmp net-snmp-python unzip openssl openssl098e ca-certificates mysql-devel mysql-server mysql MySQL-python python-virtualenv python-setuptools python-pip bc libselinux-python libffi-devel openssl-devel
+sudo yum --enablerepo=compass_install install -y $MYSQL
+sudo yum --enablerepo=compass_install --nogpgcheck install -y rsyslog logrotate ntp iproute openssh-clients python python-devel git wget syslinux amqp rabbitmq-server mod_wsgi httpd squid dhcp bind rsync yum-utils xinetd tftp-server gcc net-snmp-utils net-snmp net-snmp-python unzip openssl openssl098e ca-certificates mysql-devel mysql-server mysql MySQL-python python-virtualenv python-setuptools python-pip bc libselinux-python libffi-devel openssl-devel
 sudo yum --setopt=tsflags=noscripts -y remove redis
-sudo yum --enablerepo=remi,remi-test install -y redis
+# sudo yum --enablerepo=remi,remi-test install -y redis
+sudo yum --enablerepo=compass_install --nogpgcheck install -y redis
 
 if [[ "$?" != "0" ]]; then
     echo "failed to install yum dependency"
@@ -2,90 +2,107 @@
 # Config File for Compass Installer #
 #####################################
 # OS_INSTALLER indicates the tool for OS provisioning, default is 'cobbler'.
-export OS_INSTALLER=${OS_INSTALLER:-cobbler}
+export OS_INSTALLER="cobbler"
 
 # PACKAGE_INSTALLER indicates the tool for Package provisioning, default is 'chef'.
-export PACKAGE_INSTALLER=${PACKAGE_INSTALLER:-package}
+export PACKAGE_INSTALLER="package"
 
 # set remi release url
 # export ATOMIC=${ATOMIC:-http://www6.atomicorp.com/channels/atomic/${IMAGE_TYPE,,}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/RPMS/atomic-release-1.0-19.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.art.noarch.rpm}
 # export REMI=${REMI:-http://rpms.famillecollet.com/enterprise/remi-release-6.rpm}
-export REMI=${REMI:-http://rpms.famillecollet.com/enterprise/remi-release-7.rpm}
+export REMI="http://rpms.famillecollet.com/enterprise/remi-release-7.rpm"
 
-export EPEL=${EPEL:-https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm}
+export EPEL="https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm"
 
-export MYSQL=${MYSQL:-http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm}
+export MYSQL="http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm"
 
-export OPENSTACK_ANSIBLE_MODULE=${OPENSTACK_ANSIBLE_MODULE:-https://github.com/openstack-ansible/openstack-ansible-modules.git}
-# service NIC
-export NIC=${NIC:-}
-export IPADDR=${IPADDR:-}
-export NETMASK=${NETMASK:-}
+export OPENSTACK_ANSIBLE_MODULE="https://github.com/openstack-ansible/openstack-ansible-modules.git"
 
-# DHCP config
+# External network config for access remote compass server
+# External network NIC
+export PUBLIC_NIC="eth0"
+
+# External NIC IP address, use the IP assigned to PUBLIC_NIC by default
+export PUBLIC_IP=
+
+# Install network config, it is a private network
+export NIC="eth1"
+export IPADDR="10.1.0.15"
+export NETMASK="255.255.255.0"
+
+# DHCP config for install network
 # DHCP option router address(Default is your management interface IP address )"
-export OPTION_ROUTER=${OPTION_ROUTER:-}
+export OPTION_ROUTER="10.1.0.15"
 # The IP range for DHCP clients (Default: local subnet start from 100 to 254)
-export IP_START=${IP_START:-}
-export IP_END=${IP_END:-}
+export IP_START="10.1.0.100"
+export IP_END="10.1.0.200"
 # TFTP server's IP address(Default: Management Interface/eth0 IP)
-export NEXTSERVER=${NEXTSERVER:-}
+export NEXTSERVER="10.1.10.15"
 
+# The machines MAC list. it must be filled, any space or null is wrong in quotes:
+export switch_IP="127.0.0.1"
+export PXE_MACs=("port=300,mac=00:0c:29:8c:c3:b5"
+                 "port=301,mac=00:0c:29:87:79:9a"
+                 "port=302,mac=00:0c:29:73:74:41"
+                 "port=303,mac=00:0c:29:f9:eb:fd"
+                 "port=304,mac=00:0c:29:ac:e7:14")
+
 # the domains covered by nameserver
-export NAMESERVER_DOMAINS=${NAMESERVER_DOMAINS:-}
-export NAMESERVER_REVERSE_ZONES=${NAMESERVER_REVERSE_ZONES:-}
+export NAMESERVER_DOMAINS="ods.com"
+export NAMESERVER_REVERSE_ZONES="unused"
 
 # set the default cobbler user "cobbler" password, if not set, the default will be cobbler/cobbler
-export CBLR_USER=${CBLR_USER:-}
-export CBLR_PASSWD=${CBLR_PASSWD:-}
+export CBLR_USER=
+export CBLR_PASSWD=
 
 # set default local repo flag
-export LOCAL_REPO=${LOCAL_REPO:-}
-export SUPPORT_CENTOS_7_2=${SUPPORT_CENTOS_7_2:-}
-export SUPPORT_UBUNTU_14_04_03=${SUPPORT_UBUNTU_14_04_03:-}
+export LOCAL_REPO=
+export SUPPORT_CENTOS_7_2=
+export SUPPORT_UBUNTU_14_04_03=
 
-export IMAGE_TYPE=${IMAGE_TYPE:-"CentOS"}
-export IMAGE_TYPE_OTHER=${IMAGE_TYPE_OTHER:-"el"}
-export IMAGE_VERSION_MAJOR=${IMAGE_VERSION_MAJOR:-"6"}
-export IMAGE_VERSION_NINOR=${IMAGE_VERSION_MINOR:-"5"}
-export IMAGE_VERSION=${IMAGE_VERSION:-"${IMAGE_VERSION_MAJOR}.${IMAGE_VERSION_MINOR}"}
-export IMAGE_NAME=${IMAGE_NAME:-"${IMAGE_TYPE}-${IMAGE_VERSION}"}
-export IMAGE_ARCH=${IMAGE_ARCH:-"x86_64"}
+export IMAGE_TYPE="CentOS"
+export IMAGE_TYPE_OTHER="el"
+export IMAGE_VERSION_MAJOR="6"
+export IMAGE_VERSION_NINOR="5"
+export IMAGE_VERSION="${IMAGE_VERSION_MAJOR}.${IMAGE_VERSION_MINOR}"
+export IMAGE_NAME="${IMAGE_TYPE}-${IMAGE_VERSION}"
+export IMAGE_ARCH="x86_64"
 
-export CENTOS_7_2_IMAGE_SOURCE=${CENTOS_7_2_IMAGE_SOURCE:-"http://205.177.226.237:9999/CentOS-7-x86_64-Minimal-1511.iso"}
-export UBUNTU_14_04_03_IMAGE_SOURCE=${UBUNTU_14_04_03_IMAGE_SOURCE:-"http://205.177.226.237:9999/ubuntu-14.04.3-server-amd64.iso"}
-export CENTOS_7_2_PPA_REPO_SOURCE=${CENTOS_7_2_PPA_REPO_SOURCE:-"http://205.177.226.237:9999/centos7-juno-ppa.tar.gz"}
-export UBUNTU_14_04_03_PPA_REPO_SOURCE=${UBUNTU_14_04_03_PPA_REPO_SOURCE:-"http://205.177.226.237:9999/trusty-liberty-ppa.tar.gz"}
-export PIP_PACKAGES=${PIP_PACKAGES:-"http://205.177.226.237:9999/pip.tar.gz"}
+export CENTOS_7_2_IMAGE_SOURCE="http://205.177.226.237:9999/CentOS-7-x86_64-Minimal-1511.iso"
+export UBUNTU_14_04_03_IMAGE_SOURCE="http://205.177.226.237:9999/ubuntu-14.04.3-server-amd64.iso"
+export CENTOS_7_2_PPA_REPO_SOURCE="http://205.177.226.237:9999/centos7-juno-ppa.tar.gz"
+export UBUNTU_14_04_03_PPA_REPO_SOURCE="http://205.177.226.237:9999/trusty-mitaka-ppa.tar.gz"
+export PIP_PACKAGES="http://205.177.226.237:9999/pip.tar.gz"
 
-export COBBLER_PASSWORD=${COBBLER_PASSWORD:-"cobbler"}
+export COBBLER_PASSWORD="cobbler"
 
-export COBBLER_LOADERS_SOURCE=${COBBLER_LOADERS_SOURCE:-"https://s3-us-west-1.amazonaws.com/compass-local-repo/loaders.tar.gz"}
-export COBBLER_LOADERS_SOURCE_ASIA=${COBBLER_LOADERS_SOURCE_ASIA:-"http://huawei-repo.uubright.com/repos/compass/loaders.tar.gz"}
+export COBBLER_LOADERS_SOURCE="https://s3-us-west-1.amazonaws.com/compass-local-repo/loaders.tar.gz"
+export COBBLER_LOADERS_SOURCE_ASIA="http://huawei-repo.uubright.com/repos/compass/loaders.tar.gz"
 
 # Currently the use of Javascript MVC is set to version 3.2.4
-export JS_MVC=${JS_MVC:-"javascriptmvc-3.2.4"}
+export JS_MVC="javascriptmvc-3.2.4"
 
 # set the chef packages download path
-export CHEF_SRV=${CHEF_SRV:-"http://opscode-omnibus-packages.s3.amazonaws.com/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-server-11.0.8-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"}
-export CHEF_SRV_HUAWEI=${CHEF_SRV_HUAWEI:-"http://huawei-repo.uubright.com/repos/compass/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-server-11.0.8-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"}
-export CHEF_CLIENT=${CHEF_CLIENT:-"https://opscode-omnibus-packages.s3.amazonaws.com/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-11.10.4-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"}
-export CHEF_CLIENT_HUAWEI=${CHEF_CLIENT_HUAWEI:-"http://huawei-repo.uubright.com/repos/compass/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-11.10.4-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"}
+export CHEF_SRV="http://opscode-omnibus-packages.s3.amazonaws.com/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-server-11.0.8-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"
+export CHEF_SRV_HUAWEI="http://huawei-repo.uubright.com/repos/compass/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-server-11.0.8-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"
+export CHEF_CLIENT="https://opscode-omnibus-packages.s3.amazonaws.com/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-11.10.4-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"
+export CHEF_CLIENT_HUAWEI="http://huawei-repo.uubright.com/repos/compass/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-11.10.4-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"
 
 # Set Chef password for Chef web UI
-export CHEF_PASSWD=${CHEF_PASSWD:-"root1234"}
+export CHEF_PASSWD="root1234"
 
 # Set Compass-web and Compass-adpater variables
-export WEB_HOME=${WEB_HOME:-'/tmp/web'}
-export ADAPTERS_HOME=${ADAPTERS_HOME:-'/tmp/adapter'}
-export WEB_SOURCE=${WEB_SOURCE:-}
-export ADAPTERS_SOURCE=${ADAPTERS_SOURCE:-}
+export WEB_HOME="/tmp/web"
+export ADAPTERS_HOME="/tmp/adapter"
+export WEB_SOURCE=
+export ADAPTERS_SOURCE=
 
 # Set compass environment
 export SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
 export COMPASSDIR=${SCRIPT_DIR}/..
-export TESTMODE=${TESTMODE:-"False"}
+export TESTMODE="False"
 
 # Set Local Repo sources
-export LOCAL_REPO_US=${LOCAL_REPO_US:-"https://s3-us-west-1.amazonaws.com/compass-local-repo"}
-export LOCAL_REPO_HUAWEI=${LOCAL_REPO_HUAWEI:-"http://huawei-repo.uubright.com/repos/compass"}
+export LOCAL_REPO_US="https://s3-us-west-1.amazonaws.com/compass-local-repo"
+export LOCAL_REPO_HUAWEI="http://huawei-repo.uubright.com/repos/compass"
 
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
+#set -x
 ### Log the script all outputs locally
 exec > >(sudo tee install.log)
 exec 2>&1
@@ -48,6 +48,7 @@ _prepare_locking()
 _prepare_locking
 
 # PUBLIC
+
 exlock_now()
 {
     _lock xn || exit 1
@@ -59,10 +60,93 @@ if [[ "$?" != "0" ]]; then
     exit 1
 fi
 
-### BEGIN OF SCRIPT ###
+set_iptables()
+{
+    # external_ipaddr=$1; install_ipaddr=$2; install_netmask=$3
+
+    local argument_error="ERROR: argument ARG_NUM is invalidation that is for ARG_DESC"
+    local varnames=("3" "external_ipaddr" "install_ipaddr" "install_netmask")
+    if [ $# -lt ${varnames[0]} ];then
+        echo "ERROR: please input ${varnames[0]} arguments to call function _set_iptables()";exit 1
+    fi
+    local i=1
+    while [ $1 ];do
+        eval "${varnames[i]}"=$1
+        sudo ipcalc $1 -c
+        if [ $? -ne 0 ];then
+            echo ${argument_error} | sed 's/ARG_NUM/'"$i"'/g' | sed 's/ARG_DESC/g'"${varnames[i]}"'/g';exit 1
+        fi
+        let i++;shift
+    done
+
+    local install_netaddr=$(sudo ipcalc ${install_ipaddr} ${install_netmask} -n | awk -F = '{print $2}')
+    local install_netprefix=$(sudo ipcalc ${install_ipaddr} ${install_netmask} -p | awk -F = '{print $2}')
+
+    sudo sed -i '/^\s*net\.ipv4\.ip_forward\s*=/d' /etc/sysctl.conf
+    sudo sed -i '$a net.ipv4.ip_forward=1' /etc/sysctl.conf
+    sudo sysctl -p
+
+    sudo rpm -qa | grep iptables-services
+    if [ $? -ne 0 ]; then
+        sudo yum --enablerepo=compass_install --nogpgcheck -y install iptables-services
+    fi
+
+    sudo /bin/systemctl status iptables.service
+    if [ $? -eq 0 ];then
+        sudo /usr/sbin/service iptables save
+        sudo /bin/systemctl stop iptables.service
+    fi
+
+    sudo mkdir /etc/sysconfig/iptables.bak.d 2>/dev/null
+    if [ -f /etc/sysconfig/iptables ];then
+        sudo mv -f /etc/sysconfig/iptables /etc/sysconfig/iptables.bak.d/$(uuidgen)
+    fi
+
+    iptables_config=" *filter\n
+        :INPUT ACCEPT [0:0]\n
+        :FORWARD ACCEPT [0:0]\n
+        :OUTPUT ACCEPT [0:0]\n
+        COMMIT\n
+        *nat\n
+        :PREROUTING ACCEPT [0:0]\n
+        :INPUT ACCEPT [0:0]\n
+        :OUTPUT ACCEPT [0:0]\n
+        :POSTROUTING ACCEPT [0:0]\n
+        -A POSTROUTING -s ${install_ipaddr}/32 -j ACCEPT\n
+        -A POSTROUTING -s ${install_netaddr}/${install_netprefix} -j SNAT --to-source ${external_ipaddr}\n
+        COMMIT\n"
+    sudo echo -e ${iptables_config} | sed 's/^\s*//g' > /etc/sysconfig/iptables
+
+    sudo /bin/systemctl enable iptables
+    sudo /bin/systemctl start iptables.service
+}
+
+# convert between ip address and integers
+ipaddr_to_int()
+{
+    ipaddr=$1
+    IFS=. read -r a b c d <<< "$ipaddr"
+    printf '%d\n' "$((a * 256 ** 3 + b * 256 ** 2 + c * 256 + d))"
+}
+int_to_ipaddr()
+{
+    ipint=$1
+    let a=ipint/$[256**3];let ipint%=$[256**3]
+    let b=ipint/$[256**2];let ipint%=$[256**2]
+    let c=ipint/256;let ipint%=256
+    let d=ipint
+    printf '%d.%d.%d.%d\n' $a $b $c $d
+}
+
+
+### BEGIN OF SCRIPT
+
 DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
 
+### LOAD FILE CONFIGURATION
 source $DIR/install.conf
 
 
 ### Change selinux security policy
 sudo setenforce 0
 sudo sed -i 's/enforcing/disabled/g' /etc/selinux/config
@ -127,131 +211,118 @@ figlet -ctf slant Compass Installer
|
|||||||
while [ $1 ]; do
|
while [ $1 ]; do
|
||||||
flags=$1
|
flags=$1
|
||||||
param=${flags/'--'/''}
|
param=${flags/'--'/''}
|
||||||
var=$(echo $param | cut -d"=" -f1)
|
var=$(echo $param | awk -F = '{print $1}')
|
||||||
val=$(echo $param | cut -d"=" -f2)
|
val=$(echo $param | awk -F = '{print $2}')
|
||||||
export $var=$val
|
eval export $var=$val
|
||||||
shift
|
shift
|
||||||
done
|
done
|
||||||
|
|
||||||
# convert ip address to int
|
yum update -y
|
||||||
ipaddr_convert()
|
sudo rpm -qa | grep net-tools
|
||||||
{
|
if [ $? -ne 0 ];then
|
||||||
ipaddr=$1
|
yum --enablerepo=compass_install --nogpgcheck -y install net-tools
|
||||||
IFS=. read -r a b c d <<< "$ipaddr"
|
fi
|
||||||
printf '%d\n' "$((a * 256 ** 3 + b * 256 ** 2 + c * 256 + d))"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Load variables
|
# check the correct format of ip variables
|
||||||
loadvars()
|
ip_vars="PUBLIC_IP PUBLIC_NETMASK PUBLIC_GATEWAY
|
||||||
{
|
IPADDR NETMASK
|
||||||
varname=${1,,}
|
OPTION_ROUTER NEXTSERVER IP_START IP_END"
|
||||||
eval var=\$$(echo $1)
|
for ip_var in ${ip_vars}; do
|
||||||
|
eval ip_val=\$${ip_var}
|
||||||
if [[ -z $var ]]; then
|
if [ ! -z ${ip_val} ];then
|
||||||
echo -e "\x1b[32mPlease enter the $varname (Example: $2):\x1b[37m"
|
ipcalc ${ip_val} -c
|
||||||
while read input
|
if [ $? -ne 0 ];then
|
||||||
do
|
echo "The variable of '${ip_var}'='${ip_val}' is invalid."
|
||||||
if [ "$input" == "" ]; then
|
exit 1
|
||||||
echo "Default $varname '$2' chosen"
|
fi
|
||||||
export $(echo $1)="$2"
|
|
||||||
break
|
|
||||||
else
|
|
||||||
echo "You have entered $input"
|
|
||||||
export $(echo $1)="$input"
|
|
||||||
break
|
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
fi
|
|
||||||
}
|
# public network variables:
|
||||||
yum -y install net-tools
|
export PUBLIC_NIC=${PUBLIC_NIC:-"eth0"}
|
||||||
loadvars NIC "eth0"
|
export PUBLIC_IP=${PUBLIC_IP:-$(sudo ifconfig ${PUBLIC_NIC} | awk '($1=="inet"){print $2}')}
|
||||||
sudo ifconfig $NIC
|
export PUBLIC_GATEWAY=${PUBLIC_GATEWAY:-$(sudo route -n | awk '($1=="0.0.0.0" && $3=="0.0.0.0"){print $2}')}
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
echo "There is no nic '$NIC' yet"
|
if [ -z ${PUBLIC_IP} ];then
|
||||||
|
echo "ERROR: There is not any PUBLIC_IP to be set yet, please assign an IP to PUBLIC_NIC or configure 'install.conf' first."
|
||||||
exit 1
|
exit 1
|
||||||
|
elif [ -z ${PUBLIC_GATEWAY} ];then
|
||||||
|
echo "WARNING: There is not any PUBLIC_GATEWAY, please ensure that the agent server can access remote compass center if no gateway."
|
||||||
|
sleep 2
|
||||||
fi
|
fi
|
||||||
# sudo ifconfig $NIC | grep 'inet addr:' >& /dev/null
|
|
||||||
sudo ifconfig $NIC |grep 'inet '| cut -d ' ' -f10 >& /dev/null
|
export PUBLIC_NETMASK=${PUBLIC_NETMASK:-$(sudo ifconfig ${PUBLIC_NIC} | awk '($3=="netmask"){print $4}')}
|
||||||
if [ $? -ne 0 ]; then
|
export PUBLIC_NETMASK=${PUBLIC_NETMASK:-$(sudo ipcalc ${PUBLIC_IP} -m | awk -F = '{print $2}')}
|
||||||
echo "There is not any IP address assigned to the NIC '$NIC' yet, please assign an IP address first."
|
|
||||||
|
if [[ $(ipcalc ${PUBLIC_IP} ${PUBLIC_NETMASK} -n) != $(ipcalc ${PUBLIC_GATEWAY} ${PUBLIC_NETMASK} -n) ]];then
|
||||||
|
echo "ERROR: The PUBLIC_IP:${PUBLIC_IP} and PUBLIC_GATEWAY:${PUBLIC_GATEWAY} are not in the same subnet, please check the configuration."
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
export ipaddr=$(ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}')
|
sudo ifconfig ${PUBLIC_NIC} ${PUBLIC_IP} netmask ${PUBLIC_NETMASK} up
|
||||||
echo " this line "
|
|
||||||
if [ -z "$ipaddr" ]; then
|
if [ ! -z ${PUBLIC_GATEWAY} ];then
|
||||||
export ipaddr=$(ifconfig $NIC | grep 'inet ' | sed 's/^[ \t]*//g' | sed 's/[ \t]\+/ /g' | cut -d' ' -f2)
|
sudo route del -net 0.0.0.0/0
|
||||||
|
sudo route add -net 0.0.0.0/0 gw ${PUBLIC_GATEWAY}
|
||||||
fi
|
fi
|
||||||
loadvars IPADDR ${ipaddr}
|
|
||||||
ipcalc $IPADDR -c
|
# install network variables:
|
||||||
if [ $? -ne 0 ]; then
|
export NIC=${NIC:-"eth1"}
|
||||||
echo "ip addr $IPADDR format should be x.x.x.x"
|
export IPADDR=${IPADDR:-$(sudo ifconfig ${NIC} | awk '($1=="inet"){print $2}')}
|
||||||
|
export IPADDR=${IPADDR:-"10.1.0.15"}
|
||||||
|
export NETMASK=${NETMASK:-$(sudo ifconfig ${NIC} | awk '($3="netmask"){print $4}')}
|
||||||
|
export NETMASK=${NETMASK:-"255.255.255.0"}
|
||||||
|
|
||||||
|
sudo ifconfig ${NIC} ${IPADDR} netmask ${NETMASK} up
|
||||||
|
|
||||||
|
export OPTION_ROUTER=${OPTION_ROUTE:-${IPADDR}}
|
||||||
|
export NEXTSERVER=${NEXTSERVER:-${IPADDR}}
|
||||||
|
|
||||||
|
if [ -z ${IP_START} ];then
|
||||||
|
temp_int=$(ipaddr-to-int ${IPADDR})
|
||||||
|
let temp_int++
|
||||||
|
IP_START=$(int-to-ipaddr ${temp_int})
|
||||||
|
fi
|
||||||
|
export IP_START
|
||||||
|
|
||||||
|
if [ -z ${IP_END} ];then
|
||||||
|
broad_addr=$(sudo ipcalc ${IPADDR} ${NETMASK} -b | awk -F = '{print $2}')
|
||||||
|
temp_int=$(ipadd-to-int ${broad_addr})
|
||||||
|
let temp_int--
|
||||||
|
IP_END=$(int-to-ipaddr ${temp_int})
|
||||||
|
fi
|
||||||
|
export IP_END
|
||||||
|
|
||||||
|
# check the validation of IP_START and IP_END
|
||||||
|
for ip_var in IP_START IP_END;do
|
||||||
|
if [[ $(eval ipcalc \$${ip_var} ${NETMASK} -n) != $(ipcalc ${IPADDR} ${NETMASK} -n) ]];then
|
||||||
|
eval echo "ERROR: The ${ip_var}:\$${ip_var} and install nic are not in the same subnet.";
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
export netmask=$(ifconfig $NIC | grep Mask | cut -d: -f4)
|
done
|
||||||
if [ -z "$netmask" ]; then
|
ip_start_int=$(ipaddr_to_int ${IP_START})
|
||||||
export netmask=$(ifconfig $NIC | grep netmask | sed 's/^[ \t]*//g' | sed 's/[ \t]\+/ /g' | cut -d' ' -f4)
|
ip_end_int=$(ipaddr_to_int ${IP_END})
|
||||||
fi
|
|
||||||
loadvars NETMASK ${netmask}
|
|
||||||
export netaddr=$(ipcalc $IPADDR $NETMASK -n |cut -f 2 -d '=')
|
|
||||||
export netprefix=$(ipcalc $IPADDR $NETMASK -p |cut -f 2 -d '=')
|
|
||||||
subnet=${netaddr}/${netprefix}
|
|
||||||
ipcalc $subnet -c
|
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
echo "subnet $subnet format should be x.x.x.x/x"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
loadvars OPTION_ROUTER $(route -n | grep '^0.0.0.0' | xargs | cut -d ' ' -f 2)
|
|
||||||
ipcalc $OPTION_ROUTER -c
|
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
echo "router $OPTION_ROUTER format should be x.x.x.x"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
export ip_start=$(echo "$IPADDR"|cut -f 1,2,3 -d '.')."100"
|
|
||||||
export ip_end=$(echo "$IPADDR"|cut -f 1,2,3 -d '.')."250"
|
|
||||||
loadvars IP_START "$ip_start"
|
|
||||||
ipcalc $IP_START -c
|
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
echo "ip start $IP_START format should be x.x.x.x"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "ip start address is $IP_START"
|
|
||||||
fi
|
|
||||||
ip_start_net=$(ipcalc $IP_START $NETMASK -n |cut -f 2 -d '=')
|
|
||||||
if [[ "$ip_start_net" != "$netaddr" ]]; then
|
|
||||||
echo "ip start $IP_START is not in $subnet"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
loadvars IP_END "$ip_end"
|
|
||||||
ipcalc $IP_END -c
|
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
echo "ip end $IP_END format should be x.x.x.x"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
ip_end_net=$(ipcalc $IP_END $NETMASK -n |cut -f 2 -d '=')
|
|
||||||
if [[ "$ip_end_net" != "$netaddr" ]]; then
|
|
||||||
echo "ip end $IP_END is not in $subnet"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
ip_start_int=$(ipaddr_convert $IP_START)
|
|
||||||
ip_end_int=$(ipaddr_convert $IP_END)
|
|
||||||
let ip_range=${ip_end_int}-${ip_start_int}
|
let ip_range=${ip_end_int}-${ip_start_int}
|
||||||
if [ $ip_range -le 0 ]; then
|
if [ ${ip_range} -le 0 ];then
|
||||||
echo "there is no available ips to assign between $IP_START and $IP_END"
|
echo "There is no avialable IPs between IP_START:'${IP_START}' and IP_END:'${IP_END}'."
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
echo "there will be at most $ip_range hosts deployed."
|
|
||||||
loadvars NEXTSERVER $IPADDR
|
|
||||||
ipcalc $NEXTSERVER -c
|
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
echo "next server $NEXTSERVER format should be x.x.x.x"
|
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
loadvars NAMESERVER_DOMAINS "ods.com"
|
# print all variables about IP
|
||||||
loadvars NAMESERVER_REVERSE_ZONES "unused"
|
for ip_var in ${ip_vars};do
|
||||||
loadvars WEB_SOURCE 'http://git.openstack.org/openstack/compass-web'
|
eval echo "${ip_var}=\$${ip_var}"
|
||||||
loadvars ADAPTERS_SOURCE 'https://gerrit.opnfv.org/gerrit/compass4nfv'
|
done
|
||||||
|
|
||||||
|
export NAMESERVER_DOMAINS=${NAMESERVER_DOMAINS:-"ods.com"}
|
||||||
|
export NAMESERVER_REVERSE_ZONES=${NAMESERVER_REVERSE_ZONES:-"unused"}
|
||||||
|
export WEB_SOURCE=${WEB_SOURCE:-"http://git.openstack.org/openstack/compass-web"}
|
||||||
|
export ADAPTERS_SOURCE=${ADAPTERS_SOURCE:-"https://gerrit.opnfv.org/gerrit/compass4nfv"}
|
||||||
|
|
||||||
|
echo "set the iptables' rules so that the openstack hosts installed can access remote compass through agent server"
|
||||||
|
set_iptables ${PUBLIC_IP} ${IPADDR} ${NETMASK}
|
||||||
|
|
||||||
|
rm -rf /etc/yum.repos.d/compass_install.repo 2>/dev/nullcp
|
||||||
|
cp ${COMPASSDIR}/misc/compass_install.repo /etc/yum.repos.d/
|
||||||
|
|
||||||
echo "script dir: $SCRIPT_DIR"
|
echo "script dir: $SCRIPT_DIR"
|
||||||
echo "compass dir is $COMPASSDIR"
|
echo "compass dir is $COMPASSDIR"
|
||||||
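The helper ipaddr_to_int used above is defined elsewhere in the installer and is not part of this hunk. As a rough sketch only (an assumed implementation, not the project's actual helper), a dotted-quad-to-integer conversion in bash can look like this:

# Hypothetical helper, shown only to illustrate the ip_range arithmetic above;
# the real ipaddr_to_int in the installer may differ.
ipaddr_to_int_sketch() {
    local a b c d
    IFS=. read -r a b c d <<< "$1"
    echo $(( (a << 24) + (b << 16) + (c << 8) + d ))
}
# Example: ipaddr_to_int_sketch 10.1.0.100 prints 167837796

With IP_START and IP_END converted this way, ip_range is simply the difference of the two integers, so a non-positive value means the range is empty or reversed.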
@@ -280,3 +351,18 @@ source ${COMPASSDIR}/install/compass.sh || exit $?

 figlet -ctf slant Installation Complete!
 echo -e "It takes\x1b[32m $SECONDS \x1b[0mseconds during the installation."
+
+machine_list_conf="MACHINE_LIST = [ { '${switch_IP}': [ "
+for host in ${PXE_MACs[@]}; do
+port=$(echo ${host} | awk -F , '{print $1}' | awk -F = '{print $2}')
+mac=$(echo ${host} | awk -F , '{print $2}' | awk -F = '{print $2}')
+machine_list_conf="${machine_list_conf}${comma}\n{'port': '${port}', 'mac': '${mac}', 'vlan': '0'}"
+comma=","
+done
+machine_list_conf="${machine_list_conf}\n ] } ]"
+sudo echo -e ${machine_list_conf} > /etc/compass/machine_list/machine_list.conf
+
+rm -rf /var/ansible/roles/keystone/vars/Debian.yml 2>/dev/null
+cp ${COMPASSDIR}/misc/adapter_changes/Debian.yml /var/ansible/roles/keystone/vars/
+rm -rf /var/ansible/roles/keystone/tasks/keystone_install.yml 2>/dev/null
+cp ${COMPASSDIR}/misc/adapter_changes/keystone_install.yml /var/ansible/roles/keystone/tasks/
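For reference, if PXE_MACs held two made-up entries such as port=1,mac=00:11:22:33:44:55 and port=2,mac=00:11:22:33:44:66, and switch_IP were 10.1.0.2 (all example values; both variables are expected to be set earlier by the installer), the loop above would write roughly the following to /etc/compass/machine_list/machine_list.conf:

MACHINE_LIST = [ { '10.1.0.2': [
{'port': '1', 'mac': '00:11:22:33:44:55', 'vlan': '0'},
{'port': '2', 'mac': '00:11:22:33:44:66', 'vlan': '0'}
 ] } ]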

21 install/register.sh Executable file
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+#set -x
+### Register current user to compass
+
+DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+source $DIR/install.conf
+echo "The email address you use to register is ${USER_EMAIL}"
+password=`< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c6`
+data=`echo "{\"email\":\"${USER_EMAIL}\",\"password\":\"${password}\"}"`
+wget -O /tmp/user_info --post-data=$data --header=Content-Type:application/json "http://www.stack360.io/api/users/register"
+
+if [ $? -ne 0 ]; then
+echo "Register failed"
+exit 1
+fi
+
+echo "Register succeeded, your password is $password, please keep it safe."
+sed -i 's/^CELERY_DEFAULT_QUEUE.*/CELERY_DEFAULT_QUEUE = '"${USER_EMAIL}"'/g' /etc/compass/celeryconfig
+sed -i 's/^CELERY_DEFAULT_EXCHANGE.*/CELERY_DEFAULT_EXCHANGE = '"${USER_EMAIL}"'/g' /etc/compass/celeryconfig
+sed -i 's/^CELERY_DEFAULT_ROUTING_KEY.*/CELERY_DEFAULT_ROUTING_KEY = '"${USER_EMAIL}"'/g' /etc/compass/celeryconfig
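The wget call above registers the user by POSTing a small JSON document to the public API and saves the server response to /tmp/user_info. An equivalent manual request with curl (email and password below are made-up placeholders) would look like:

# Hypothetical manual registration request mirroring what register.sh sends via wget.
curl -X POST \
    -H "Content-Type: application/json" \
    -d '{"email":"user@example.com","password":"secret1"}' \
    http://www.stack360.io/api/users/register

The generated 6-character password is printed only once by the script, so it has to be noted down.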

18 misc/adapter_changes/Debian.yml Normal file
@@ -0,0 +1,18 @@
+#############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+---
+
+cron_path: "/var/spool/cron/crontabs"
+
+packages:
+  - keystone
+
+services:
+  - apache2
+  - keystone

74 misc/adapter_changes/keystone_install.yml Normal file
@@ -0,0 +1,74 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: disable auto start
+  copy:
+    content: "#!/bin/sh\nexit 101"
+    dest: "/usr/sbin/policy-rc.d"
+    mode: 0755
+  when: ansible_os_family == "Debian"
+
+- name: install keystone packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: packages | union(packages_noarch)
+
+- name: enable auto start
+  file:
+    path=/usr/sbin/policy-rc.d
+    state=absent
+  when: ansible_os_family == "Debian"
+
+- name: generate keystone service list
+  lineinfile: dest=/opt/service create=yes line='{{ item }}'
+  with_items: services | union(services_noarch)
+
+- name: delete sqlite database
+  file:
+    path: /var/lib/keystone/keystone.db
+    state: absent
+
+- name: update keystone conf
+  template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
+  notify:
+    - restart keystone services
+
+- name: update apache2 configs
+  template:
+    src: wsgi-keystone.conf.j2
+    dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf'
+  when: ansible_os_family == 'Debian'
+  notify:
+    - restart keystone services
+
+- name: update apache2 configs
+  template:
+    src: wsgi-keystone.conf.j2
+    dest: '{{ apache_config_dir }}/wsgi-keystone.conf'
+  when: ansible_os_family == 'RedHat'
+  notify:
+    - restart keystone services
+
+- name: enable keystone server
+  file:
+    src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf"
+    dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf"
+    state: "link"
+  when: ansible_os_family == 'Debian'
+  notify:
+    - restart keystone services
+
+- name: keystone source files
+  template: src={{ item }} dest=/opt/{{ item }}
+  with_items:
+    - admin-openrc.sh
+    - demo-openrc.sh
+
+- meta: flush_handlers

5 misc/compass_install.repo Normal file
@@ -0,0 +1,5 @@
+[compass_install]
+name=compass_repo
+baseurl=http://www.stack360.io/centos7/
+gpgcheck=0
+enabled=1
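After the installer copies compass_install.repo into /etc/yum.repos.d/, the repository can be checked before anything is pulled from it (assuming yum is the package manager on the host, which the CentOS 7 baseurl implies):

# Refresh metadata and confirm the compass_install repository is visible to yum.
yum clean all
yum --disablerepo='*' --enablerepo=compass_install repolist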

2 tox.ini
@@ -28,7 +28,7 @@ commands = {posargs}
 commands = python setup.py testr --coverage --testr-args='{posargs}'

 [flake8]
-ignore = H302,H304,H233,H803,F401,H104,H236,H237,H238
+ignore = H302,H304,H233,H803,F401,H104,H236,H237,H238,E501
 show-source = true
 builtins = _
 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools,build
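With E501 added to the ignore list, overlong lines no longer fail the style gate. flake8 picks these settings up from the [flake8] section of tox.ini when invoked from the repository root, so a quick local check (assuming flake8 is installed) is:

# flake8 reads the [flake8] section of tox.ini automatically from the repo root.
pip install flake8
flake8 .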