Revert "Merge "fix compasss.sh""
This reverts commit 35472d1f153fb20393c6d6fb6dffc1b83db5355b, reversing changes made to 5a929be775bb4513560b87f5da314e12421df665. Change-Id: Ie5f2f92473489bb5be981b97e17597e91ff1cbf9
This commit is contained in:
parent
e4a0189298
commit
34e734f474
@@ -1,3 +1,22 @@
 #!/bin/bash
-/opt/compass/bin/refresh_agent.sh
-/opt/compass/bin/refresh_server.sh
+set -e
+systemctl restart mysql.service
+systemctl status mysql.service || exit $?
+/opt/compass/bin/manage_db.py createdb
+/opt/compass/bin/clean_installers.py --noasync
+/opt/compass/bin/clean_installation_logs.py
+rm -rf /var/ansible/run/*
+systemctl restart httpd.service
+systemctl status httpd.service || exit $?
+systemctl restart rsyslog.service
+systemctl status rsyslog.service || exit $?
+systemctl restart redis.service
+systemctl status redis.service || exit $?
+redis-cli flushall
+systemctl restart cobblerd.service
+systemctl status cobblerd.service || exit $?
+systemctl restart compass-celeryd.service
+systemctl status compass-celeryd.service || exit $?
+systemctl restart compass-progress-updated.service
+systemctl status compass-progress-updated.service || exit $?

@@ -1,22 +0,0 @@
-#!/bin/bash
-set -e
-# systemctl restart mysql.service
-# systemctl status mysql.service || exit $?
-# /opt/compass/bin/manage_db.py createdb
-/opt/compass/bin/clean_installers.py --noasync
-/opt/compass/bin/clean_installation_logs.py
-rm -rf /var/ansible/run/*
-# systemctl restart httpd.service
-# systemctl status httpd.service || exit $?
-systemctl restart rsyslog.service
-systemctl status rsyslog.service || exit $?
-systemctl restart redis.service
-systemctl status redis.service || exit $?
-redis-cli flushall
-systemctl restart cobblerd.service
-systemctl status cobblerd.service || exit $?
-systemctl restart compass-celeryd.service
-systemctl status compass-celeryd.service || exit $?
-# systemctl restart compass-progress-updated.service
-# systemctl status compass-progress-updated.service || exit $?
-

@@ -1,22 +0,0 @@
-#!/bin/bash
-set -e
-systemctl restart mysql.service
-systemctl status mysql.service || exit $?
-/opt/compass/bin/manage_db.py createdb
-# /opt/compass/bin/clean_installers.py --noasync
-# /opt/compass/bin/clean_installation_logs.py
-# rm -rf /var/ansible/run/*
-systemctl restart httpd.service
-systemctl status httpd.service || exit $?
-systemctl restart rsyslog.service
-systemctl status rsyslog.service || exit $?
-systemctl restart redis.service
-systemctl status redis.service || exit $?
-redis-cli flushall
-# systemctl restart cobblerd.service
-# systemctl status cobblerd.service || exit $?
-# systemctl restart compass-celeryd.service
-# systemctl status compass-celeryd.service || exit $?
-# systemctl restart compass-progress-updated.service
-# systemctl status compass-progress-updated.service || exit $?
-
@@ -150,8 +150,7 @@ def poll_switch(poller_email, ip_addr, credentials,

     for switch in switches:
         for machine_dict in machine_dicts:
-            logging.info('add machine: %s', machine_dict)
-            machine_dict['owner_id'] = poller.id
+            logging.debug('add machine: %s', machine_dict)
             switch_api.add_switch_machine(
                 switch['id'], False, user=poller, **machine_dict
             )

@@ -393,10 +393,6 @@ def _login(use_cookie):
     )
     data['expire_timestamp'] = expire_timestamp
     user = auth_handler.authenticate_user(**data)
-    if not user.active:
-        raise exception_handler.UserDisabled(
-            '%s is not activated' % user.email
-        )
     if not login_user(user, remember=data.get('remember', False)):
         raise exception_handler.UserDisabled('failed to login: %s' % user)

@@ -419,17 +415,6 @@ def login():
     return _login(True)


-@app.route("/users/register", methods=['POST'])
-def register():
-    """register new user."""
-    data = _get_request_data()
-    data['is_admin'] = False
-    data['active'] = False
-    return utils.make_json_response(
-        200, user_api.add_user(**data)
-    )
-
-
 @app.route('/users/logout', methods=['POST'])
 @login_required
 def logout():
@@ -3164,19 +3149,19 @@ def update_host_state(host_id):


 @util.deprecated
-@app.route("/hosts/<host_id>/state_internal", methods=['PUT', 'POST'])
-def update_host_state_internal(host_id):
+@app.route("/hosts/<hostname>/state_internal", methods=['PUT', 'POST'])
+def update_host_state_internal(hostname):
     """update host state.

     Supported fields: ['ready']
     """
     data = _get_request_data()
-    host_id = int(host_id)
-    hosts = host_api.list_hosts(id=host_id)
+    hosts = host_api.list_hosts(name=hostname)
     if not hosts:
         raise exception_handler.ItemNotFound(
-            'no hosts found for host_id %s' % host_id
+            'no hosts found for hostname %s' % hostname
         )
+    host_id = hosts[0]['id']
     return utils.make_json_response(
         200,
         host_api.update_host_state_internal(
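Note on the route change above: in Flask the variable name inside the route rule must match the view function's parameter, which is why the decorator and the signature change together. A minimal sketch of the mechanism, not Compass's actual handlers (the route paths and function names below are illustrative only):

from flask import Flask

app = Flask(__name__)

# Integer converter: Flask validates the segment and passes it in as an int.
@app.route("/hosts/<int:host_id>/state_internal", methods=['PUT', 'POST'])
def update_by_id(host_id):
    return 'host %d' % host_id

# Plain string variable: the view receives the raw hostname and resolves it itself.
@app.route("/hosts/<hostname>/state_internal", methods=['PUT', 'POST'])
def update_by_name(hostname):
    return 'host %s' % hostname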
@@ -359,7 +359,7 @@ def add_cluster(

     cluster = utils.add_db_object(
         session, models.Cluster, exception_when_existing,
-        name, user.id, adapter_id=adapter_id,
+        name, creator_id=user.id, adapter_id=adapter_id,
         flavor_id=flavor_id, flavor=flavor, **kwargs
     )
     return cluster

@@ -480,10 +480,7 @@ def del_cluster(
                 for clusterhost in cluster.clusterhosts
             ],
             delete_underlying_host
-        ),
-        queue=user.email,
-        exchange=user.email,
-        routing_key=user.email
+        )
     )
     return {
         'status': 'delete action is sent',
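The recurring pattern in this revert is dropping the per-user routing arguments from celery send_task calls, so tasks fall back to the worker's default queue instead of a queue named after the caller's email. A minimal sketch of the two call styles (task name, arguments, and broker URL are illustrative, not taken from Compass):

from celery import Celery

celery = Celery(broker='amqp://guest:guest@localhost:5672//')

# Explicit routing: the message goes to a queue/exchange named after the user,
# so only a worker consuming that queue will run it.
celery.send_task(
    'tasks.delete_cluster',
    ('user@example.com', 42, True),
    queue='user@example.com',
    exchange='user@example.com',
    routing_key='user@example.com'
)

# Default routing: any worker listening on the default queue picks it up.
celery.send_task('tasks.delete_cluster', ('user@example.com', 42, True))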
@@ -1186,10 +1183,7 @@ def _del_cluster_host(
         (
             user.email, clusterhost.cluster_id, clusterhost.host_id,
             delete_underlying_host
-        ),
-        queue=user.email,
-        exchange=user.email,
-        routing_key=user.email
+        )
     )
     return {
         'status': 'delete action sent',

@@ -1860,10 +1854,7 @@ def deploy_cluster(
         (
             user.email, cluster_id,
             [clusterhost.host_id for clusterhost in clusterhosts]
-        ),
-        queue=user.email,
-        exchange=user.email,
-        routing_key=user.email
+        )
     )
     return {
         'status': 'deploy action sent',

@@ -1927,10 +1918,7 @@ def redeploy_cluster(
         'compass.tasks.redeploy_cluster',
         (
             user.email, cluster_id
-        ),
-        queue=user.email,
-        exchange=user.email,
-        routing_key=user.email
+        )
     )
     return {
         'status': 'redeploy action sent',

@@ -1957,10 +1945,7 @@ def patch_cluster(cluster_id, user=None, session=None, **kwargs):
         'compass.tasks.patch_cluster',
         (
             user.email, cluster_id,
-        ),
-        queue=user.email,
-        exchange=user.email,
-        routing_key=user.email
+        )
     )
     return {
         'status': 'patch action sent',

@@ -2061,7 +2046,7 @@ def update_cluster_host_state(

 def _update_clusterhost_state(
     clusterhost, from_database_only=False,
-    session=None, user=None, **kwargs
+    session=None, **kwargs
 ):
     """Update clusterhost state.

@@ -2102,22 +2087,13 @@ def _update_clusterhost_state(
             utils.update_db_object(session, cluster.state, ready=False)
         status = '%s state is updated' % clusterhost.name
     else:
-        if not user:
-            user_id = cluster.creator_id
-            user_dict = user_api.get_user(user_id, session=session)
-            user_email = user_dict['email']
-        else:
-            user_email = user.email
         from compass.tasks import client as celery_client
         celery_client.celery.send_task(
             'compass.tasks.package_installed',
             (
                 clusterhost.cluster_id, clusterhost.host_id,
                 cluster_ready, host_ready
-            ),
-            queue=user_email,
-            exchange=user_email,
-            routing_key=user_email
+            )
         )
         status = '%s: cluster ready %s host ready %s' % (
             clusterhost.name, cluster_ready, host_ready
@@ -2150,7 +2126,7 @@ def update_cluster_host_state_internal(
     )
     return _update_clusterhost_state(
         clusterhost, from_database_only=from_database_only,
-        session=session, users=user, **kwargs
+        session=session, **kwargs
     )


@@ -2193,7 +2169,7 @@ def update_clusterhost_state_internal(
     clusterhost = _get_clusterhost(clusterhost_id, session=session)
     return _update_clusterhost_state(
         clusterhost, from_database_only=from_database_only,
-        session=session, user=user, **kwargs
+        session=session, **kwargs
     )


@@ -2264,19 +2240,10 @@ def update_cluster_state_internal(
         )
         status = '%s state is updated' % cluster.name
     else:
-        if not user:
-            user_id = cluster.creator_id
-            user_dict = user_api.get_user(user_id, session=session)
-            user_email = user_dict['email']
-        else:
-            user_email = user.email
         from compass.tasks import client as celery_client
         celery_client.celery.send_task(
             'compass.tasks.cluster_installed',
-            (clusterhost.cluster_id, clusterhost_ready),
-            queue=user_email,
-            exchange=user_email,
-            routing_key=user_email
+            (clusterhost.cluster_id, clusterhost_ready)
         )
         status = '%s installed action set clusterhost ready %s' % (
             cluster.name, clusterhost_ready

@@ -179,10 +179,7 @@ def start_check_cluster_health(cluster_id, send_report_url,
     from compass.tasks import client as celery_client
     celery_client.celery.send_task(
         'compass.tasks.cluster_health',
-        (cluster.id, send_report_url, user.email),
-        queue=user.email,
-        exchange=user.email,
-        routing_key=user.email
+        (cluster.id, send_report_url, user.email)
     )
     return {
         "cluster_id": cluster.id,
@@ -28,7 +28,7 @@ from compass.db import models
 from compass.utils import util


-SUPPORTED_FIELDS = ['name', 'os_name', 'owner', 'mac', 'id']
+SUPPORTED_FIELDS = ['name', 'os_name', 'owner', 'mac']
 SUPPORTED_MACHINE_HOST_FIELDS = [
     'mac', 'tag', 'location', 'os_name', 'os_id'
 ]

@@ -414,21 +414,12 @@ def del_host(
     logging.info(
         'send del host %s task to celery', host_id
     )
-    if not user:
-        user_id = host.creator_id
-        user_dict = user_api.get_user(user_id, session=session)
-        user_email = user_dict['email']
-    else:
-        user_email = user.email
     from compass.tasks import client as celery_client
     celery_client.celery.send_task(
         'compass.tasks.delete_host',
         (
             user.email, host.id, cluster_ids
-        ),
-        queue=user_email,
-        exchange=user_email,
-        routing_key=user_email
+        )
     )
     return {
         'status': 'delete action sent',

@@ -658,11 +649,10 @@ def _add_host_network(
     """Add hostnetwork to a host."""
     host = _get_host(host_id, session=session)
     check_host_editable(host, user=user)
-    user_id = user.id
     return utils.add_db_object(
         session, models.HostNetwork,
         exception_when_existing,
-        host.id, interface, user_id, ip=ip, **kwargs
+        host.id, interface, ip=ip, **kwargs
     )


@@ -884,7 +874,6 @@ def update_host_state_internal(
     """
     # TODO(xicheng): should be merged into update_host_state
     host = _get_host(host_id, session=session)
-    logging.info("======host state: %s", host.state)
     if 'ready' in kwargs and kwargs['ready'] and not host.state.ready:
         ready_triggered = True
     else:

@@ -923,22 +912,13 @@ def update_host_state_internal(
         )
         status = '%s state is updated' % host.name
     else:
-        if not user:
-            user_id = host.creator_id
-            user_dict = user_api.get_user(user_id, session=session)
-            user_email = user_dict['email']
-        else:
-            user_email = user.email
         from compass.tasks import client as celery_client
         celery_client.celery.send_task(
             'compass.tasks.os_installed',
             (
                 host.id, clusterhosts_ready,
                 clusters_os_ready
-            ),
-            queue=user_email,
-            exchange=user_email,
-            routing_key=user_email
+            )
         )
         status = '%s: clusterhosts ready %s clusters os ready %s' % (
             host.name, clusterhosts_ready, clusters_os_ready
@@ -1031,18 +1011,9 @@ def poweron_host(
     from compass.tasks import client as celery_client
     host = _get_host(host_id, session=session)
     check_host_validated(host)
-    if not user:
-        user_id = host.creator_id
-        user_dict = user_api.get_user(user_id, session=session)
-        user_email = user_dict['email']
-    else:
-        user_email = user.email
     celery_client.celery.send_task(
         'compass.tasks.poweron_host',
-        (host.id,),
-        queue=user_email,
-        exchange=user_email,
-        routing_key=user_email
+        (host.id,)
     )
     return {
         'status': 'poweron %s action sent' % host.name,

@@ -1066,18 +1037,9 @@ def poweroff_host(
     from compass.tasks import client as celery_client
     host = _get_host(host_id, session=session)
     check_host_validated(host)
-    if not user:
-        user_id = host.creator_id
-        user_dict = user_api.get_user(user_id, session=session)
-        user_email = user_dict['email']
-    else:
-        user_email = user.email
     celery_client.celery.send_task(
         'compass.tasks.poweroff_host',
-        (host.id,),
-        queue=user_email,
-        exchange=user_email,
-        routing_key=user_email
+        (host.id,)
     )
     return {
         'status': 'poweroff %s action sent' % host.name,

@@ -1101,18 +1063,9 @@ def reset_host(
     from compass.tasks import client as celery_client
     host = _get_host(host_id, session=session)
     check_host_validated(host)
-    if not user:
-        user_id = host.creator_id
-        user_dict = user_api.get_user(user_id, session=session)
-        user_email = user_dict['email']
-    else:
-        user_email = user.email
     celery_client.celery.send_task(
         'compass.tasks.reset_host',
-        (host.id,),
-        queue=user_email,
-        exchange=user_email,
-        routing_key=user_email
+        (host.id,)
    )
     return {
         'status': 'reset %s action sent' % host.name,
@@ -226,18 +226,9 @@ def poweron_machine(
     machine = _get_machine(
         machine_id, session=session
     )
-    if not user:
-        user_id = machine.owner_id
-        user_dict = user_api.get_user(user_id, session=session)
-        user_email = user_dict['email']
-    else:
-        user_email = user.email
     celery_client.celery.send_task(
         'compass.tasks.poweron_machine',
-        (machine_id,),
-        queue=user_email,
-        exchange=user_email,
-        routing_key=user_email
+        (machine_id,)
     )
     return {
         'status': 'poweron %s action sent' % machine.mac,

@@ -262,18 +253,9 @@ def poweroff_machine(
     machine = _get_machine(
         machine_id, session=session
     )
-    if not user:
-        user_id = machine.owner_id
-        user_dict = user_api.get_user(user_id, session=session)
-        user_email = user_dict['email']
-    else:
-        user_email = user.email
     celery_client.celery.send_task(
         'compass.tasks.poweroff_machine',
-        (machine_id,),
-        queue=user_email,
-        exchange=user_email,
-        routing_key=user_email
+        (machine_id,)
     )
     return {
         'status': 'poweroff %s action sent' % machine.mac,

@@ -298,18 +280,9 @@ def reset_machine(
     machine = _get_machine(
         machine_id, session=session
     )
-    if not user:
-        user_id = machine.owner_id
-        user_dict = user_api.get_user(user_id, session=session)
-        user_email = user_dict['email']
-    else:
-        user_email = user.email
     celery_client.celery.send_task(
         'compass.tasks.reset_machine',
-        (machine_id,),
-        queue=user_email,
-        exchange=user_email,
-        routing_key=user_email
+        (machine_id,)
     )
     return {
         'status': 'reset %s action sent' % machine.mac,
@@ -30,11 +30,10 @@ from compass.utils import util
 SUPPORTED_FIELDS = ['ip_int', 'vendor', 'state']
 SUPPORTED_FILTER_FIELDS = ['ip_int', 'vendor', 'state']
 SUPPORTED_SWITCH_MACHINES_FIELDS = [
-    'switch_ip_int', 'port', 'vlans', 'mac', 'tag', 'location',
-    'owner_id'
+    'switch_ip_int', 'port', 'vlans', 'mac', 'tag', 'location'
 ]
 SUPPORTED_MACHINES_FIELDS = [
-    'port', 'vlans', 'mac', 'tag', 'location', 'owner_id'
+    'port', 'vlans', 'mac', 'tag', 'location'
 ]
 SUPPORTED_SWITCH_MACHINES_HOSTS_FIELDS = [
     'switch_ip_int', 'port', 'vlans', 'mac',

@@ -58,7 +57,7 @@ UPDATED_FILTERS_FIELDS = ['put_machine_filters']
 PATCHED_FILTERS_FIELDS = ['patched_machine_filters']
 ADDED_MACHINES_FIELDS = ['mac']
 OPTIONAL_ADDED_MACHINES_FIELDS = [
-    'ipmi_credentials', 'tag', 'location', 'owner_id'
+    'ipmi_credentials', 'tag', 'location'
 ]
 ADDED_SWITCH_MACHINES_FIELDS = ['port']
 OPTIONAL_ADDED_SWITCH_MACHINES_FIELDS = ['vlans']

@@ -66,7 +65,7 @@ UPDATED_MACHINES_FIELDS = [
     'ipmi_credentials',
     'tag', 'location'
 ]
-UPDATED_SWITCH_MACHINES_FIELDS = ['port', 'vlans', 'owner_id']
+UPDATED_SWITCH_MACHINES_FIELDS = ['port', 'vlans']
 PATCHED_MACHINES_FIELDS = [
     'patched_ipmi_credentials',
     'patched_tag', 'patched_location'

@@ -84,7 +83,7 @@ RESP_ACTION_FIELDS = [
 ]
 RESP_MACHINES_FIELDS = [
     'id', 'switch_id', 'switch_ip', 'machine_id', 'switch_machine_id',
-    'port', 'vlans', 'mac', 'owner_id',
+    'port', 'vlans', 'mac',
     'ipmi_credentials', 'tag', 'location',
     'created_at', 'updated_at'
 ]

@@ -591,8 +590,6 @@ def list_switch_machines(
     switch_machines = utils.list_db_objects(
         session, models.SwitchMachine, switch_id=switch.id, **filters
     )
-    if not user.is_admin and len(switch_machines):
-        switch_machines = [m for m in switch_machines if m.machine.owner_id == user.id]
     return _filter_switch_machines(switch_machines)


@@ -653,8 +650,6 @@ def list_switchmachines_hosts(user=None, session=None, **filters):
     switch_machines = utils.list_db_objects(
         session, models.SwitchMachine, **filters
     )
-    if not user.is_admin and len(switch_machines):
-        switch_machines = [m for m in switch_machines if m.machine.owner_id == user.id]
     return _filter_switch_machines_hosts(
         switch_machines
     )

@@ -681,14 +676,13 @@ def _add_machine_if_not_exist(mac=None, session=None, **kwargs):
 @utils.input_validates(vlans=_check_vlans)
 def _add_switch_machine_only(
     switch, machine, exception_when_existing=True,
-    session=None, owner_id=None, port=None, **kwargs
+    session=None, port=None, **kwargs
 ):
     """add a switch machine."""
     return utils.add_db_object(
         session, models.SwitchMachine,
         exception_when_existing,
         switch.id, machine.id, port=port,
-        owner_id=owner_id,
         **kwargs
     )
@@ -704,7 +698,7 @@ def _add_switch_machine_only(
 @utils.wrap_to_dict(RESP_MACHINES_FIELDS)
 def _add_switch_machine(
     switch_id, exception_when_existing=True,
-    mac=None, port=None, session=None, owner_id=None, **kwargs
+    mac=None, port=None, session=None, **kwargs
 ):
     """Add switch machine.

@@ -713,7 +707,7 @@ def _add_switch_machine(
     """
     switch = _get_switch(switch_id, session=session)
     machine = _add_machine_if_not_exist(
-        mac=mac, session=session, owner_id=owner_id, **kwargs
+        mac=mac, session=session, **kwargs
     )
     return _add_switch_machine_only(
         switch, machine,

@@ -728,14 +722,13 @@ def _add_switch_machine(
 )
 def add_switch_machine(
     switch_id, exception_when_existing=True,
-    mac=None, user=None, session=None,
-    owner_id=None, **kwargs
+    mac=None, user=None, session=None, **kwargs
 ):
     """Add switch machine to a switch."""
     return _add_switch_machine(
         switch_id,
         exception_when_existing=exception_when_existing,
-        mac=mac, session=session, owner_id=owner_id, **kwargs
+        mac=mac, session=session, **kwargs
     )


@@ -754,7 +747,7 @@ def add_switch_machine(
 )
 def add_switch_machines(
     exception_when_existing=False,
-    data=[], user=None, session=None, owner_id=None
+    data=[], user=None, session=None
 ):
     """Add switch machines."""
     switch_machines = []

@@ -824,7 +817,7 @@ def add_switch_machines(
             switch_machines.append(_add_switch_machine_only(
                 switch_object, machine_object,
                 exception_when_existing,
-                session=session, owner_id=owner_id, **machine
+                session=session, **machine
             ))
     return {
         'switches_machines': switch_machines,

@@ -845,10 +838,7 @@ def poll_switch(switch_id, user=None, session=None, **kwargs):
     switch = _get_switch(switch_id, session=session)
     celery_client.celery.send_task(
         'compass.tasks.pollswitch',
-        (user.email, switch.ip, switch.credentials),
-        queue=user.email,
-        exchange=user.email,
-        routing_key=user.email
+        (user.email, switch.ip, switch.credentials)
     )
     return {
         'status': 'action %s sent' % kwargs,

@@ -1126,8 +1116,7 @@ def _add_machine_to_switch(
         machine_id, session=session
     )
     _add_switch_machine_only(
-        switch, machine, False,
-        owner_id=machine.owner_id, **kwargs
+        switch, machine, False, **kwargs
     )
@@ -204,20 +204,17 @@ class HostNetwork(BASE, TimestampMixin, HelperMixin):
         Integer,
         ForeignKey('subnet.id', onupdate='CASCADE', ondelete='CASCADE')
     )
-    user_id = Column(Integer, ForeignKey('user.id'))
-    ip_int = Column(BigInteger, nullable=False)
+    ip_int = Column(BigInteger, unique=True, nullable=False)
     is_mgmt = Column(Boolean, default=False)
     is_promiscuous = Column(Boolean, default=False)

     __table_args__ = (
-        UniqueConstraint('host_id', 'interface', name='interface_constraint'),
-        UniqueConstraint('ip_int', 'user_id', name='ip_constraint')
+        UniqueConstraint('host_id', 'interface', name='constraint'),
     )

-    def __init__(self, host_id, interface, user_id, **kwargs):
+    def __init__(self, host_id, interface, **kwargs):
         self.host_id = host_id
         self.interface = interface
-        self.user_id = user_id
         super(HostNetwork, self).__init__(**kwargs)

     def __str__(self):

@@ -268,7 +265,6 @@ class HostNetwork(BASE, TimestampMixin, HelperMixin):
         dict_info['interface'] = self.interface
         dict_info['netmask'] = self.netmask
         dict_info['subnet'] = self.subnet.subnet
-        dict_info['user_id'] = self.user_id
         return dict_info


@@ -706,7 +702,7 @@ class Host(BASE, TimestampMixin, HelperMixin):
     """Host table."""
     __tablename__ = 'host'

-    name = Column(String(80), nullable=True)
+    name = Column(String(80), unique=True, nullable=True)
     config_step = Column(String(80), default='')
     os_config = Column(JSONEncoded, default={})
     config_validated = Column(Boolean, default=False)

@@ -716,10 +712,6 @@ class Host(BASE, TimestampMixin, HelperMixin):
     owner = Column(String(80))
     os_installer = Column(JSONEncoded, default={})

-    __table_args__ = (
-        UniqueConstraint('name', 'owner', name='constraint'),
-    )
-
     id = Column(
         Integer,
         ForeignKey('machine.id', onupdate='CASCADE', ondelete='CASCADE'),

@@ -980,7 +972,7 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
     __tablename__ = 'cluster'

     id = Column(Integer, primary_key=True)
-    name = Column(String(80), nullable=False)
+    name = Column(String(80), unique=True, nullable=False)
     reinstall_distributed_system = Column(Boolean, default=True)
     config_step = Column(String(80), default='')
     os_name = Column(String(80))

@@ -1008,13 +1000,9 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
         cascade='all, delete-orphan',
         backref=backref('cluster')
     )
-    __table_args__ = (
-        UniqueConstraint('name', 'creator_id', name='constraint'),
-    )

-    def __init__(self, name, creator_id, **kwargs):
+    def __init__(self, name, **kwargs):
         self.name = name
-        self.creator_id = creator_id
         self.state = ClusterState()
         super(Cluster, self).__init__(**kwargs)
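The model changes above replace composite per-owner unique constraints with plain unique columns. For reference, a minimal sketch of how a composite UniqueConstraint is declared in SQLAlchemy's __table_args__; the table and column names are illustrative only, not the Compass schema:

from sqlalchemy import Column, Integer, String, UniqueConstraint
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class ExampleHost(Base):
    __tablename__ = 'example_host'

    id = Column(Integer, primary_key=True)
    name = Column(String(80))       # not unique on its own
    owner = Column(String(80))

    # The (name, owner) pair must be unique, so two owners may reuse a name;
    # a plain unique=True on name would forbid that.
    __table_args__ = (
        UniqueConstraint('name', 'owner', name='name_owner_constraint'),
    )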
@@ -1369,7 +1357,6 @@ class SwitchMachine(BASE, HelperMixin, TimestampMixin):
         Integer,
         ForeignKey('machine.id', onupdate='CASCADE', ondelete='CASCADE')
     )
-    owner_id = Column(Integer, ForeignKey('user.id'))
     port = Column(String(80), nullable=True)
     vlans = Column(JSONEncoded, default=[])
     __table_args__ = (

@@ -232,7 +232,7 @@ class CobblerInstaller(OSInstaller):
             err_msg = "Template '%s' does not exists!" % tmpl_path
             logging.error(err_msg)
             raise Exception(err_msg)
-        host_vars_dict[const.BASEINFO]['host_id'] = host_id
         system_config = self.get_config_from_template(tmpl_path,
                                                        host_vars_dict)
@@ -18,15 +18,13 @@
 """
 import logging
 import os.path
-import urllib

 from compass.utils import setting_wrapper as setting


-# CELERY_RESULT_BACKEND = 'amqp://'
+CELERY_RESULT_BACKEND = 'amqp://'

-# BROKER_URL = 'amqp://guest:guest@localhost:5672//'
-
+BROKER_URL = 'amqp://guest:guest@localhost:5672//'

 CELERY_IMPORTS = ('compass.tasks.tasks',)

@@ -1,12 +1,9 @@
-## Celery related setting: this is the default setting
+## Celery related setting: this is the default setting once we install RabbitMQ

 CELERY_RESULT_BACKEND ="amqp://"
-BROKER_URL = "amqp://guest:guest@localhost:5672//"

+BROKER_URL = "amqp://guest:guest@localhost:5672//"

 CELERY_IMPORTS=("compass.tasks.tasks",)
 CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']
 C_FORCE_ROOT = 1
-CELERY_DEFAULT_QUEUE = 'admin@huawei.com'
-CELERY_DEFAULT_EXCHANGE = 'admin@huawei.com'
-CELERY_DEFAULT_ROUTING_KEY = 'admin@huawei.com'
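For context, a Celery app typically consumes a settings module like the celeryconfig above through config_from_object. A minimal sketch, with an assumed app name and module path rather than Compass's actual wiring:

from celery import Celery

celery = Celery('compass_tasks')
# Loads CELERY_RESULT_BACKEND, BROKER_URL, CELERY_IMPORTS, ... from the module.
celery.config_from_object('celeryconfig')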
@@ -1,16 +0,0 @@
-## Celery related setting
-import urllib
-
-#CELERY_RESULT_BACKEND ="amqp://"
-#BROKER_URL = "amqp://guest:guest@www.stack360.io:5672//"
-
-BROKER_TRANSPORT = 'sqs'
-BROKER_URL = 'sqs://%s:%s@' % (urllib.quote(ACCESS_ID, safe=''),
-                               urllib.quote(ACCESS_KEY, safe=''))
-
-CELERY_IMPORTS=("compass.tasks.tasks",)
-CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']
-C_FORCE_ROOT = 1
-CELERY_DEFAULT_QUEUE = 'admin@huawei.com'
-CELERY_DEFAULT_EXCHANGE = 'admin@huawei.com'
-CELERY_DEFAULT_ROUTING_KEY = 'admin@huawei.com'
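The deleted remote config above builds an SQS broker URL by URL-quoting AWS credentials; the literal ACCESS_ID/ACCESS_KEY tokens are placeholders that the installer substitutes with sed. A minimal sketch of the same idea in Python 3 with dummy values:

from urllib.parse import quote

access_id = 'AKIAEXAMPLE'      # dummy credential, not a real key
access_key = 'abc/def+ghi'     # characters such as '/' and '+' must be quoted

BROKER_TRANSPORT = 'sqs'
# safe='' forces '/' to be percent-encoded so the URL stays parseable.
BROKER_URL = 'sqs://%s:%s@' % (quote(access_id, safe=''),
                               quote(access_key, safe=''))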
@@ -2,7 +2,7 @@ CONFIG_DIR = '/etc/compass'
 DATABASE_TYPE = 'mysql'
 DATABASE_USER = 'root'
 DATABASE_PASSWORD = 'root'
-DATABASE_SERVER = 'www.stack360.io:3306'
+DATABASE_SERVER = '127.0.0.1:3306'
 DATABASE_NAME = 'compass'
 SQLALCHEMY_DATABASE_URI = '%s://%s:%s@%s/%s' % (DATABASE_TYPE, DATABASE_USER, DATABASE_PASSWORD, DATABASE_SERVER, DATABASE_NAME)
 SQLALCHEMY_DATABASE_POOL_TYPE = 'instant'

@@ -113,10 +113,8 @@ ERLANG_TOKEN: YOWSJSJIGGAUFZTIBRAD
 #set neutron_pass = $console_credentials.network.password
 #set ceilometer_dbpass = $credentials.metering.password
 #set ceilometer_pass = $console_credentials.metering.password
-##set aodh_dbpass = $credentials.alarming.password
-#set aodh_dbpass = "alarming"
-##set aodh_pass = $console_credentials.alarming.password
-#set aodh_pass = "alarming"
+#set aodh_dbpass = $credentials.alarming.password
+#set aodh_pass = $console_credentials.alarming.password
 #set admin_pass = $console_credentials.admin.password
 #set demo_pass = $console_credentials.demo.password

@@ -71,7 +71,6 @@
     "timezone": "$timezone",
     "ignore_proxy": "$no_proxy",
     "local_repo": "$getVar('local_repo', '')",
-    "disk_num": "1",
-    "host_id": "$getVar('host_id', int())"
+    "disk_num": "1"
     }
 }

@@ -71,7 +71,6 @@
     "timezone": "$timezone",
     "ignore_proxy": "$no_proxy",
     "local_repo": "$getVar('local_repo', '')",
-    "disk_num": "1",
-    "host_id": "$getVar('host_id', int())"
+    "disk_num": "1"
     }
 }

@@ -70,7 +70,6 @@
     "timezone": "$timezone",
     "ignore_proxy": "$no_proxy",
     "local_repo": "$getVar('local_repo', '')",
-    "disk_num": "1",
-    "host_id": "$getVar('host_id', int())"
+    "disk_num": "1"
     }
 }

@@ -70,7 +70,6 @@
     "timezone": "$timezone",
     "ignore_proxy": "$no_proxy",
     "local_repo": "$getVar('local_repo', '')",
-    "disk_num": "1",
-    "host_id": "$getVar('host_id', int())"
+    "disk_num": "1"
     }
 }
@@ -13,9 +13,7 @@ fi
 source $DIR/install_func.sh

 echo "INstalling ansible related packages"
-# sudo yum -y install ansible
-pip install ansible==1.9.2
-pip install python-keyczar
+sudo yum -y install ansible
 if [[ "$?" != "0" ]]; then
     echo "Failed to install ansible"
     exit 1

@@ -27,7 +25,7 @@ sudo cp -rn /var/ansible/* /root/backup/ansible/

 for i in `ls $ADAPTERS_HOME/ansible/ | grep "openstack_"`; do
     mkdir -p /var/ansible/$i
-    cp -rf $ADAPTERS_HOME/ansible/openstack/* /var/ansible/$i
+    # cp -rf $ADAPTERS_HOME/ansible/openstack/* /var/ansible/$i
     cp -rf $ADAPTERS_HOME/ansible/$i /var/ansible/
 done

@@ -13,7 +13,7 @@ fi
 source $DIR/install_func.sh

 echo "Installing cobbler related packages"
-sudo yum --enablerepo=compass_install --nogpgcheck -y install cobbler cobbler-web createrepo mkisofs python-cheetah python-simplejson python-urlgrabber PyYAML Django corosync pykickstart
+sudo yum -y install cobbler cobbler-web createrepo mkisofs python-cheetah python-simplejson python-urlgrabber PyYAML Django corosync pykickstart
 sudo yum -y upgrade yum-utils
 if [[ "$?" != "0" ]]; then
     echo "failed to install cobbler related packages"

@@ -28,8 +28,6 @@ sudo rm -rf /var/www/compass/*

 sudo cp -rf $COMPASSDIR/misc/apache/ods-server.conf /etc/httpd/conf.d/ods-server.conf
 sudo cp -rf $COMPASSDIR/misc/apache/http_pip.conf /etc/httpd/conf.d/http_pip.conf
-sudo cp -rf $COMPASSDIR/misc/apache/images.conf /etc/httpd/conf.d/images.conf
-sudo cp -rf $COMPASSDIR/misc/apache/packages.conf /etc/httpd/conf.d/packages.conf
 sudo cp -rf $COMPASSDIR/conf/* /etc/compass/
 sudo cp -rf $COMPASSDIR/service/* /etc/init.d/
 sudo cp -rf $COMPASSDIR/bin/*.py /opt/compass/bin/

@@ -42,11 +40,6 @@ sudo ln -s -f /opt/compass/bin/compass_wsgi.py /var/www/compass/compass.wsgi
 sudo cp -rf $COMPASSDIR/bin/chef/* /opt/compass/bin/
 sudo cp -rf $COMPASSDIR/bin/cobbler/* /opt/compass/bin/

-if [ "$FULL_COMPASS_SERVER" == "false" ]; then
-    sudo rm -rf /opt/compass/bin/refresh.sh
-    sudo rm -rf /opt/compass/bin/refresh_server.sh
-fi
-
 if [[ $SUPPORT_CENTOS_7_2 != "y" ]]; then
     sudo rm -f /etc/compass/os/centos7.0.conf
 fi

@@ -64,7 +57,6 @@ if [ ! -f /usr/lib64/libcrypto.so ]; then
 fi

 download -u "$PIP_PACKAGES" `basename $PIP_PACKAGES` unzip /var/www/ || exit $?
-download -u "$EXTRA_PACKAGES" `basename $EXTRA_PACKAGES` unzip /var/www/ || exit $?

 sudo mkdir -p /opt/compass/db
 sudo chmod -R 777 /opt/compass/db

@@ -93,11 +85,6 @@ sudo sed -i "s/\$hostname/$HOSTNAME/g" /etc/compass/setting
 sudo sed -i "s/\$gateway/$OPTION_ROUTER/g" /etc/compass/setting
 domains=$(echo $NAMESERVER_DOMAINS | sed "s/,/','/g")
 sudo sed -i "s/\$domains/$domains/g" /etc/compass/setting
-if [ "$FULL_COMPASS_SERVER" == "true" ]; then
-    sudo sed -i "/DATABASE_SERVER =/c\DATABASE_SERVER = '127.0.0.1:3306'" /etc/compass/setting
-else
-    sudo sed -i "/DATABASE_SERVER =/c\DATABASE_SERVER = '\$COMPASS_API_SERVER:3306'" /etc/compass/setting
-fi

 sudo sed -i "s/\$cobbler_ip/$IPADDR/g" /etc/compass/os_installer/cobbler.conf
 #sudo sed -i "s/\$chef_ip/$IPADDR/g" /etc/compass/package_installer/chef-icehouse.conf

@@ -130,29 +117,11 @@ else
     echo "redis is not running"
     exit 1
 fi
-if [ "$FULL_COMPASS_SERVER" == "true" ]; then
-    sudo mv /etc/compass/celeryconfig_local /etc/compass/celeryconfig
-elif [ "$COMPASS_API_SERVER" != "c.stack360.io" ];then
-    sudo mv /etc/compass/celeryconfig_local /etc/compass/celeryconfig
-    sudo sed -i "s/localhost/\$COMPASS_API_SERVER/g" /etc/compass/celeryconfig
-else
-    sudo mv /etc/compass/celeryconfig_remote /etc/compass/celeryconfig
-    wget -O /tmp/aws_credentials "http://www.stack360.io/aws_credentials"
-    filename='/tmp/aws_credentials'
-    id=$(sed -n '1p' < $filename)
-    key=$(sed -n '2p' < $filename)
-    sudo sed -i "s~ACCESS_ID~$id~g" /etc/compass/celeryconfig
-    sudo sed -i "s~ACCESS_KEY~$key~g" /etc/compass/celeryconfig
-fi
 sudo systemctl enable compass-progress-updated.service
 sudo systemctl enable compass-celeryd.service

-if [ "$FULL_COMPASS_SERVER" == "true" ]; then
-    /opt/compass/bin/refresh.sh
-else
-    /opt/compass/bin/refresh_agent.sh
-fi
+/opt/compass/bin/refresh.sh

 if [[ "$?" != "0" ]]; then
     echo "failed to refresh compassd service"
     exit 1
@@ -176,23 +145,12 @@ else
     echo "redis has already started"
 fi

-if [ "$FULL_COMPASS_SERVER" == "true" ]; then
 sudo systemctl status mysql.service |grep running
 if [[ "$?" != "0" ]]; then
     echo "mysqld is not started"
     exit 1
 fi

-#sudo systemctl status compass-progress-updated.service |grep running
-#if [[ "$?" != "0" ]]; then
-#    echo "compass-progress-updated is not started"
-#    exit 1
-#else
-#    echo "compass-progress-updated has already started"
-#fi
-fi

 sudo systemctl status compass-celeryd.service |grep running
 if [[ "$?" != "0" ]]; then
     echo "compass-celeryd is not started"

@@ -201,6 +159,14 @@ else
     echo "compass-celeryd has already started"
 fi

+sudo systemctl status compass-progress-updated.service |grep running
+if [[ "$?" != "0" ]]; then
+    echo "compass-progress-updated is not started"
+    exit 1
+else
+    echo "compass-progress-updated has already started"
+fi
+
 sleep 10
 #compass check
 #if [[ "$?" != "0" ]]; then

@@ -4,22 +4,17 @@ echo 'Installing Required packages for Compass...'
 sudo yum clean all
 sudo yum update -y --skip-broken
 if [ "$tempest" == "true" ]; then
-    sudo yum --enablerepo=compass_install install -y virt-install libvirt qemu-kvm libxml2-devel libffi-devel libxslt-devel python-devel sshpass openssl-devel --nogpgcheck
+    sudo yum install -y virt-install libvirt qemu-kvm libxml2-devel libffi-devel libxslt-devel python-devel sshpass openssl-devel
     if [[ "$?" != "0" ]]; then
         echo "failed to install tempest yum dependency"
         exit 1
     fi
 fi

-if [ "$FULL_COMPASS_SERVER" == "true" ]; then
-    sudo yum --enablerepo=compass_install install -y $MYSQL
-    sudo yum --enablerepo=compass_install --nogpgcheck install -y rsyslog logrotate ntp iproute openssh-clients python python-devel git wget syslinux amqp rabbitmq-server mod_wsgi httpd squid dhcp bind rsync yum-utils xinetd tftp-server gcc net-snmp-utils net-snmp net-snmp-python unzip openssl openssl098e ca-certificates mysql-devel mysql-server mysql MySQL-python python-virtualenv python-setuptools python-pip bc libselinux-python libffi-devel openssl-devel
-else
-    sudo yum --enablerepo=compass_install --nogpgcheck install -y rsyslog logrotate ntp iproute openssh-clients python python-devel git wget syslinux amqp httpd dhcp bind rsync yum-utils xinetd tftp-server gcc net-snmp-utils net-snmp net-snmp-python unzip openssl openssl098e ca-certificates mysql-devel mysql MySQL-python python-virtualenv python-setuptools python-pip bc libselinux-python libffi-devel openssl-devel
-fi
+sudo yum install -y $MYSQL
+sudo yum install -y rsyslog logrotate ntp iproute openssh-clients python python-devel git wget syslinux amqp rabbitmq-server mod_wsgi httpd squid dhcp bind rsync yum-utils xinetd tftp-server gcc net-snmp-utils net-snmp net-snmp-python unzip openssl openssl098e ca-certificates mysql-devel mysql-server mysql MySQL-python python-virtualenv python-setuptools python-pip bc libselinux-python libffi-devel openssl-devel
 sudo yum --setopt=tsflags=noscripts -y remove redis
-# sudo yum --enablerepo=remi,remi-test install -y redis
-sudo yum --enablerepo=compass_install --nogpgcheck install -y redis
+sudo yum --enablerepo=remi,remi-test install -y redis

 if [[ "$?" != "0" ]]; then
     echo "failed to install yum dependency"

@@ -61,7 +56,5 @@ sudo systemctl enable sshd.service
 sudo systemctl enable rsyslog.service
 sudo systemctl enable ntpd.service
 sudo systemctl enable redis.service
-if [ "$FULL_COMPASS_SERVER" == "true" ]; then
-    sudo systemctl enable mysqld.service
-    sudo systemctl enable rabbitmq-server.service
-fi
+sudo systemctl enable mysqld.service
+sudo systemctl enable rabbitmq-server.service
@@ -1,14 +0,0 @@
-NIC=${NIC:-eth1}
-IPADDR=${IPADDR:-10.1.10.15}
-NETMASK=${NETMASK:-255.255.255.0}
-WEB_SOURCE=${WEB_SOURCE:-http://git.openstack.org/openstack/compass-web}
-ADAPTERS_SOURCE=${ADAPTERS_SOURCE:-https://gerrit.opnfv.org/gerrit/compass4nfv}
-OPTION_ROUTER=${OPTION_ROUTER:-10.1.10.15}
-NAMESERVER_DOMAINS=${NAMESERVER_DOMAINS:-ods.com}
-NAMESERVER_REVERSE_ZONES=${NAMESERVER_REVERSE_ZONES:-unused}
-NEXTSERVER=${NEXTSERVER:-10.1.10.15}
-IP_START=${IP_START:-10.1.10.100}
-IP_END=${IP_END:-10.1.10.200}
-LOCAL_REPO=${LOCAL_REPO:-}
-SUPPORT_CENTOS_7_2=${SUPPORT_CENTOS_7_2:-}
-SUPPORT_UBUNTU_14_04_03=${SUPPORT_UBUNTU_14_04_03:-}
@ -1,119 +1,91 @@
#####################################
# Config File for Compass Installer #
#####################################
-# Agent or Fullserver
-export FULL_COMPASS_SERVER="true"
-# User Email
-export USER_EMAIL="test@huawei.com"
-# User Password
-export USER_PASSWORD=
-# Compass Server Address, required if installing agent
-export COMPASS_API_SERVER="c.stack360.io"
# OS_INSTALLER indicates the tool for OS provisioning, default is 'cobbler'.
-export OS_INSTALLER="cobbler"
+export OS_INSTALLER=${OS_INSTALLER:-cobbler}

# PACKAGE_INSTALLER indicates the tool for Package provisioning, default is 'chef'.
-export PACKAGE_INSTALLER="package"
+export PACKAGE_INSTALLER=${PACKAGE_INSTALLER:-package}

# set remi release url
# export ATOMIC=${ATOMIC:-http://www6.atomicorp.com/channels/atomic/${IMAGE_TYPE,,}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/RPMS/atomic-release-1.0-19.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.art.noarch.rpm}
# export REMI=${REMI:-http://rpms.famillecollet.com/enterprise/remi-release-6.rpm}
-export REMI="http://rpms.famillecollet.com/enterprise/remi-release-7.rpm"
+export REMI=${REMI:-http://rpms.famillecollet.com/enterprise/remi-release-7.rpm}

-export EPEL="https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm"
+export EPEL=${EPEL:-https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm}

-export MYSQL="http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm"
+export MYSQL=${MYSQL:-http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm}

-export OPENSTACK_ANSIBLE_MODULE="https://github.com/openstack-ansible/openstack-ansible-modules.git"
+export OPENSTACK_ANSIBLE_MODULE=${OPENSTACK_ANSIBLE_MODULE:-https://github.com/openstack-ansible/openstack-ansible-modules.git}
+# service NIC
+export NIC=${NIC:-}
+export IPADDR=${IPADDR:-}
+export NETMASK=${NETMASK:-}

-# External network config for access remote compass server
+# DHCP config
-# External network NIC
-export PUBLIC_NIC="eth0"

-# External NIC IP address, use the IP assigned to PUBLIC_NIC by default
-export PUBLIC_IP=
-export PUBLIC_NETMASK=
-export PUBLIC_GATEWAY=

-# Install network config, it is a private network
-export NIC="eth1"
-export IPADDR="10.1.10.15"
-export NETMASK="255.255.255.0"

-# DHCP config for install network
# DHCP option router address(Default is your management interface IP address )"
-export OPTION_ROUTER="10.1.10.15"
+export OPTION_ROUTER=${OPTION_ROUTER:-}
# The IP range for DHCP clients (Default: local subnet start from 100 to 254)
-export IP_START="10.1.10.100"
+export IP_START=${IP_START:-}
-export IP_END="10.1.10.200"
+export IP_END=${IP_END:-}
# TFTP server's IP address(Default: Management Interface/eth0 IP)
-export NEXTSERVER="10.1.10.15"
+export NEXTSERVER=${NEXTSERVER:-}

-# The machines MAC list. it must be filled, any space or null is wrong in quotes:
-export switch_IP="127.0.0.1"
-export PXE_MACs=("port=300,mac=00:0c:29:8c:c3:b5"
-"port=301,mac=00:0c:29:87:79:9a"
-"port=302,mac=00:0c:29:73:74:41"
-"port=303,mac=00:0c:29:f9:eb:fd"
-"port=304,mac=00:0c:29:ac:e7:14")

# the domains covered by nameserver
-export NAMESERVER_DOMAINS="ods.com"
+export NAMESERVER_DOMAINS=${NAMESERVER_DOMAINS:-}
-export NAMESERVER_REVERSE_ZONES="unused"
+export NAMESERVER_REVERSE_ZONES=${NAMESERVER_REVERSE_ZONES:-}

# set the default cobbler user "cobbler" password, if not set, the default will be cobbler/cobbler
-export CBLR_USER=
+export CBLR_USER=${CBLR_USER:-}
-export CBLR_PASSWD=
+export CBLR_PASSWD=${CBLR_PASSWD:-}

# set default local repo flag
-export LOCAL_REPO=
+export LOCAL_REPO=${LOCAL_REPO:-}
-export SUPPORT_CENTOS_7_2=
+export SUPPORT_CENTOS_7_2=${SUPPORT_CENTOS_7_2:-}
-export SUPPORT_UBUNTU_14_04_03=
+export SUPPORT_UBUNTU_14_04_03=${SUPPORT_UBUNTU_14_04_03:-}

-export IMAGE_TYPE="CentOS"
+export IMAGE_TYPE=${IMAGE_TYPE:-"CentOS"}
-export IMAGE_TYPE_OTHER="el"
+export IMAGE_TYPE_OTHER=${IMAGE_TYPE_OTHER:-"el"}
-export IMAGE_VERSION_MAJOR="6"
+export IMAGE_VERSION_MAJOR=${IMAGE_VERSION_MAJOR:-"6"}
-export IMAGE_VERSION_NINOR="5"
+export IMAGE_VERSION_NINOR=${IMAGE_VERSION_MINOR:-"5"}
-export IMAGE_VERSION="${IMAGE_VERSION_MAJOR}.${IMAGE_VERSION_MINOR}"
+export IMAGE_VERSION=${IMAGE_VERSION:-"${IMAGE_VERSION_MAJOR}.${IMAGE_VERSION_MINOR}"}
-export IMAGE_NAME="${IMAGE_TYPE}-${IMAGE_VERSION}"
+export IMAGE_NAME=${IMAGE_NAME:-"${IMAGE_TYPE}-${IMAGE_VERSION}"}
-export IMAGE_ARCH="x86_64"
+export IMAGE_ARCH=${IMAGE_ARCH:-"x86_64"}

-export CENTOS_7_2_IMAGE_SOURCE="http://205.177.226.237:9999/CentOS-7-x86_64-Minimal-1511.iso"
+export CENTOS_7_2_IMAGE_SOURCE=${CENTOS_7_2_IMAGE_SOURCE:-"http://205.177.226.237:9999/CentOS-7-x86_64-Minimal-1511.iso"}
-export UBUNTU_14_04_03_IMAGE_SOURCE="http://205.177.226.237:9999/ubuntu-14.04.3-server-amd64.iso"
+export UBUNTU_14_04_03_IMAGE_SOURCE=${UBUNTU_14_04_03_IMAGE_SOURCE:-"http://205.177.226.237:9999/ubuntu-14.04.3-server-amd64.iso"}
-export CENTOS_7_2_PPA_REPO_SOURCE="http://205.177.226.237:9999/centos7-juno-ppa.tar.gz"
+export CENTOS_7_2_PPA_REPO_SOURCE=${CENTOS_7_2_PPA_REPO_SOURCE:-"http://205.177.226.237:9999/centos7-juno-ppa.tar.gz"}
-export UBUNTU_14_04_03_PPA_REPO_SOURCE="http://205.177.226.237:9999/trusty-mitaka-ppa.tar.gz"
+export UBUNTU_14_04_03_PPA_REPO_SOURCE=${UBUNTU_14_04_03_PPA_REPO_SOURCE:-"http://205.177.226.237:9999/trusty-liberty-ppa.tar.gz"}
-export PIP_PACKAGES="http://205.177.226.237:9999/pip.tar.gz"
+export PIP_PACKAGES=${PIP_PACKAGES:-"http://205.177.226.237:9999/pip.tar.gz"}
-export EXTRA_PACKAGES="http://205.177.226.237:9999/packages.tar.gz"

-export COBBLER_PASSWORD="cobbler"
+export COBBLER_PASSWORD=${COBBLER_PASSWORD:-"cobbler"}

-export COBBLER_LOADERS_SOURCE="https://s3-us-west-1.amazonaws.com/compass-local-repo/loaders.tar.gz"
+export COBBLER_LOADERS_SOURCE=${COBBLER_LOADERS_SOURCE:-"https://s3-us-west-1.amazonaws.com/compass-local-repo/loaders.tar.gz"}
-export COBBLER_LOADERS_SOURCE_ASIA="http://huawei-repo.uubright.com/repos/compass/loaders.tar.gz"
+export COBBLER_LOADERS_SOURCE_ASIA=${COBBLER_LOADERS_SOURCE_ASIA:-"http://huawei-repo.uubright.com/repos/compass/loaders.tar.gz"}

# Currently the use of Javascript MVC is set to version 3.2.4
-export JS_MVC="javascriptmvc-3.2.4"
+export JS_MVC=${JS_MVC:-"javascriptmvc-3.2.4"}

# set the chef packages download path
-export CHEF_SRV="http://opscode-omnibus-packages.s3.amazonaws.com/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-server-11.0.8-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"
+export CHEF_SRV=${CHEF_SRV:-"http://opscode-omnibus-packages.s3.amazonaws.com/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-server-11.0.8-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"}
-export CHEF_SRV_HUAWEI="http://huawei-repo.uubright.com/repos/compass/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-server-11.0.8-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"
+export CHEF_SRV_HUAWEI=${CHEF_SRV_HUAWEI:-"http://huawei-repo.uubright.com/repos/compass/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-server-11.0.8-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"}
-export CHEF_CLIENT="https://opscode-omnibus-packages.s3.amazonaws.com/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-11.10.4-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"
+export CHEF_CLIENT=${CHEF_CLIENT:-"https://opscode-omnibus-packages.s3.amazonaws.com/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-11.10.4-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"}
-export CHEF_CLIENT_HUAWEI="http://huawei-repo.uubright.com/repos/compass/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-11.10.4-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"
+export CHEF_CLIENT_HUAWEI=${CHEF_CLIENT_HUAWEI:-"http://huawei-repo.uubright.com/repos/compass/${IMAGE_TYPE_OTHER}/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-11.10.4-1.${IMAGE_TYPE_OTHER}${IMAGE_VERSION_MAJOR}.${IMAGE_ARCH}.rpm"}

# Set Chef password for Chef web UI
-export CHEF_PASSWD="root1234"
+export CHEF_PASSWD=${CHEF_PASSWD:-"root1234"}

# Set Compass-web and Compass-adpater variables
-export WEB_HOME="/tmp/web"
+export WEB_HOME=${WEB_HOME:-'/tmp/web'}
-export ADAPTERS_HOME="/tmp/adapter"
+export ADAPTERS_HOME=${ADAPTERS_HOME:-'/tmp/adapter'}
-export WEB_SOURCE=
+export WEB_SOURCE=${WEB_SOURCE:-}
-export ADAPTERS_SOURCE=
+export ADAPTERS_SOURCE=${ADAPTERS_SOURCE:-}

# Set compass environment
export SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
export COMPASSDIR=${SCRIPT_DIR}/..
-export TESTMODE="False"
+export TESTMODE=${TESTMODE:-"False"}

# Set Local Repo sources
-export LOCAL_REPO_US="https://s3-us-west-1.amazonaws.com/compass-local-repo"
+export LOCAL_REPO_US=${LOCAL_REPO_US:-"https://s3-us-west-1.amazonaws.com/compass-local-repo"}
-export LOCAL_REPO_HUAWEI="http://huawei-repo.uubright.com/repos/compass"
+export LOCAL_REPO_HUAWEI=${LOCAL_REPO_HUAWEI:-"http://huawei-repo.uubright.com/repos/compass"}
@ -1,6 +1,6 @@
#!/bin/bash
#
-#set -x
### Log the script all outputs locally
exec > >(sudo tee install.log)
exec 2>&1
@ -48,7 +48,6 @@ _prepare_locking()
_prepare_locking

# PUBLIC

exlock_now()
{
_lock xn || exit 1
@ -60,93 +59,10 @@ if [[ "$?" != "0" ]]; then
exit 1
fi

-set_iptables()
+### BEGIN OF SCRIPT ###
-{
-# external_ipaddr=$1; install_ipaddr=$2; install_netmask=$3

-local argument_error="ERROR: argument ARG_NUM is invalidation that is for ARG_DESC"
-local varnames=("3" "external_ipaddr" "install_ipaddr" "install_netmask")
-if [ $# -lt ${varnames[0]} ];then
-echo "ERROR: please input ${varnames[0]} arguments to call function _set_iptables()";exit 1
-fi
-local i=1
-while [ $1 ];do
-eval "${varnames[i]}"=$1
-sudo ipcalc $1 -c
-if [ $? -ne 0 ];then
-echo ${argument_error} | sed 's/ARG_NUM/'"$i"'/g' | sed 's/ARG_DESC/g'"${varnames[i]}"'/g';exit 1
-fi
-let i++;shift
-done

-local install_netaddr=$(sudo ipcalc ${install_ipaddr} ${install_netmask} -n | awk -F = '{print $2}')
-local install_netprefix=$(sudo ipcalc ${install_ipaddr} ${install_netmask} -p | awk -F = '{print $2}')

-sudo sed -i '/^\s*net\.ipv4\.ip_forward\s*=/d' /etc/sysctl.conf
-sudo sed -i '$a net.ipv4.ip_forward=1' /etc/sysctl.conf
-sudo sysctl -p

-sudo rpm -qa | grep iptables-services
-if [ $? -ne 0 ]; then
-sudo yum -y install iptables-services
-fi

-sudo /bin/systemctl status iptables.service
-if [ $? -eq 0 ];then
-sudo /usr/sbin/service iptables save
-sudo /bin/systemctl stop iptables.service
-fi

-sudo mkdir /etc/sysconfig/iptables.bak.d 2>/dev/null
-if [ -f /etc/sysconfig/iptables ];then
-sudo mv -f /etc/sysconfig/iptables /etc/sysconfig/iptables.bak.d/$(uuidgen)
-fi

-iptables_config=" *filter\n
-:INPUT ACCEPT [0:0]\n
-:FORWARD ACCEPT [0:0]\n
-:OUTPUT ACCEPT [0:0]\n
-COMMIT\n
-*nat\n
-:PREROUTING ACCEPT [0:0]\n
-:INPUT ACCEPT [0:0]\n
-:OUTPUT ACCEPT [0:0]\n
-:POSTROUTING ACCEPT [0:0]\n
--A POSTROUTING -s ${install_ipaddr}/32 -j ACCEPT\n
--A POSTROUTING -s ${install_netaddr}/${install_netprefix} -j SNAT --to-source ${external_ipaddr}\n
-COMMIT\n"
-sudo echo -e ${iptables_config} | sed 's/^\s*//g' > /etc/sysconfig/iptables

-sudo /bin/systemctl enable iptables
-sudo /bin/systemctl start iptables.service
-}

-# convert between ip address and integers
-ipaddr_to_int()
-{
-ipaddr=$1
-IFS=. read -r a b c d <<< "$ipaddr"
-printf '%d\n' "$((a * 256 ** 3 + b * 256 ** 2 + c * 256 + d))"
-}
-int_to_ipaddr()
-{
-ipint=$1
-let a=ipint/$[256**3];let ipint%=$[256**3]
-let b=ipint/$[256**2];let ipint%=$[256**2]
-let c=ipint/256;let ipint%=256
-let d=ipint
-printf '%d.%d.%d.%d\n' $a $b $c $d
-}


-### BEGIN OF SCRIPT

DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )

-### LOAD FILE CONFIGURATION
source $DIR/install.conf


### Change selinux security policy
sudo setenforce 0
sudo sed -i 's/enforcing/disabled/g' /etc/selinux/config
@ -200,129 +116,142 @@ fi
|
|||||||
|
|
||||||
# Install figlet
|
# Install figlet
|
||||||
sudo yum -y install figlet >& /dev/null
|
sudo yum -y install figlet >& /dev/null
|
||||||
# if [[ "$?" != "0" ]]; then
|
if [[ "$?" != "0" ]]; then
|
||||||
# echo "failed to install figlet"
|
echo "failed to install figlet"
|
||||||
# exit 1
|
exit 1
|
||||||
# else
|
else
|
||||||
# echo "figlet is installed"
|
echo "figlet is installed"
|
||||||
# fi
|
fi
|
||||||
figlet -ctf slant Compass Installer
|
figlet -ctf slant Compass Installer
|
||||||
|
|
||||||
while [ $1 ]; do
|
while [ $1 ]; do
|
||||||
flags=$1
|
flags=$1
|
||||||
param=${flags/'--'/''}
|
param=${flags/'--'/''}
|
||||||
var=$(echo $param | awk -F = '{print $1}')
|
var=$(echo $param | cut -d"=" -f1)
|
||||||
val=$(echo $param | awk -F = '{print $2}')
|
val=$(echo $param | cut -d"=" -f2)
|
||||||
eval export $var=$val
|
export $var=$val
|
||||||
shift
|
shift
|
||||||
done
|
done
|
||||||
|
|
||||||
yum update -y
|
# convert ip address to int
|
||||||
sudo rpm -qa | grep net-tools
|
ipaddr_convert()
|
||||||
if [ $? -ne 0 ];then
|
{
|
||||||
yum -y install net-tools
|
ipaddr=$1
|
||||||
fi
|
IFS=. read -r a b c d <<< "$ipaddr"
|
||||||
|
printf '%d\n' "$((a * 256 ** 3 + b * 256 ** 2 + c * 256 + d))"
|
||||||
|
}
|
||||||
|
|
||||||
# check the correct format of ip variables
|
# Load variables
|
||||||
ip_vars="PUBLIC_IP PUBLIC_NETMASK PUBLIC_GATEWAY
|
loadvars()
|
||||||
IPADDR NETMASK
|
{
|
||||||
OPTION_ROUTER NEXTSERVER IP_START IP_END"
|
varname=${1,,}
|
||||||
for ip_var in ${ip_vars}; do
|
eval var=\$$(echo $1)
|
||||||
eval ip_val=\$${ip_var}
|
|
||||||
if [ ! -z ${ip_val} ];then
|
if [[ -z $var ]]; then
|
||||||
ipcalc ${ip_val} -c
|
echo -e "\x1b[32mPlease enter the $varname (Example: $2):\x1b[37m"
|
||||||
if [ $? -ne 0 ];then
|
while read input
|
||||||
echo "The variable of '${ip_var}'='${ip_val}' is invalid."
|
do
|
||||||
exit 1
|
if [ "$input" == "" ]; then
|
||||||
|
echo "Default $varname '$2' chosen"
|
||||||
|
export $(echo $1)="$2"
|
||||||
|
break
|
||||||
|
else
|
||||||
|
echo "You have entered $input"
|
||||||
|
export $(echo $1)="$input"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
fi
|
fi
|
||||||
fi
|
}
|
||||||
done
|
yum -y install net-tools
|
||||||
|
loadvars NIC "eth0"
|
||||||
# public network variables:
|
sudo ifconfig $NIC
|
||||||
export PUBLIC_NIC=${PUBLIC_NIC:-"eth0"}
|
if [ $? -ne 0 ]; then
|
||||||
export PUBLIC_IP=${PUBLIC_IP:-$(sudo ifconfig ${PUBLIC_NIC} | awk '($1=="inet"){print $2}')}
|
echo "There is no nic '$NIC' yet"
|
||||||
export PUBLIC_GATEWAY=${PUBLIC_GATEWAY:-$(sudo route -n | awk '($1=="0.0.0.0" && $3=="0.0.0.0"){print $2}')}
|
exit 1
|
||||||
|
fi
|
||||||
if [ -z ${PUBLIC_IP} ];then
|
# sudo ifconfig $NIC | grep 'inet addr:' >& /dev/null
|
||||||
echo "ERROR: There is not any PUBLIC_IP to be set yet, please assign an IP to PUBLIC_NIC or configure 'install.conf' first."
|
sudo ifconfig $NIC |grep 'inet '| cut -d ' ' -f10 >& /dev/null
|
||||||
exit 1
|
if [ $? -ne 0 ]; then
|
||||||
elif [ -z ${PUBLIC_GATEWAY} ];then
|
echo "There is not any IP address assigned to the NIC '$NIC' yet, please assign an IP address first."
|
||||||
echo "WARNING: There is not any PUBLIC_GATEWAY, please ensure that the agent server can access remote compass center if no gateway."
|
|
||||||
sleep 2
|
|
||||||
fi
|
|
||||||
|
|
||||||
export PUBLIC_NETMASK=${PUBLIC_NETMASK:-$(sudo ifconfig ${PUBLIC_NIC} | awk '($3=="netmask"){print $4}')}
|
|
||||||
export PUBLIC_NETMASK=${PUBLIC_NETMASK:-$(sudo ipcalc ${PUBLIC_IP} -m | awk -F = '{print $2}')}
|
|
||||||
|
|
||||||
if [[ $(ipcalc ${PUBLIC_IP} ${PUBLIC_NETMASK} -n) != $(ipcalc ${PUBLIC_GATEWAY} ${PUBLIC_NETMASK} -n) ]];then
|
|
||||||
echo "ERROR: The PUBLIC_IP:${PUBLIC_IP} and PUBLIC_GATEWAY:${PUBLIC_GATEWAY} are not in the same subnet, please check the configuration."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
sudo ifconfig ${PUBLIC_NIC} ${PUBLIC_IP} netmask ${PUBLIC_NETMASK} up
|
|
||||||
|
|
||||||
if [ ! -z ${PUBLIC_GATEWAY} ];then
|
|
||||||
sudo route del -net 0.0.0.0/0
|
|
||||||
sudo route add -net 0.0.0.0/0 gw ${PUBLIC_GATEWAY}
|
|
||||||
fi
|
|
||||||
|
|
||||||
# install network variables:
|
|
||||||
export NIC=${NIC:-"eth1"}
|
|
||||||
export IPADDR=${IPADDR:-$(sudo ifconfig ${NIC} | awk '($1=="inet"){print $2}')}
|
|
||||||
export IPADDR=${IPADDR:-"10.1.0.15"}
|
|
||||||
export NETMASK=${NETMASK:-$(sudo ifconfig ${NIC} | awk '($3=="netmask"){print $4}')}
|
|
||||||
export NETMASK=${NETMASK:-"255.255.255.0"}
|
|
||||||
|
|
||||||
sudo ifconfig ${NIC} ${IPADDR} netmask ${NETMASK} up
|
|
||||||
|
|
||||||
export OPTION_ROUTER=${OPTION_ROUTE:-${IPADDR}}
|
|
||||||
export NEXTSERVER=${NEXTSERVER:-${IPADDR}}
|
|
||||||
|
|
||||||
if [ -z ${IP_START} ];then
|
|
||||||
temp_int=$(ipaddr-to-int ${IPADDR})
|
|
||||||
let temp_int++
|
|
||||||
IP_START=$(int-to-ipaddr ${temp_int})
|
|
||||||
fi
|
|
||||||
export IP_START
|
|
||||||
|
|
||||||
if [ -z ${IP_END} ];then
|
|
||||||
broad_addr=$(sudo ipcalc ${IPADDR} ${NETMASK} -b | awk -F = '{print $2}')
|
|
||||||
temp_int=$(ipadd-to-int ${broad_addr})
|
|
||||||
let temp_int--
|
|
||||||
IP_END=$(int-to-ipaddr ${temp_int})
|
|
||||||
fi
|
|
||||||
export IP_END
|
|
||||||
|
|
||||||
# check the validation of IP_START and IP_END
|
|
||||||
for ip_var in IP_START IP_END;do
|
|
||||||
if [[ $(eval ipcalc \$${ip_var} ${NETMASK} -n) != $(ipcalc ${IPADDR} ${NETMASK} -n) ]];then
|
|
||||||
eval echo "ERROR: The ${ip_var}:\$${ip_var} and install nic are not in the same subnet.";
|
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
|
||||||
done
|
|
||||||
ip_start_int=$(ipaddr_to_int ${IP_START})
|
|
||||||
ip_end_int=$(ipaddr_to_int ${IP_END})
|
|
||||||
let ip_range=${ip_end_int}-${ip_start_int}
|
|
||||||
if [ ${ip_range} -le 0 ];then
|
|
||||||
echo "There is no avialable IPs between IP_START:'${IP_START}' and IP_END:'${IP_END}'."
|
|
||||||
exit 1
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# print all variables about IP
|
export ipaddr=$(ifconfig $NIC | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}')
|
||||||
for ip_var in ${ip_vars};do
|
echo " this line "
|
||||||
eval echo "${ip_var}=\$${ip_var}"
|
if [ -z "$ipaddr" ]; then
|
||||||
done
|
export ipaddr=$(ifconfig $NIC | grep 'inet ' | sed 's/^[ \t]*//g' | sed 's/[ \t]\+/ /g' | cut -d' ' -f2)
|
||||||
|
fi
|
||||||
|
loadvars IPADDR ${ipaddr}
|
||||||
|
ipcalc $IPADDR -c
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
echo "ip addr $IPADDR format should be x.x.x.x"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
export netmask=$(ifconfig $NIC | grep Mask | cut -d: -f4)
|
||||||
|
if [ -z "$netmask" ]; then
|
||||||
|
export netmask=$(ifconfig $NIC | grep netmask | sed 's/^[ \t]*//g' | sed 's/[ \t]\+/ /g' | cut -d' ' -f4)
|
||||||
|
fi
|
||||||
|
loadvars NETMASK ${netmask}
|
||||||
|
export netaddr=$(ipcalc $IPADDR $NETMASK -n |cut -f 2 -d '=')
|
||||||
|
export netprefix=$(ipcalc $IPADDR $NETMASK -p |cut -f 2 -d '=')
|
||||||
|
subnet=${netaddr}/${netprefix}
|
||||||
|
ipcalc $subnet -c
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
echo "subnet $subnet format should be x.x.x.x/x"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
loadvars OPTION_ROUTER $(route -n | grep '^0.0.0.0' | xargs | cut -d ' ' -f 2)
|
||||||
|
ipcalc $OPTION_ROUTER -c
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
echo "router $OPTION_ROUTER format should be x.x.x.x"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
export ip_start=$(echo "$IPADDR"|cut -f 1,2,3 -d '.')."100"
|
||||||
|
export ip_end=$(echo "$IPADDR"|cut -f 1,2,3 -d '.')."250"
|
||||||
|
loadvars IP_START "$ip_start"
|
||||||
|
ipcalc $IP_START -c
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
echo "ip start $IP_START format should be x.x.x.x"
|
||||||
|
exit 1
|
||||||
|
else
|
||||||
|
echo "ip start address is $IP_START"
|
||||||
|
fi
|
||||||
|
ip_start_net=$(ipcalc $IP_START $NETMASK -n |cut -f 2 -d '=')
|
||||||
|
if [[ "$ip_start_net" != "$netaddr" ]]; then
|
||||||
|
echo "ip start $IP_START is not in $subnet"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
loadvars IP_END "$ip_end"
|
||||||
|
ipcalc $IP_END -c
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
echo "ip end $IP_END format should be x.x.x.x"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
ip_end_net=$(ipcalc $IP_END $NETMASK -n |cut -f 2 -d '=')
|
||||||
|
if [[ "$ip_end_net" != "$netaddr" ]]; then
|
||||||
|
echo "ip end $IP_END is not in $subnet"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
ip_start_int=$(ipaddr_convert $IP_START)
|
||||||
|
ip_end_int=$(ipaddr_convert $IP_END)
|
||||||
|
let ip_range=${ip_end_int}-${ip_start_int}
|
||||||
|
if [ $ip_range -le 0 ]; then
|
||||||
|
echo "there is no available ips to assign between $IP_START and $IP_END"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "there will be at most $ip_range hosts deployed."
|
||||||
|
loadvars NEXTSERVER $IPADDR
|
||||||
|
ipcalc $NEXTSERVER -c
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
echo "next server $NEXTSERVER format should be x.x.x.x"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
export NAMESERVER_DOMAINS=${NAMESERVER_DOMAINS:-"ods.com"}
|
loadvars NAMESERVER_DOMAINS "ods.com"
|
||||||
export NAMESERVER_REVERSE_ZONES=${NAMESERVER_REVERSE_ZONES:-"unused"}
|
loadvars NAMESERVER_REVERSE_ZONES "unused"
|
||||||
export WEB_SOURCE=${WEB_SOURCE:-"http://git.openstack.org/openstack/compass-web"}
|
loadvars WEB_SOURCE 'http://git.openstack.org/openstack/compass-web'
|
||||||
export ADAPTERS_SOURCE=${ADAPTERS_SOURCE:-"https://gerrit.opnfv.org/gerrit/compass4nfv"}
|
loadvars ADAPTERS_SOURCE 'https://gerrit.opnfv.org/gerrit/compass4nfv'
|
||||||
|
|
||||||
echo "set the iptables' rules so that the openstack hosts installed can access remote compass through agent server"
|
|
||||||
set_iptables ${PUBLIC_IP} ${IPADDR} ${NETMASK}
|
|
||||||
|
|
||||||
rm -rf /etc/yum.repos.d/compass_install.repo 2>/dev/nullcp
|
|
||||||
cp ${COMPASSDIR}/misc/compass_install.repo /etc/yum.repos.d/
|
|
||||||
|
|
||||||
echo "script dir: $SCRIPT_DIR"
|
echo "script dir: $SCRIPT_DIR"
|
||||||
echo "compass dir is $COMPASSDIR"
|
echo "compass dir is $COMPASSDIR"
|
||||||
@ -343,66 +272,11 @@ echo "Install the Package Installer Tool"
# source ${COMPASSDIR}/install/chef.sh || exit $?
source ${COMPASSDIR}/install/ansible.sh || exit $?

-echo "Install log agent."
-source ${COMPASSDIR}/install/logstash-forwarder.sh || exit $?
+echo "Download and install Compass Web"
+source ${COMPASSDIR}/install/compass_web.sh || exit $?

-if [ "$FULL_COMPASS_SERVER" == "true" ]; then
-echo "Download and install Compass Web"
-source ${COMPASSDIR}/install/compass_web.sh || exit $?
-fi

echo "Download and Setup Compass and related services"
source ${COMPASSDIR}/install/compass.sh || exit $?

figlet -ctf slant Installation Complete!
echo -e "It takes\x1b[32m $SECONDS \x1b[0mseconds during the installation."

-if [ "$FULL_COMPASS_SERVER" == "false" ]; then
-machine_list_conf="MACHINE_LIST = [ { '${switch_IP}': [ "
-for host in ${PXE_MACs[@]}; do
-port=$(echo ${host} | awk -F , '{print $1}' | awk -F = '{print $2}')
-mac=$(echo ${host} | awk -F , '{print $2}' | awk -F = '{print $2}')
-machine_list_conf="${machine_list_conf}${comma}\n{'port': '${port}', 'mac': '${mac}', 'vlan': '0'}"
-comma=","
-done
-machine_list_conf="${machine_list_conf}\n ] } ]"
-sudo echo -e ${machine_list_conf} > /etc/compass/machine_list/machine_list.conf

-# rm -rf /var/ansible/roles/keystone/vars/Debian.yml 2>/dev/null
-# cp ${COMPASSDIR}/misc/adapter_changes/Debian.yml /var/ansible/roles/keystone/vars/
-# rm -rf /var/ansible/roles/keystone/tasks/keystone_install.yml 2>/dev/null
-# cp ${COMPASSDIR}/misc/adapter_changes/keystone_install.yml /var/ansible/roles/keystone/tasks/
-# rm -rf /var/ansible/openstack_mitaka/HA-ansible-multinodes.yml 2>/dev/null
-# cp ${COMPASSDIR}/misc/adapter_changes/HA-ansible-multinodes.yml /var/ansible/openstack_mitaka/
-rm -rf /var/lib/cobbler/snippets/preseed_post_anamon 2>/dev/null
-cp ${COMPASSDIR}/misc/adapter_changes/preseed_post_anamon_remote /var/lib/cobbler/snippets/preseed_post_anamon
-else
-rm -rf /var/lib/cobbler/snippets/preseed_post_anamon 2>/dev/null
-cp ${COMPASSDIR}/misc/adapter_changes/preseed_post_anamon_local /var/lib/cobbler/snippets/preseed_post_anamon
-fi

-# sudo sed -i 's/^CELERY_DEFAULT_QUEUE.*/CELERY_DEFAULT_QUEUE = \"'"${USER_EMAIL}"'\"/g' /etc/compass/celeryconfig
-# sudo sed -i 's/^CELERY_DEFAULT_EXCHANGE.*/CELERY_DEFAULT_EXCHANGE = \"'"${USER_EMAIL}"'\"/g' /etc/compass/celeryconfig
-# sudo sed -i 's/^CELERY_DEFAULT_ROUTING_KEY.*/CELERY_DEFAULT_ROUTING_KEY = \"'"${USER_EMAIL}"'\"/g' /etc/compass/celeryconfig

-# Restart services
-systemctl restart httpd.service
-sleep 10
-echo "Checking if httpd is running"
-sudo systemctl status httpd.service
-if [[ "$?" == "0" ]]; then
-echo "httpd is running"
-else
-echo "httpd is not running"
-exit 1
-fi

-systemctl restart compass-celeryd.service
-echo "Checking if httpd is running"
-sudo systemctl status compass-celeryd.service
-if [[ "$?" == "0" ]]; then
-echo "celeryd is running"
-else
-echo "celeryd is not running"
-exit 1
-fi
@ -1,22 +0,0 @@
-#!/bin/bash
-#
-
-echo "Installing logstash-forwarder"
-DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-source $DIR/install.conf
-
-sudo cp -rf $COMPASSDIR/misc/logstash-forwarder/logstash-forwarder.repo /etc/yum.repos.d/logstash-forwarder.repo
-sudo yum -y install logstash-forwarder
-sudo rm -rf /etc/logstash-forwarder.conf
-sudo cp -rf $COMPASSDIR/misc/logstash-forwarder/logstash-forwarder.conf /etc/logstash-forwarder.conf
-sudo mkdir -p /etc/pki/tls/certs
-sudo cp -rf $COMPASSDIR/misc/logstash-forwarder/logstash-forwarder.crt /etc/pki/tls/certs/logstash-forwarder.crt
-
-sudo systemctl restart logstash-forwarder.service
-sleep 3
-echo "checking if logstash-forwarder is running"
-sudo systemctl status logstash-forwarder.service
-if [[ "$?" != 0 ]]; then
-echo "logstash-forwarder is not running"
-exit
-fi
@ -104,61 +104,58 @@ else
fi

#update mysqld
-if [ "$FULL_COMPASS_SERVER" == "true" ]; then
-echo "update mysqld"
-mkdir -p /var/log/mysql
-chmod -R 777 /var/log/mysql
-sleep 10
-systemctl restart mysql.service
-sudo sleep 10
-systemctl status mysql.service
-if [[ "$?" != "0" ]]; then
-echo "failed to restart mysqld"
-exit 1
-else
-echo "mysqld restarted"
-fi
+echo "update mysqld"
+mkdir -p /var/log/mysql
+chmod -R 777 /var/log/mysql
+sleep 10
+systemctl restart mysql.service
+sudo sleep 10
+systemctl status mysql.service
+if [[ "$?" != "0" ]]; then
+echo "failed to restart mysqld"
+exit 1
+else
+echo "mysqld restarted"
+fi
+MYSQL_USER=${MYSQL_USER:-root}
+MYSQL_OLD_PASSWORD=${MYSQL_OLD_PASSWORD:-root}
+MYSQL_PASSWORD=${MYSQL_PASSWORD:-root}
+MYSQL_SERVER=${MYSQL_SERVER:-127.0.0.1}
+MYSQL_PORT=${MYSQL_PORT:-3306}
+MYSQL_DATABASE=${MYSQL_DATABASE:-compass}
+# first time set mysql password
+sudo mysqladmin -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u ${MYSQL_USER} -p"${MYSQL_OLD_PASSWORD}" password ${MYSQL_PASSWORD}
+if [[ "$?" != "0" ]]; then
+echo "setting up mysql initial password"
+sudo mysqladmin -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u ${MYSQL_USER} password ${MYSQL_PASSWORD}
+fi
+mysql -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} -e "show databases;"
+if [[ "$?" != "0" ]]; then
+echo "mysql password set failed"
+exit 1
+else
+echo "mysql password set succeeded"
+fi

-MYSQL_USER=${MYSQL_USER:-root}
-MYSQL_OLD_PASSWORD=${MYSQL_OLD_PASSWORD:-root}
-MYSQL_PASSWORD=${MYSQL_PASSWORD:-root}
-MYSQL_SERVER=${MYSQL_SERVER:-127.0.0.1}
-MYSQL_PORT=${MYSQL_PORT:-3306}
-MYSQL_DATABASE=${MYSQL_DATABASE:-compass}
-# first time set mysql password
-sudo mysqladmin -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u ${MYSQL_USER} -p"${MYSQL_OLD_PASSWORD}" password ${MYSQL_PASSWORD}
-if [[ "$?" != "0" ]]; then
-echo "setting up mysql initial password"
-sudo mysqladmin -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u ${MYSQL_USER} password ${MYSQL_PASSWORD}
-fi
-mysql -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} -e "show databases;"
-if [[ "$?" != "0" ]]; then
-echo "mysql password set failed"
-exit 1
-else
-echo "mysql password set succeeded"
-fi
+sudo mysql -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} -e "drop database ${MYSQL_DATABASE}"
+sudo mysql -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} -e "create database ${MYSQL_DATABASE}"
+if [[ "$?" != "0" ]]; then
+echo "mysql database set failed"
+exit 1
+fi

-sudo mysql -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} -e "drop database ${MYSQL_DATABASE}"
-sudo mysql -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} -e "create database ${MYSQL_DATABASE}"
+sudo systemctl restart mysql.service
+sudo systemctl status mysql.service
if [[ "$?" != "0" ]]; then
-echo "mysql database set failed"
+echo "mysqld is not started"
exit 1
fi

-sudo systemctl restart mysql.service
-sudo systemctl status mysql.service
+sudo systemctl restart rabbitmq-server.service
+sudo systemctl status rabbitmq-server.service
if [[ "$?" != "0" ]]; then
-echo "mysqld is not started"
+echo "rabbitmq-server is not started"
exit 1
-fi

-sudo systemctl restart rabbitmq-server.service
-sudo systemctl status rabbitmq-server.service
-if [[ "$?" != "0" ]]; then
-echo "rabbitmq-server is not started"
-exit 1
-fi
fi

cd $SCRIPT_DIR
@ -241,7 +238,6 @@ if [[ "$?" != "0" ]]; then
exit 1
fi
pip install -U -r test-requirements.txt
-pip install -U boto
if [[ "$?" != "0" ]]; then
echo "failed to install compass test requiremnts"
deactivate
@ -1,25 +0,0 @@
-#!/bin/bash
-#
-#set -x
-### Register current user to compass
-source install.conf
-DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-echo "The email address you use to register is ${USER_EMAIL}"
-password=`< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c6`
-USER_PASSWORD="${USER_PASSWORD:-$password}"
-data=`echo "{\"email\":\"${USER_EMAIL}\",\"password\":\"${USER_PASSWORD}\"}"`
-if [ "$FULL_COMPASS_SERVER" == "true" ]; then
-COMPASS_API_SERVER="127.0.0.1"
-fi
-wget -O /tmp/user_info --post-data=$data --header=Content-Type:application/json "http://$COMPASS_API_SERVER/api/users/register"
-if [ $? -ne 0 ]; then
-echo "Register failed"
-exit 1
-fi
-
-echo "Register suceeded, your password is $USER_PASSWORD, please remember your password at all times."
-sudo sed -i 's/^CELERY_DEFAULT_QUEUE.*/CELERY_DEFAULT_QUEUE = \"'"${USER_EMAIL}"'\"/g' /etc/compass/celeryconfig
-sudo sed -i 's/^CELERY_DEFAULT_EXCHANGE.*/CELERY_DEFAULT_EXCHANGE = \"'"${USER_EMAIL}"'\"/g' /etc/compass/celeryconfig
-sudo sed -i 's/^CELERY_DEFAULT_ROUTING_KEY.*/CELERY_DEFAULT_ROUTING_KEY = \"'"${USER_EMAIL}"'\"/g' /etc/compass/celeryconfig
-
-systemctl restart compass-celeryd.service
@ -1,25 +0,0 @@
-#############################################
-# Config File for Standalone Compass Server #
-#############################################
-export REMI="http://rpms.famillecollet.com/enterprise/remi-release-7.rpm"
-
-export EPEL="https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm"
-
-export MYSQL="http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm"
-
-# Compass web related variables
-export WEB_HOME="/tmp/web"
-export WEB_SOURCE=
-
-#set compass environment
-export SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-export COMPASSDIR=${SCRIPT_DIR}/..
-export TESTMODE="False"
-
-# Set Local Repo sources
-export LOCAL_REPO_US="https://s3-us-west-1.amazonaws.com/compass-local-repo"
-export LOCAL_REPO_HUAWEI="http://huawei-repo.uubright.com/repos/compass"
-
-# package hosting sources
-export PIP_PACKAGES="http://205.177.226.237:9999/pip.tar.gz"
-export EXTRA_PACKAGES="http://205.177.226.237:9999/packages.tar.gz"
@ -1,461 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# set -x
|
|
||||||
### Log the script outputs locally
|
|
||||||
exec > >(sudo tee standalone_server.log)
|
|
||||||
exec 2>&1
|
|
||||||
|
|
||||||
### Lock to prevent running multiple instances of the script.
|
|
||||||
LOCKFILE="/tmp/`basename $0`"
|
|
||||||
LOCKFD=99
|
|
||||||
|
|
||||||
if [ -f $LOCKFILE ]; then
|
|
||||||
LOCKED_PID=$(cat $LOCKFILE | head -n 1)
|
|
||||||
ps -p $LOCKED_PID &> /dev/null
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "the progress of pid $LOCKED_PID does not exist: `ps -p $LOCKED_PID`"
|
|
||||||
rm -f $LOCKFILE
|
|
||||||
else
|
|
||||||
echo "the progress of pid $LOCKED_PID is running: `ps -p $LOCKED_PID`"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "$LOCKFILE does not exist"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# PRIVATE
|
|
||||||
_lock()
|
|
||||||
{
|
|
||||||
echo "lock $LOCKFILE"
|
|
||||||
flock -$1 $LOCKFD
|
|
||||||
pid=$$
|
|
||||||
echo $pid 1>& $LOCKFD
|
|
||||||
}
|
|
||||||
|
|
||||||
_no_more_locking()
|
|
||||||
{
|
|
||||||
_lock u
|
|
||||||
_lock xn && rm -f $LOCKFILE
|
|
||||||
}
|
|
||||||
|
|
||||||
_prepare_locking()
|
|
||||||
{
|
|
||||||
eval "exec $LOCKFD>\"$LOCKFILE\""
|
|
||||||
trap _no_more_locking EXIT
|
|
||||||
}
|
|
||||||
|
|
||||||
# ON START
|
|
||||||
_prepare_locking
|
|
||||||
|
|
||||||
# PUBLIC
|
|
||||||
|
|
||||||
exlock_now()
|
|
||||||
{
|
|
||||||
_lock xn || exit 1
|
|
||||||
} # obtain an exclusive lock immediately or fail
|
|
||||||
|
|
||||||
exlock_now
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "failed to acquire lock $LOCKFILE"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
### Script Begins Here
|
|
||||||
|
|
||||||
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
|
|
||||||
|
|
||||||
### Load config
|
|
||||||
source $DIR/standalone_server.conf
|
|
||||||
### Load functions
|
|
||||||
source $DIR/install_func.sh
|
|
||||||
|
|
||||||
### Change selinux security policy
|
|
||||||
sudo setenforce 0
|
|
||||||
sudo sed -i 's/enforcing/disabled/g' /etc/selinux/config
|
|
||||||
|
|
||||||
### Add epel repo
|
|
||||||
sudo rpm -q epel-release
|
|
||||||
if [ "$?" != "0" ]; then
|
|
||||||
sudo rpm -Uvh $EPEL
|
|
||||||
if [ "$?" != "0" ]; then
|
|
||||||
echo "failed to install epel-release"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "sucessfaully installed epel-release"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "epel-release is already installed"
|
|
||||||
fi
|
|
||||||
|
|
||||||
sed -i 's/^mirrorlist=https/mirrorlist=http/g' /etc/yum.repos.d/epel.repo
|
|
||||||
|
|
||||||
sudo rpm -q atomic-release
|
|
||||||
if [ "$?" == "0" ]; then
|
|
||||||
sudo rpm -e atomic-release
|
|
||||||
fi
|
|
||||||
|
|
||||||
### Add remi repo
|
|
||||||
sudo rpm -q remi-release
|
|
||||||
if [ "$?" != "0" ]; then
|
|
||||||
sudo rpm -Uvh $REMI >& /dev/null
|
|
||||||
if [ "$?" != "0" ]; then
|
|
||||||
echo "failed to install remi-release"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "successfully installed remi-release"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "remi-release is already installed"
|
|
||||||
fi
|
|
||||||
|
|
||||||
### Trap any error code with related filename and line.
|
|
||||||
errtrap()
|
|
||||||
{
|
|
||||||
FILE=${BASH_SOURCE[1]:-$BASH_SOURCE[0]}
|
|
||||||
echo "[FILE: "$(basename $FILE)", LINE: $1] Error: Command or function exited with status $2"
|
|
||||||
}
|
|
||||||
|
|
||||||
if [[ "$-" == *x* ]]; then
|
|
||||||
trap 'errtrap $LINENO $?' ERR
|
|
||||||
fi
|
|
||||||
|
|
||||||
sudo yum -y install figlet >& /dev/null
|
|
||||||
figlet -ctf slant Compass Standalone Web-Server
|
|
||||||
|
|
||||||
sudo yum update -y
|
|
||||||
|
|
||||||
# assign all necessary values.
|
|
||||||
export WEB_SOURCE=${WEB_SOURCE:-"http://git.openstack.org/openstack/compass-web"}
|
|
||||||
|
|
||||||
rm -rf /etc/yum.repos.d/compass_install.repo 2>/dev/nullcp
|
|
||||||
cp ${COMPASSDIR}/misc/compass_install.repo /etc/yum.repos.d/
|
|
||||||
|
|
||||||
# Start: install required packages and dependencies
|
|
||||||
sudo yum --enablerepo=compass_install install -y $MYSQL
|
|
||||||
sudo yum --enablerepo=compass_install --nogpgcheck install -y rsyslog logrotate ntp python python-devel git wget syslinux amqp mod_wsgi httpd bind rsync yum-utils gcc unzip openssl openssl098e ca-certificates mysql-devel mysql-server mysql MySQL-python python-virtualenv python-setuptools python-pip bc libselinux-python libffi-devel openssl-devel rabbitmq-server
|
|
||||||
sudo yum --setopt=tsflags=noscripts -y remove redis
|
|
||||||
sudo yum --enablerepo=compass_install --nogpgcheck install -y redis
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "failed to install yum dependency"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# sync system time
|
|
||||||
sudo service ntpd stop
|
|
||||||
ntpdate 0.centos.pool.ntp.org
|
|
||||||
sudo service ntpd start
|
|
||||||
sudo sleep 10
|
|
||||||
sudo service ntpd status
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "ntpd is not started"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Disable firewalld
|
|
||||||
sudo systemctl stop firewalld.service
|
|
||||||
|
|
||||||
sudo easy_install --upgrade pip
|
|
||||||
sudo pip install --upgrade pip
|
|
||||||
sudo pip install --upgrade setuptools
|
|
||||||
sudo pip install --upgrade virtualenv
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "failed to install easy install and pip."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
sudo pip install virtualenvwrapper
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "failed to install virtualenvwrapper"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
sudo systemctl enable httpd.service
|
|
||||||
sudo systemctl enable sshd.service
|
|
||||||
sudo systemctl enable rsyslog.service
|
|
||||||
sudo systemctl enable ntpd.service
|
|
||||||
sudo systemctl enable redis.service
|
|
||||||
sudo systemctl enable mysqld.service
|
|
||||||
sudo systemctl enable rabbitmq.service
|
|
||||||
# Finish: dependency and package install finished.
|
|
||||||
|
|
||||||
# Start: prepare installation
|
|
||||||
|
|
||||||
# Crate backup dir
|
|
||||||
sudo mkdir -p /root/backup
|
|
||||||
|
|
||||||
# update logrotate.d
|
|
||||||
echo "update logrotate config"
|
|
||||||
sudo cp -rn /etc/logrotate.d /root/backup/
|
|
||||||
rm -f /etc/logrotate.d/*
|
|
||||||
sudo cp -rf $COMPASSDIR/misc/logrotate.d/* /etc/logrotate.d/
|
|
||||||
sudo chmod 644 /etc/logrotate.d/*
|
|
||||||
|
|
||||||
# update ntp conf
|
|
||||||
echo "update ntp config"
|
|
||||||
sudo cp -rn /etc/ntp.conf /root/backup/
|
|
||||||
sudo rm -f /etc/ntp.conf
|
|
||||||
sudo cp -rf $COMPASSDIR/misc/ntp/ntp.conf /etc/ntp.conf
|
|
||||||
sudo chmod 644 /etc/ntp.conf
|
|
||||||
sudo systemctl stop ntpd.service
|
|
||||||
sudo ntpdate 0.centos.pool.ntp.org
|
|
||||||
sudo systemctl start ntpd.service
|
|
||||||
sudo sleep 10
|
|
||||||
sudo systemctl status ntpd.service
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "ntp is not started"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "ntp conf is updated"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# update httpd
|
|
||||||
echo "update httpd"
|
|
||||||
mkdir -p /var/log/httpd
|
|
||||||
chmod -R 777 /var/log/httpd
|
|
||||||
|
|
||||||
systemctl restart httpd.service
|
|
||||||
sudo sleep 10
|
|
||||||
systemctl status httpd.service
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "httpd is not started"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "httpd conf is updated"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# update mysqld
|
|
||||||
echo "update mysqld"
|
|
||||||
mkdir -p /var/log/mysql
|
|
||||||
chmod -R 777 /var/log/mysql
|
|
||||||
sleep 10
|
|
||||||
sudo systemctl restart mysql.service
|
|
||||||
sudo sleep 10
|
|
||||||
sudo systemctl status mysql.service
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "failed to restart mysqld"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "mysqld restarted"
|
|
||||||
fi
|
|
||||||
MYSQL_USER=${MYSQL_USER:-root}
|
|
||||||
MYSQL_OLD_PASSWORD=${MYSQL_OLD_PASSWORD:-root}
|
|
||||||
MYSQL_PASSWORD=${MYSQL_PASSWORD:-root}
|
|
||||||
MYSQL_SERVER=${MYSQL_SERVER:-127.0.0.1}
|
|
||||||
MYSQL_PORT=${MYSQL_PORT:-3306}
|
|
||||||
MYSQL_DATABASE=${MYSQL_DATABASE:-compass}
|
|
||||||
# first time set mysql password
|
|
||||||
sudo mysqladmin -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u ${MYSQL_USER} -p"${MYSQL_OLD_PASSWORD}" password ${MYSQL_PASSWORD}
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "setting up mysql initial password"
|
|
||||||
sudo mysqladmin -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u ${MYSQL_USER} password ${MYSQL_PASSWORD}
|
|
||||||
fi
|
|
||||||
mysql -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} -e "show databases;"
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "mysql password set failed"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "mysql password set succeeded"
|
|
||||||
fi
|
|
||||||
|
|
||||||
sudo mysql -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} -e "drop database ${MYSQL_DATABASE}"
|
|
||||||
sudo mysql -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} -e "create database ${MYSQL_DATABASE}"
|
|
||||||
sudo mysql -h${MYSQL_SERVER} --port=${MYSQL_PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} -e "GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD'"
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "mysql database set failed"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
sudo systemctl restart mysql.service
|
|
||||||
sudo systemctl status mysql.service
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "mysqld is not started"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
sudo systemctl restart rabbitmq-server.service
|
|
||||||
sudo systemctl status rabbitmq-server.service
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "rabbitmq-server is not started"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Get websource now
|
|
||||||
if [ -z $WEB_SOURCE ]; then
|
|
||||||
echo "web source $WEB_SOURCE is not set"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
copy2dir "$WEB_SOURCE" "$WEB_HOME" || exit $?
|
|
||||||
|
|
||||||
# Set up virtualenv
|
|
||||||
source `which virtualenvwrapper.sh`
|
|
||||||
if ! lsvirtualenv |grep compass-core>/dev/null; then
|
|
||||||
mkvirtualenv --system-site-packages compass-core
|
|
||||||
fi
|
|
||||||
cd $COMPASSDIR
|
|
||||||
workon compass-core
|
|
||||||
easy_install --upgrade pip
|
|
||||||
rm -rf ${WORKON_HOME}/compass-core/build
|
|
||||||
echo "install compass requirements"
|
|
||||||
pip install -U -r requirements.txt
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "failed to install compass requiremnts"
|
|
||||||
deactivate
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
pip install -U boto
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "failed to install compass test requiremnts"
|
|
||||||
deactivate
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "intall compass requirements succeeded"
|
|
||||||
deactivate
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Setup compass web components
|
|
||||||
sudo mkdir -p /var/www/compass_web
|
|
||||||
sudo rm -rf /var/www/compass_web/*
|
|
||||||
|
|
||||||
sudo mkdir -p /var/www/compass_web/v2.5
|
|
||||||
sudo cp -rf $WEB_HOME/v2.5/target/* /var/www/compass_web/v2.5/
|
|
||||||
|
|
||||||
sudo systemctl restart httpd.service
|
|
||||||
sleep 10
|
|
||||||
|
|
||||||
echo "Checking if httpd is running"
|
|
||||||
sudo systemctl status httpd.service
|
|
||||||
if [[ "$?" == "0" ]]; then
|
|
||||||
echo "httpd is running"
|
|
||||||
else
|
|
||||||
echo "httpd is not running"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
## Setup compass server
|
|
||||||
cd $SCRIPT_DIR
|
|
||||||
|
|
||||||
sudo mkdir -p /etc/compass
|
|
||||||
sudo rm -rf /etc/compass/*
|
|
||||||
sudo mkdir -p /opt/compass/bin
|
|
||||||
sudo rm -rf /opt/compass/bin/*
|
|
||||||
sudo mkdir -p /var/log/compass
|
|
||||||
sudo rm -rf /var/log/compass/*
|
|
||||||
sudo mkdir -p /var/www/compass
|
|
||||||
sudo rm -rf /var/www/compass/*
|
|
||||||
|
|
||||||
sudo cp -rf $COMPASSDIR/misc/apache/ods-server.conf /etc/httpd/conf.d/ods-server.conf
|
|
||||||
sudo cp -rf $COMPASSDIR/misc/apache/http_pip.conf /etc/httpd/conf.d/http_pip.conf
|
|
||||||
sudo cp -rf $COMPASSDIR/misc/apache/images.conf /etc/httpd/conf.d/images.conf
|
|
||||||
sudo cp -rf $COMPASSDIR/misc/apache/packages.conf /etc/httpd/conf.d/packages.conf
|
|
||||||
sudo cp -rf $COMPASSDIR/conf/* /etc/compass/
|
|
||||||
sudo cp -rf $COMPASSDIR/service/* /etc/init.d/
|
|
||||||
sudo cp -rf $COMPASSDIR/bin/*.py /opt/compass/bin/
|
|
||||||
sudo cp -rf $COMPASSDIR/bin/*.sh /opt/compass/bin/
|
|
||||||
sudo cp -rf $COMPASSDIR/bin/ansible_callbacks /opt/compass/bin/
|
|
||||||
sudo cp -rf $COMPASSDIR/bin/compassd /usr/bin/
|
|
||||||
sudo cp -rf $COMPASSDIR/bin/switch_virtualenv.py.template /opt/compass/bin/switch_virtualenv.py
|
|
||||||
sudo ln -s -f /opt/compass/bin/compass_check.py /usr/bin/compass
|
|
||||||
sudo ln -s -f /opt/compass/bin/compass_wsgi.py /var/www/compass/compass.wsgi
|
|
||||||
sudo cp -rf $COMPASSDIR/bin/chef/* /opt/compass/bin/
|
|
||||||
sudo cp -rf $COMPASSDIR/bin/cobbler/* /opt/compass/bin/
|
|
||||||
|
|
||||||
sudo usermod -a -G `groups $USER|awk '{print$3}'` apache
|
|
||||||
|
|
||||||
# setup ods server
|
|
||||||
if [ ! -f /usr/lib64/libcrypto.so ]; then
|
|
||||||
sudo cp -rf /usr/lib64/libcrypto.so.6 /usr/lib64/libcrypto.so
|
|
||||||
fi
|
|
||||||
|
|
||||||
download -u "$PIP_PACKAGES" `basename $PIP_PACKAGES` unzip /var/www/ || exit $?
|
|
||||||
download -u "$EXTRA_PACKAGES" `basename $EXTRA_PACKAGES` unzip /var/www/ || exit $?
|
|
||||||
|
|
||||||
sudo mkdir -p /opt/compass/db
|
|
||||||
sudo chmod -R 777 /opt/compass/db
|
|
||||||
sudo chmod -R 777 /var/log/compass
|
|
||||||
sudo chmod -R 777 /var/log/chef
|
|
||||||
sudo echo "export C_FORCE_ROOT=1" > /etc/profile.d/celery_env.sh
|
|
||||||
sudo chmod +x /etc/profile.d/celery_env.sh
|
|
||||||
source `which virtualenvwrapper.sh`
|
|
||||||
if ! lsvirtualenv |grep compass-core>/dev/null; then
|
|
||||||
mkvirtualenv --system-site-packages compass-core
|
|
||||||
fi
|
|
||||||
cd $COMPASSDIR
|
|
||||||
workon compass-core
|
|
||||||
|
|
||||||
python setup.py install
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "failed to install compass package"
|
|
||||||
deactivate
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "compass package is installed in virtualenv under current dir"
|
|
||||||
fi
|
|
||||||
|
|
||||||
udo sed -i "s/\$ipaddr/$IPADDR/g" /etc/compass/setting
|
|
||||||
sudo sed -i "s/\$hostname/$HOSTNAME/g" /etc/compass/setting
|
|
||||||
sudo sed -i "s/\$gateway/$OPTION_ROUTER/g" /etc/compass/setting
|
|
||||||
domains=$(echo $NAMESERVER_DOMAINS | sed "s/,/','/g")
|
|
||||||
sudo sed -i "s/\$domains/$domains/g" /etc/compass/setting
|
|
||||||
sudo sed -i "/DATABASE_SERVER =/c\DATABASE_SERVER = '127.0.0.1:3306'" /etc/compass/setting
|
|
||||||
sudo sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" /opt/compass/bin/switch_virtualenv.py
|
|
||||||
sudo ln -s -f $VIRTUAL_ENV/bin/celery /opt/compass/bin/celery
|
|
||||||
|
|
||||||
deactivate
|
|
||||||
|
|
||||||
sudo mkdir -p /var/log/redis
|
|
||||||
sudo chown -R redis:root /var/log/redis
|
|
||||||
sudo mkdir -p /var/lib/redis/
|
|
||||||
sudo rm -rf /var/lib/redis/*
|
|
||||||
sudo chown -R redis:root /var/lib/redis
|
|
||||||
sudo mkdir -p /var/run/redis
|
|
||||||
sudo chown -R redis:root /var/run/redis
|
|
||||||
sudo mkdir -p /var/lib/redis
|
|
||||||
sudo chown -R redis:root /var/lib/redis
|
|
||||||
sudo rm -rf /var/lib/redis/dump.rdb
|
|
||||||
sudo systemctl kill redis-server
|
|
||||||
sudo rm -rf /var/run/redis/redis.pid
|
|
||||||
sudo systemctl restart redis.service
|
|
||||||
sleep 10
|
|
||||||
echo "Checking if redis is running"
|
|
||||||
sudo systemctl status redis.service
|
|
||||||
if [[ "$?" == "0" ]]; then
|
|
||||||
echo "redis is running"
|
|
||||||
else
|
|
||||||
ps -ef | grep redis
|
|
||||||
echo "redis is not running"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
sudo mv /etc/compass/celeryconfig_local /etc/compass/celeryconfig
|
|
||||||
/opt/compass/bin/refresh.sh
|
|
||||||
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "failed to refresh compassd service"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "compassed service is refreshed"
|
|
||||||
fi
|
|
||||||
|
|
||||||
sudo systemctl status httpd.service
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "httpd is not started"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "httpd has already started"
|
|
||||||
fi
|
|
||||||
|
|
||||||
sudo systemctl status redis.service |grep running
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "redis is not started"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "redis has already started"
|
|
||||||
fi
|
|
||||||
|
|
||||||
sudo systemctl status mysql.service |grep running
|
|
||||||
if [[ "$?" != "0" ]]; then
|
|
||||||
echo "mysqld is not started"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
figlet -ctf slant Standalone Server Installation Complete!
|
|
||||||
echo -e "It takes\x1b[32m $SECONDS \x1b[0mseconds during the installation."
|
|
@ -1,18 +0,0 @@
-#############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#############################################################################
----
-
-cron_path: "/var/spool/cron/crontabs"
-
-packages:
-  - keystone
-
-services:
-  - apache2
-  - keystone
@ -1,239 +0,0 @@
---
- hosts: all
  remote_user: root
  pre_tasks:
    - name: make sure ssh dir exist
      file:
        path: '{{ item.path }}'
        owner: '{{ item.owner }}'
        group: '{{ item.group }}'
        state: directory
        mode: 0755
      with_items:
        - path: /root/.ssh
          owner: root
          group: root

    - name: write ssh config
      copy:
        content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
        dest: '{{ item.dest }}'
        owner: '{{ item.owner }}'
        group: '{{ item.group }}'
        mode: 0600
      with_items:
        - dest: /root/.ssh/config
          owner: root
          group: root

    - name: generate ssh keys
      shell: if [ ! -f ~/.ssh/id_rsa.pub ]; then ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""; else echo "already gen ssh key!"; fi;

    - name: fetch ssh keys
      fetch: src=/root/.ssh/id_rsa.pub dest=/tmp/ssh-keys-{{ ansible_hostname }} flat=yes

    - authorized_key:
        user: root
        key: "{{ lookup('file', 'item') }}"
      with_fileglob:
        - /tmp/ssh-keys-*
  max_fail_percentage: 0
  roles:
    - common

- hosts: all
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - setup-network

- hosts: ha
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - ha

- hosts: controller
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - memcached
    - apache
    - database
    - mq
    - keystone
    - nova-controller
    - neutron-controller
    - cinder-controller
    - glance
    - neutron-common
    - neutron-network
    - ceilometer_controller
    # - ext-network
    - dashboard
    - heat
    - aodh

- hosts: all
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - storage

- hosts: compute
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - nova-compute
    - neutron-compute
    - cinder-volume
    - ceilometer_compute

- hosts: all
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - secgroup

- hosts: ceph_adm
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles: []
  # - ceph-deploy

- hosts: ceph
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - ceph-purge
    - ceph-config

- hosts: ceph_mon
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - ceph-mon

- hosts: ceph_osd
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - ceph-osd

- hosts: ceph
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - ceph-openstack

- hosts: all
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - monitor


- hosts: all
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  tasks:
    - name: set bash to nova
      user:
        name: nova
        shell: /bin/bash

    - name: make sure ssh dir exist
      file:
        path: '{{ item.path }}'
        owner: '{{ item.owner }}'
        group: '{{ item.group }}'
        state: directory
        mode: 0755
      with_items:
        - path: /var/lib/nova/.ssh
          owner: nova
          group: nova

    - name: copy ssh keys for nova
      shell: cp -rf /root/.ssh/id_rsa /var/lib/nova/.ssh;

    - name: write ssh config
      copy:
        content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no"
        dest: '{{ item.dest }}'
        owner: '{{ item.owner }}'
        group: '{{ item.group }}'
        mode: 0600
      with_items:
        - dest: /var/lib/nova/.ssh/config
          owner: nova
          group: nova

    - authorized_key:
        user: nova
        key: "{{ lookup('file', 'item') }}"
      with_fileglob:
        - /tmp/ssh-keys-*

    - name: chown ssh file
      shell: chown -R nova:nova /var/lib/nova/.ssh;


- hosts: all
  remote_user: root
  max_fail_percentage: 0
  roles:
    - odl_cluster

- hosts: all
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - onos_cluster

- hosts: all
  remote_user: root
  sudo: True
  max_fail_percentage: 0
  roles:
    - open-contrail

- hosts: all
  remote_user: root
  serial: 1
  max_fail_percentage: 0
  roles:
    - odl_cluster_neutron

- hosts: all
  remote_user: root
  max_fail_percentage: 0
  roles:
    - odl_cluster_post

- hosts: controller
  remote_user: root
  max_fail_percentage: 0
  roles:
    - ext-network

- hosts: controller
  remote_user: root
  accelerate: false
  max_fail_percentage: 0
  roles:
    - tacker
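For orientation, a playbook like the one above would normally be driven with ansible-playbook against an inventory that defines the host groups it targets. A hypothetical invocation only; the inventory path and playbook file name below are assumptions, not taken from this change:

# Sketch only: file names are placeholders.
# The inventory must define the controller, compute, ha, ceph_adm,
# ceph_mon, ceph_osd and ceph groups referenced by the plays above.
ansible-playbook -i /path/to/inventory site.yml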
@ -1,74 +0,0 @@
##############################################################################
# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
- include_vars: "{{ ansible_os_family }}.yml"

- name: disable auto start
  copy:
    content: "#!/bin/sh\nexit 101"
    dest: "/usr/sbin/policy-rc.d"
    mode: 0755
  when: ansible_os_family == "Debian"

- name: install keystone packages
  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
  with_items: packages | union(packages_noarch)

- name: enable auto start
  file:
    path=/usr/sbin/policy-rc.d
    state=absent
  when: ansible_os_family == "Debian"

- name: generate keystone service list
  lineinfile: dest=/opt/service create=yes line='{{ item }}'
  with_items: services | union(services_noarch)

- name: delete sqlite database
  file:
    path: /var/lib/keystone/keystone.db
    state: absent

- name: update keystone conf
  template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
  notify:
    - restart keystone services

- name: update apache2 configs
  template:
    src: wsgi-keystone.conf.j2
    dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf'
  when: ansible_os_family == 'Debian'
  notify:
    - restart keystone services

- name: update apache2 configs
  template:
    src: wsgi-keystone.conf.j2
    dest: '{{ apache_config_dir }}/wsgi-keystone.conf'
  when: ansible_os_family == 'RedHat'
  notify:
    - restart keystone services

- name: enable keystone server
  file:
    src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf"
    dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf"
    state: "link"
  when: ansible_os_family == 'Debian'
  notify:
    - restart keystone services

- name: keystone source files
  template: src={{ item }} dest=/opt/{{ item }}
  with_items:
    - admin-openrc.sh
    - demo-openrc.sh

- meta: flush_handlers
@ -1,80 +0,0 @@
#if $str($getVar('anamon_enabled','')) == "1"

## install anamon script
#if $getVar("compass_server", "") != ""
wget -O /usr/local/sbin/anamon "http://$compass_server:$http_port/cobbler/aux/anamon"
#else
wget -O /usr/local/sbin/anamon "http://$server:$http_port/cobbler/aux/anamon"
#end if
## install anamon system service
cat << EOF > /etc/init.d/anamon.init
#raw
#!/bin/bash
## BEGIN INIT INFO
# Provides: anamon.init
# Default-Start: 3 5
# Default-Stop: 0 1 2 4 6
# Required-Start: $network
# Short-Description: Starts the cobbler anamon boot notification program
# Description: anamon runs the first time a machine is booted after
#              installation.
## END INIT INFO

#
# anamon.init: Starts the cobbler post-install boot notification program
#
# chkconfig: 35 95 95
#
# description: anamon runs the first time a machine is booted after
#              installation.
#
#end raw
cd /var/log/installer
gunzip initial-status.gz
cd -
#if $getVar("compass_server","") != ""
/usr/local/sbin/anamon --watchfile "/var/log/installer/syslog /var/log/installer/hardware-summary /var/log/installer/initial-status /var/log/installer/status" --name $name --server $compass_server --port $http_port --exit
#else
/usr/local/sbin/anamon --watchfile "/var/log/installer/syslog /var/log/installer/hardware-summary /var/log/installer/initial-status /var/log/installer/status" --name $name --server $server --port $http_port --exit
#end if
update-rc.d -f anamon remove
mv /etc/init.d/anamon.init /tmp/anamon.init
EOF

## adjust permissions
chmod 755 /etc/init.d/anamon.init /usr/local/sbin/anamon
test -d /selinux && restorecon /etc/init.d/anamon.init /usr/local/sbin/anamon

## enable the script
update-rc.d anamon.init defaults 95 95
#end if

## place start-up script for updating os state
#if $getVar('compass_server', '') != ""
#set srv = $getVar('compass_server','')
#else
#set srv = $getVar('server','')
#end if
cat << EOF > /etc/init.d/set_state
#raw
#!/bin/bash
# Provides: set_state
# Default-Start: 3 5
# Default-Stop: 0 1 2 4 6
# Required-Start: $network $ssh
# Short-Description: Notifies the os installation is finished
# Description: set_state runs the first time a machine is booted after
#              installation.
#end raw
wget -O /tmp/os_state --post-data='{"ready": true}' --header=Content-Type:application/json "http://$srv/api/hosts/${host_id}/state_internal"
update-rc.d -f set_state remove
mv /etc/init.d/set_state /tmp/set_state
EOF

## adjust permissions
chmod 755 /etc/init.d/set_state
test -d /selinux && restorecon /etc/init.d/set_state

update-rc.d set_state defaults 99 99

echo "compass_server=$server" >> /etc/compass.conf
@ -1,80 +0,0 @@
#if $str($getVar('anamon_enabled','')) == "1"

## install anamon script
#if $getVar("compass_server", "") != ""
wget -O /usr/local/sbin/anamon "http://$compass_server:$http_port/cobbler/aux/anamon"
#else
wget -O /usr/local/sbin/anamon "http://$server:$http_port/cobbler/aux/anamon"
#end if
## install anamon system service
cat << EOF > /etc/init.d/anamon.init
#raw
#!/bin/bash
## BEGIN INIT INFO
# Provides: anamon.init
# Default-Start: 3 5
# Default-Stop: 0 1 2 4 6
# Required-Start: $network
# Short-Description: Starts the cobbler anamon boot notification program
# Description: anamon runs the first time a machine is booted after
#              installation.
## END INIT INFO

#
# anamon.init: Starts the cobbler post-install boot notification program
#
# chkconfig: 35 95 95
#
# description: anamon runs the first time a machine is booted after
#              installation.
#
#end raw
cd /var/log/installer
gunzip initial-status.gz
cd -
#if $getVar("compass_server","") != ""
/usr/local/sbin/anamon --watchfile "/var/log/installer/syslog /var/log/installer/hardware-summary /var/log/installer/initial-status /var/log/installer/status" --name $name --server $compass_server --port $http_port --exit
#else
/usr/local/sbin/anamon --watchfile "/var/log/installer/syslog /var/log/installer/hardware-summary /var/log/installer/initial-status /var/log/installer/status" --name $name --server $server --port $http_port --exit
#end if
update-rc.d -f anamon remove
mv /etc/init.d/anamon.init /tmp/anamon.init
EOF

## adjust permissions
chmod 755 /etc/init.d/anamon.init /usr/local/sbin/anamon
test -d /selinux && restorecon /etc/init.d/anamon.init /usr/local/sbin/anamon

## enable the script
update-rc.d anamon.init defaults 95 95
#end if

## place start-up script for updating os state
#if $getVar('compass_server', '') != ""
#set srv = $getVar('compass_server','')
#else
#set srv = $getVar('server','')
#end if
cat << EOF > /etc/init.d/set_state
#raw
#!/bin/bash
# Provides: set_state
# Default-Start: 3 5
# Default-Stop: 0 1 2 4 6
# Required-Start: $network $ssh
# Short-Description: Notifies the os installation is finished
# Description: set_state runs the first time a machine is booted after
#              installation.
#end raw
wget -O /tmp/os_state --post-data='{"ready": true}' --header=Content-Type:application/json "http://c.stack360.io/api/hosts/${host_id}/state_internal"
update-rc.d -f set_state remove
mv /etc/init.d/set_state /tmp/set_state
EOF

## adjust permissions
chmod 755 /etc/init.d/set_state
test -d /selinux && restorecon /etc/init.d/set_state

update-rc.d set_state defaults 99 99

echo "compass_server=$server" >> /etc/compass.conf
@ -1,9 +0,0 @@
Alias /image /var/www/guestimg

<Directory "/var/www/guestimg">
    SetEnv VIRTUALENV
    Options Indexes FollowSymLinks
    Order allow,deny
    Allow from all
</Directory>
@ -1,9 +0,0 @@
Alias /packages /var/www/packages

<Directory "/var/www/packages">
    SetEnv VIRTUALENV
    Options Indexes FollowSymLinks
    Order allow,deny
    Allow from all
</Directory>
@ -1,5 +0,0 @@
[compass_install]
name=compass_repo
baseurl=http://205.177.226.237:9999/compass_install/centos7/
gpgcheck=0
enabled=1
@ -1,57 +0,0 @@
{
  # The network section covers network configuration :)
  "network": {
    # A list of downstream servers listening for our messages.
    # logstash-forwarder will pick one at random and only switch if
    # the selected one appears to be dead or unresponsive
    "servers": [ "www.stack360.io:5000" ],

    # The path to your client ssl certificate (optional)
    #"ssl certificate": "./logstash-forwarder.crt",
    # The path to your client ssl key (optional)
    #"ssl key": "./logstash-forwarder.key",

    # The path to your trusted ssl CA file. This is used
    # to authenticate your downstream server.
    "ssl ca": "/etc/pki/tls/certs/logstash-forwarder.crt",

    # Network timeout in seconds. This is most important for
    # logstash-forwarder determining whether to stop waiting for an
    # acknowledgement from the downstream server. If a timeout is reached,
    # logstash-forwarder will assume the connection or server is bad and
    # will connect to a server chosen at random from the servers list.
    "timeout": 15
  },

  # The list of files configurations
  "files": [
    {
      "paths": [
        "/var/log/compass/celery.log"
      ]
    }
    # An array of hashes. Each hash tells what paths to watch and
    # what fields to annotate on events from those paths.
    #{
      #"paths": [
        # single paths are fine
        #"/var/log/messages",
        # globs are fine too, they will be periodically evaluated
        # to see if any new files match the wildcard.
        #"/var/log/*.log"
      #],

      # A dictionary of fields to annotate on each event.
      #"fields": { "type": "syslog" }
    #}, {
      # A path of "-" means stdin.
      #"paths": [ "-" ],
      #"fields": { "type": "stdin" }
    #}, {
      #"paths": [
        #"/var/log/apache/httpd-*.log"
      #],
      #"fields": { "type": "apache" }
    #}
  ]
}
@ -1,29 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFAzCCAuugAwIBAgIJAKrryFncVfJNMA0GCSqGSIb3DQEBCwUAMBgxFjAUBgNV
BAMMDSouc3RhY2szNjAuaW8wHhcNMTYwODE4MTcwNzEyWhcNMjYwODE2MTcwNzEy
WjAYMRYwFAYDVQQDDA0qLnN0YWNrMzYwLmlvMIICIjANBgkqhkiG9w0BAQEFAAOC
Ag8AMIICCgKCAgEAw4p1OVw8tMeYfk9828FiRLPhYWwHD2OCVwlKr8P3bl974I/P
PhYTkrjEEe5SDYCWNcO58MxJ5X1vB2uQGNBBUFMni+KOqbVvcbPLL4Mkf8pjLdzD
2pItE1X7UQ8p1iYBEyAnLoV4MjA7CJ4MmUtOOcCb6keHWEhyJFOj9IzkTjFCbKuL
Y0paxayQOFlkuEK2d3Aa2HXgTLh3FFUX3kRqOcGg1uxEs2awf0nwP/NwcyfAgTSe
6yqVjnb3GLYdy283FqvxW1MrZS9UAxp/tAXRgMT8I9L2wSrjnPmrabBv5I+Kuf0p
EmLmxedOCTQsRYtYGL7TmySYgGuYtt/4UNKaG9tCPBnRXuMzlRClBDIoDhwLEKzC
LlE8JKyClNgutMKcrHbWlEJiGxooyDC1H9gwkHTi3w7qF9BYYBhkXFN2Sl0mPk0m
6NMfhQUAeqI0HlOsAX/HLwSWyRl0Nr2rvVJqgbbHRF18pADitQEuc/koT9qhON5f
BReYhQZIarwPJ/UBgwgadrP79bvWJ5u9Oga6H7yHf49/UYD3gQCvu3/Hxo/IY6AX
86eZg1ZObD6J0xpWb5jskVSgugar6Xq/h6iRkX8O3ssUdIyIxsIMhtCgxykSmdQY
FDEIWGZq2kdEVvF6GR/EYJRxvASI+27TXzuxP1UodQQOXa8xySkKu9U5SqMCAwEA
AaNQME4wHQYDVR0OBBYEFPLilPT6k6rRlxv9kCTW38VYy5AuMB8GA1UdIwQYMBaA
FPLilPT6k6rRlxv9kCTW38VYy5AuMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEL
BQADggIBAAdDy+R0bGCBQgiF2fGunl+AZ8zd0MZbtL4Kv53gunlhyaPQJ3Z+e3GF
V+Z9BhvMUXE/XN3bsAIZYJekvclysYLBdK1C8n9Rli+AbBSGjwgttRAXeEqaZCbE
QrNPukRgHThv6hyJNcf1TnR70xCBlcYOGQkEqWx1g0xrsG8ryGbum0BAG0YWLCYq
BboP16FGAPjDlb10ysWy7HuGJorf470Kyb2iRfp4PX/zdYbcA8gcggGCT323JwuD
Qwnd8kyX6+6pNhBJE3gAyPDhqvbFbpmo/Ia5pqmJkL3APee1bVI2itqZB/HnmyZ5
UCIzqf4uAuEodN6yXiImHt0TagXtL4eEXNE2qjwsFSaHeyP6iXPX5tc8RMHzFQjH
I3MzROHaeVUxs2rMxETGvq4+DebhGzCBKaJUWfMV6Y1+ovrE0MowcR8nO1Q4YQt3
to+W5IrjI7zzQ7+4XqZm+Yz1DQ4Kr7s2iyAExvkq7kU5FAow6SLPIaOl7kbas2M4
fwFisuEQT2Om+hbWWZTJ1T45KU5NjznkPqJZ9dCdyqs2mH7BE4vOkOULq81uFG06
VnJHcO+wZM1iCLa1hy7F2S4fDTjTBYDcuD5GFkulFLeFH5X7zoPz20OGTRMXZONI
CHEk1ibp8j/Q6bw1zd0jGm3KDUSx+0/Avfve/e28U4KAdNekrgQf
-----END CERTIFICATE-----
@ -1,6 +0,0 @@
[logstashforwarder]
name=logstashforwarder repository
baseurl=http://packages.elasticsearch.org/logstashforwarder/centos
gpgcheck=1
gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch
enabled=1
@ -13,8 +13,8 @@ itsdangerous
 importlib
 lazypy
 lockfile
-netaddr
 MySQL-python
+netaddr
 paramiko
 PyChef
 python-daemon!=2.0,!=2.0.1,!=2.0.2,!=2.0.3
@ -48,22 +48,22 @@ start() {
 RETVAL=$?
 fi
 echo
-# retries=0
-# max_retries=10
-# output=''
-# while [ $retries -lt $max_retries ]; do
-# output=$(C_FORCE_ROOT=1 CELERY_CONFIG_MODULE=compass.utils.celeryconfig_wrapper $CELERY status 2&>1)
-# RETVAL=$?
-# if [ "$RETVAL" == "0" ]; then
-# break
-# else
-# sleep 10
-# fi
-# let retries=${retries}+1
-# done
-# if [ "$RETVAL" != "0" ]; then
-# echo output
-# fi
+retries=0
+max_retries=10
+output=''
+while [ $retries -lt $max_retries ]; do
+    output=$(C_FORCE_ROOT=1 CELERY_CONFIG_MODULE=compass.utils.celeryconfig_wrapper $CELERY status 2&>1)
+    RETVAL=$?
+    if [ "$RETVAL" == "0" ]; then
+        break
+    else
+        sleep 10
+    fi
+    let retries=${retries}+1
+done
+if [ "$RETVAL" != "0" ]; then
+    echo output
+fi
 return $RETVAL
 }

@ -104,10 +104,10 @@ case "$1" in
 status -p /var/run/celery-worker.pid $CELERY
 RETVAL=$?
 fi
-# if [ "$RETVAL" == "0" ]; then
-# C_FORCE_ROOT=1 CELERY_CONFIG_MODULE=compass.utils.celeryconfig_wrapper $CELERY status
-# RETVAL=$?
-# fi
+if [ "$RETVAL" == "0" ]; then
+    C_FORCE_ROOT=1 CELERY_CONFIG_MODULE=compass.utils.celeryconfig_wrapper $CELERY status
+    RETVAL=$?
+fi
 ;;
 *)
 echo "Usage: $0 {start|stop|status|restart}"

tox.ini
@ -28,7 +28,7 @@ commands = {posargs}
 commands = python setup.py testr --coverage --testr-args='{posargs}'

 [flake8]
-ignore = H302,H304,H233,H803,F401,H104,H236,H237,H238,E501
+ignore = H302,H304,H233,H803,F401,H104,H236,H237,H238
 show-source = true
 builtins = _
 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools,build