Fix PEP8 warnings and move major util code from bin/ into the compass/ package.
Change-Id: I7baa536888db1ca46cadcdf07ef9cc0a1a3a12fb
parent 5a99dd5076
commit 7f080e4e51
@@ -1,5 +1,5 @@
 #!/usr/bin/env python

 """import cookbooks to chef server."""
 import logging
 import os
 import os.path
@@ -14,12 +14,15 @@ flags.add('cookbooks_dir',
           default='/var/chef/cookbooks')


-if __name__ == '__main__':
+def main():
+    """main entry"""
     flags.init()
     logsetting.init()
     cookbooks = []
    cookbooks_dir = flags.OPTIONS.cookbooks_dir
     logging.info('add cookbooks %s', cookbooks_dir)
     cmd = "knife cookbook upload --all --cookbook-path %s" % cookbooks_dir
     os.system(cmd)
+
+
+if __name__ == '__main__':
+    main()
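The same mechanical refactor recurs in every bin/ script this commit touches: statements that used to execute at module level under the `if __name__ == '__main__':` guard move into a `main()` function, and the guard shrinks to a single call. A minimal sketch of the resulting shape (generic body, not any one script):

#!/usr/bin/env python
"""Generic shape of the bin/ script refactor in this commit."""


def main():
    """main entry"""
    # statements that previously executed at module level go here;
    # wrapping them lets the module be imported (e.g. by tests or
    # style checkers) without side effects
    pass


if __name__ == '__main__':
    main()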
@@ -1,5 +1,5 @@
 #!/usr/bin/env python

 """import databags to chef server."""
 import logging
 import os
 import os.path
@@ -13,7 +13,8 @@ flags.add('databags_dir',
           default='/var/chef/databags')


-if __name__ == '__main__':
+def main():
+    """main entry"""
     flags.init()
     logsetting.init()
     databags = []
@@ -35,3 +36,7 @@ if __name__ == '__main__':
                          databag_item, databag)
            cmd = 'knife data bag from file %s %s' % (databag, databag_item)
            os.system(cmd)
+
+
+if __name__ == '__main__':
+    main()
@@ -1,5 +1,5 @@
 #!/usr/bin/env python

 """script to import roles to chef server"""
 import logging
 import os
 import os.path
@@ -13,7 +13,8 @@ flags.add('roles_dir',
           default='/var/chef/roles')


-if __name__ == '__main__':
+def main():
+    """main entry"""
     flags.init()
     logsetting.init()
     rolelist = []
@@ -27,3 +28,7 @@ if __name__ == '__main__':
         logging.info('add role %s', role)
         cmd = "knife role from file %s" % role
         os.system(cmd)
+
+
+if __name__ == '__main__':
+    main()
@@ -1,5 +1,5 @@
 #!/usr/bin/python

 """script to migrate rendered kickstart files from cobbler to outside."""
 import xmlrpclib
 import logging

@@ -7,17 +7,22 @@ from compass.utils import setting_wrapper as setting


 def main():
     """main entry"""
     remote = xmlrpclib.Server(setting.COBBLER_INSTALLER_URL, allow_none=True)
     token = remote.login(*setting.COBBLER_INSTALLER_TOKEN)
     systems = remote.get_systems(token)
     for system in systems:
         data = remote.generate_kickstart('', system['name'])
         try:
-            with open('/var/www/cblr_ks/%s' % system['name'], 'w') as f:
+            with open(
+                '/var/www/cblr_ks/%s' % system['name'], 'w'
+            ) as kickstart_file:
                 logging.info("Migrating kickstart for %s", system['name'])
-                f.write(data)
-        except:
+                kickstart_file.write(data)
+        except Exception as error:
             logging.error("Directory /var/www/cblr_ks/ does not exist.")
+            logging.exception(error)


 if __name__ == '__main__':
     logging.info("Running kickstart migration")
bin/manage_db.py (727 lines changed)
@@ -1,24 +1,27 @@
 #!/usr/bin/python
 """utility binary to manage database."""
 import logging
 import os
 import os.path
 import re
 import shutil
 import sys

 from flask.ext.script import Manager

+from compass.actions import clean_deployment
+from compass.actions import reinstall
+from compass.actions import deploy
+from compass.actions import clean_installing_progress
+from compass.actions import search
 from compass.api import app
 from compass.config_management.utils import config_manager
 from compass.config_management.utils import config_reference
 from compass.db import database
 from compass.db.model import Adapter, Role, Switch, SwitchConfig
 from compass.db.model import Machine, HostState, ClusterState
 from compass.db.model import Cluster, ClusterHost, LogProgressingHistory
 from compass.tasks.client import celery
 from compass.utils import flags
 from compass.utils import logsetting
 from compass.utils import setting_wrapper as setting
 from compass.utils import util


 flags.add('table_name',
@@ -26,23 +29,31 @@ flags.add('table_name',
           default='')
 flags.add('clusters',
           help=(
-              'clusters to clean, the format is as '
+              'clusters and hosts of each cluster, the format is as '
               'clusterid:hostname1,hostname2,...;...'),
           default='')
-flags.add('fake_switches_file',
+flags.add_bool('async',
+               help='run in async mode',
+               default=True)
+flags.add('switch_machines_file',
           help=(
               'files for switches and machines '
               'connected to each switch. each line in the file '
-              'is <switch ip>,<switch port>,<vlan>,<mac>'),
+              'is machine,<switch ip>,<switch port>,<vlan>,<mac> '
+              'or switch,<switch_ip>,<switch_vendor>,'
+              '<switch_version>,<switch_community>,<switch_state>'),
           default='')
-flags.add('fake_switches_vendor',
-          help='switch vendor used to set fake switch and machines.',
-          default='huawei')
-flags.add('search_config_properties',
-          help='semicomma separated properties to search in config',
+flags.add('search_cluster_properties',
+          help='comma separated properties to search in cluster config',
           default='')
-flags.add('print_config_properties',
-          help='semicomma separated config properties to print',
+flags.add('print_cluster_properties',
+          help='comma separated cluster config properties to print',
          default='')
+flags.add('search_host_properties',
+          help='comma separated properties to search in host config',
+          default='')
+flags.add('print_host_properties',
+          help='comma separated host config properties to print',
+          default='')

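The new `--clusters` flag encodes clusters and their hosts as `clusterid:hostname1,hostname2,...;...`. The commands below delegate parsing to `util.get_clusters_from_str`, whose body is not part of this diff; a plausible sketch, mirroring the removed `_get_clusters` helper further down, might look like:

def get_clusters_from_str(clusters_str):
    """Hypothetical sketch of compass.utils.util.get_clusters_from_str.

    Parses 'clusterid:host1,host2;...' into {clusterid: [host, ...]};
    the real helper is not shown in this diff and may differ.
    """
    clusters = {}
    for cluster_and_hosts in clusters_str.split(';'):
        if not cluster_and_hosts:
            continue

        if ':' in cluster_and_hosts:
            cluster_str, hosts_str = cluster_and_hosts.split(':', 1)
        else:
            cluster_str, hosts_str = cluster_and_hosts, ''

        # an empty host list stands for "all hosts in the cluster"
        clusters[int(cluster_str)] = [
            host for host in hosts_str.split(',') if host]

    return clusters


# get_clusters_from_str('1:host1,host2;2:') -> {1: ['host1', 'host2'], 2: []}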
@@ -92,6 +103,7 @@ def createdb():
     if setting.DATABASE_TYPE == 'file':
         os.chmod(setting.DATABASE_FILE, 0777)


 @app_manager.command
 def dropdb():
     """Drops database from sqlalchemy models"""
@@ -101,146 +113,39 @@ def dropdb():
 @app_manager.command
 def createtable():
     """Create database table by --table_name"""
+    if not flags.OPTIONS.table_name:
+        print 'flag --table_name is missing'
+        return
+
     table_name = flags.OPTIONS.table_name
-    if table_name and table_name in TABLE_MAPPING:
-        database.create_table(TABLE_MAPPING[table_name])
-    else:
+    if table_name not in TABLE_MAPPING:
         print '--table_name should be in %s' % TABLE_MAPPING.keys()
+        return
+
+    database.create_table(TABLE_MAPPING[table_name])


 @app_manager.command
 def droptable():
     """Drop database table by --table_name"""
+    if not flags.OPTIONS.table_name:
+        print 'flag --table_name is missing'
+        return
+
     table_name = flags.OPTIONS.table_name
-    if table_name and table_name in TABLE_MAPPING:
-        database.drop_table(TABLE_MAPPING[table_name])
-    else:
+    if table_name not in TABLE_MAPPING:
         print '--table_name should be in %s' % TABLE_MAPPING.keys()
+        return
+
+    database.drop_table(TABLE_MAPPING[table_name])


 @app_manager.command
 def sync_from_installers():
     """set adapters in Adapter table from installers."""
-    # TODO(xiaodong): Move the code to config_manager.
-    manager = config_manager.ConfigManager()
-    adapters = manager.get_adapters()
-    target_systems = set()
-    roles_per_target_system = {}
-    for adapter in adapters:
-        target_systems.add(adapter['target_system'])
-
-    for target_system in target_systems:
-        roles_per_target_system[target_system] = manager.get_roles(
-            target_system)
-
-    with database.session() as session:
-        session.query(Adapter).delete()
-        session.query(Role).delete()
-        for adapter in adapters:
-            session.add(Adapter(**adapter))
-
-        for target_system, roles in roles_per_target_system.items():
-            for role in roles:
-                session.add(Role(**role))
-
-
-def _get_switch_ips(switch):
-    """Helper function to get switch ips."""
-    ips = []
-    blocks = switch['switch_ips'].split('.')
-    ip_blocks_list = []
-    for block in blocks:
-        ip_blocks_list.append([])
-        sub_blocks = block.split(',')
-        for sub_block in sub_blocks:
-            if not sub_block:
-                continue
-
-            if '-' in sub_block:
-                start_block, end_block = sub_block.split('-', 1)
-                start_block = int(start_block)
-                end_block = int(end_block)
-                if start_block > end_block:
-                    continue
-
-                ip_block = start_block
-                while ip_block <= end_block:
-                    ip_blocks_list[-1].append(str(ip_block))
-                    ip_block += 1
-
-            else:
-                ip_blocks_list[-1].append(sub_block)
-
-    ip_prefixes = [[]]
-    for ip_blocks in ip_blocks_list:
-        prefixes = []
-        for ip_block in ip_blocks:
-            for prefix in ip_prefixes:
-                prefixes.append(prefix + [ip_block])
-
-        ip_prefixes = prefixes
-
-    for prefix in ip_prefixes:
-        if not prefix:
-            continue
-
-        ips.append('.'.join(prefix))
-
-    logging.debug('found switch ips: %s', ips)
-    return ips
-
-
-def _get_switch_filter_ports(switch):
-    """Helper function to get switch filter ports."""
-    port_pat = re.compile(r'(\D*)(\d+(?:-\d+)?)')
-    filter_ports = []
-    for port_range in switch['filter_ports'].split(','):
-        if not port_range:
-            continue
-
-        mat = port_pat.match(port_range)
-        if not mat:
-            filter_ports.append(port_range)
-        else:
-            port_prefix = mat.group(1)
-            port_range = mat.group(2)
-            if '-' in port_range:
-                start_port, end_port = port_range.split('-', 1)
-                start_port = int(start_port)
-                end_port = int(end_port)
-                if start_port > end_port:
-                    continue
-
-                port = start_port
-                while port <= end_port:
-                    filter_ports.append('%s%s' % (port_prefix, port))
-                    port += 1
-
-            else:
-                filter_ports.append('%s%s' % (port_prefix, port_range))
-
-    logging.debug('filter ports: %s', filter_ports)
-    return filter_ports
-
-
-def _get_switch_config():
-    """Helper function to get switch config."""
-    switch_configs = []
-    if not hasattr(setting, 'SWITCHES') or not setting.SWITCHES:
-        logging.info('no switch configs to set')
-        return switch_configs
-
-    for switch in setting.SWITCHES:
-        ips = _get_switch_ips(switch)
-        filter_ports = _get_switch_filter_ports(switch)
-
-        for ip_addr in ips:
-            for filter_port in filter_ports:
-                switch_configs.append(
-                    {'ip': ip_addr, 'filter_port': filter_port})
-
-    logging.debug('switch configs: %s', switch_configs)
-    return switch_configs
+    with database.session():
+        manager = config_manager.ConfigManager()
+        manager.update_adapters_from_installers()


 @app_manager.command
@@ -261,127 +166,9 @@ def sync_switch_configs():
        integer or a range of integer like xx-xx.
        The example of filter_ports is like: ae1-5,20-40.
     """
-    switch_configs = _get_switch_config()
-    switch_config_tuples = set([])
-    with database.session() as session:
-        session.query(SwitchConfig).delete(synchronize_session='fetch')
-        for switch_config in switch_configs:
-            switch_config_tuple = tuple(switch_config.values())
-            if switch_config_tuple in switch_config_tuples:
-                logging.debug('ignore adding switch config: %s',
-                              switch_config)
-                continue
-            else:
-                logging.debug('add switch config: %s', switch_config)
-                switch_config_tuples.add(switch_config_tuple)
-
-            session.add(SwitchConfig(**switch_config))
-
-
-def _get_clusters():
-    """Helper function to get clusters from flag --clusters."""
-    clusters = {}
-    logging.debug('get clusters from flag: %s', flags.OPTIONS.clusters)
-    for clusterid_and_hostnames in flags.OPTIONS.clusters.split(';'):
-        if not clusterid_and_hostnames:
-            continue
-
-        if ':' in clusterid_and_hostnames:
-            clusterid_str, hostnames_str = clusterid_and_hostnames.split(
-                ':', 1)
-        else:
-            clusterid_str = clusterid_and_hostnames
-            hostnames_str = ''
-
-        clusterid = int(clusterid_str)
-        hostnames = [
-            hostname for hostname in hostnames_str.split(',')
-            if hostname
-        ]
-        clusters[clusterid] = hostnames
-
-    logging.debug('got clusters from flag: %s', clusters)
-    with database.session() as session:
-        clusterids = clusters.keys()
-        if not clusterids:
-            cluster_list = session.query(Cluster).all()
-            clusterids = [cluster.id for cluster in cluster_list]
-
-        for clusterid in clusterids:
-            hostnames = clusters.get(clusterid, [])
-            if not hostnames:
-                host_list = session.query(ClusterHost).filter_by(
-                    cluster_id=clusterid).all()
-                hostids = [host.id for host in host_list]
-                clusters[clusterid] = hostids
-            else:
-                hostids = []
-                for hostname in hostnames:
-                    host = session.query(ClusterHost).filter_by(
-                        cluster_id=clusterid, hostname=hostname).first()
-                    if host:
-                        hostids.append(host.id)
-                clusters[clusterid] = hostids
-
-    return clusters
-
-
-def _clean_clusters(clusters):
-    """Helper function to clean clusters."""
-    # TODO(xiaodong): Move the code to config manager.
-    manager = config_manager.ConfigManager()
-    logging.info('clean cluster hosts: %s', clusters)
-    with database.session() as session:
-        for clusterid, hostids in clusters.items():
-            cluster = session.query(Cluster).filter_by(id=clusterid).first()
-            if not cluster:
-                continue
-
-            all_hostids = [host.id for host in cluster.hosts]
-            logging.debug('all hosts in cluster %s is: %s',
-                          clusterid, all_hostids)
-
-            logging.info('clean hosts %s in cluster %s',
-                         hostids, clusterid)
-
-            adapter = cluster.adapter
-            for hostid in hostids:
-                host = session.query(ClusterHost).filter_by(id=hostid).first()
-                if not host:
-                    continue
-
-                log_dir = os.path.join(
-                    setting.INSTALLATION_LOGDIR,
-                    '%s.%s' % (host.hostname, clusterid))
-                logging.info('clean log dir %s', log_dir)
-                shutil.rmtree(log_dir, True)
-                session.query(LogProgressingHistory).filter(
-                    LogProgressingHistory.pathname.startswith(
-                        '%s/' % log_dir)).delete(
-                    synchronize_session='fetch')
-
-                logging.info('clean host %s', hostid)
-                manager.clean_host_config(
-                    hostid,
-                    os_version=adapter.os,
-                    target_system=adapter.target_system)
-                session.query(ClusterHost).filter_by(
-                    id=hostid).delete(synchronize_session='fetch')
-                session.query(HostState).filter_by(
-                    id=hostid).delete(synchronize_session='fetch')
-
-            if set(all_hostids) == set(hostids):
-                logging.info('clean cluster %s', clusterid)
-                manager.clean_cluster_config(
-                    clusterid,
-                    os_version=adapter.os,
-                    target_system=adapter.target_system)
-                session.query(Cluster).filter_by(
-                    id=clusterid).delete(synchronize_session='fetch')
-                session.query(ClusterState).filter_by(
-                    id=clusterid).delete(synchronize_session='fetch')
-
-    manager.sync()
+    with database.session():
+        manager = config_manager.ConfigManager()
+        manager.update_switch_filters()


 @app_manager.command
@@ -392,77 +179,11 @@ def clean_clusters():
        The clusters and hosts are defined in --clusters.
        the clusters flag is as clusterid:hostname1,hostname2,...;...
     """
-    clusters = _get_clusters()
-    _clean_clusters(clusters)
-    os.system('service rsyslog restart')
-
-
-def _clean_installation_progress(clusters):
-    """Helper function to clean installation progress."""
-    # TODO(xiaodong): Move the code to config manager.
-    logging.info('clean installation progress for cluster hosts: %s',
-                 clusters)
-    with database.session() as session:
-        for clusterid, hostids in clusters.items():
-            cluster = session.query(Cluster).filter_by(
-                id=clusterid).first()
-            if not cluster:
-                continue
-
-            logging.info(
-                'clean installation progress for hosts %s in cluster %s',
-                hostids, clusterid)
-
-            all_hostids = [host.id for host in cluster.hosts]
-            logging.debug('all hosts in cluster %s is: %s',
-                          clusterid, all_hostids)
-
-            for hostid in hostids:
-                host = session.query(ClusterHost).filter_by(id=hostid).first()
-                if not host:
-                    continue
-
-                log_dir = os.path.join(
-                    setting.INSTALLATION_LOGDIR,
-                    '%s.%s' % (host.hostname, clusterid))
-
-                logging.info('clean log dir %s', log_dir)
-                shutil.rmtree(log_dir, True)
-
-                session.query(LogProgressingHistory).filter(
-                    LogProgressingHistory.pathname.startswith(
-                        '%s/' % log_dir)).delete(
-                    synchronize_session='fetch')
-
-                logging.info('clean host installation progress for %s',
-                             hostid)
-                if host.state and host.state.state != 'UNINITIALIZED':
-                    session.query(ClusterHost).filter_by(
-                        id=hostid).update({
-                            'mutable': False
-                        }, synchronize_session='fetch')
-                    session.query(HostState).filter_by(id=hostid).update({
-                        'state': 'INSTALLING',
-                        'progress': 0.0,
-                        'message': '',
-                        'severity': 'INFO'
-                    }, synchronize_session='fetch')
-
-            if set(all_hostids) == set(hostids):
-                logging.info('clean cluster installation progress %s',
-                             clusterid)
-                if cluster.state and cluster.state != 'UNINITIALIZED':
-                    session.query(Cluster).filter_by(
-                        id=clusterid).update({
-                            'mutable': False
-                        }, synchronize_session='fetch')
-                    session.query(ClusterState).filter_by(
-                        id=clusterid).update({
-                            'state': 'INSTALLING',
-                            'progress': 0.0,
-                            'message': '',
-                            'severity': 'INFO'
-                        }, synchronize_session='fetch')
+    cluster_hosts = util.get_clusters_from_str(flags.OPTIONS.clusters)
+    if flags.OPTIONS.async:
+        celery.send_task('compass.tasks.clean_deployment', (cluster_hosts,))
+    else:
+        clean_deployment.clean_deployment(cluster_hosts)


 @app_manager.command
@@ -473,306 +194,106 @@ def clean_installation_progress():
        The cluster and hosts are defined in --clusters.
        The clusters flag is as clusterid:hostname1,hostname2,...;...
     """
-    clusters = _get_clusters()
-    _clean_installation_progress(clusters)
-    os.system('service rsyslog restart')
-
-
-def _reinstall_hosts(clusters):
-    """Helper function to reinstall hosts."""
-    # TODO(xiaodong): Move the code to config_manager.
-    logging.info('reinstall cluster hosts: %s', clusters)
-    manager = config_manager.ConfigManager()
-    with database.session() as session:
-        for clusterid, hostids in clusters.items():
-            cluster = session.query(Cluster).filter_by(id=clusterid).first()
-            if not cluster:
-                continue
-
-            all_hostids = [host.id for host in cluster.hosts]
-            logging.debug('all hosts in cluster %s is: %s',
-                          clusterid, all_hostids)
-
-            logging.info('reinstall hosts %s in cluster %s',
-                         hostids, clusterid)
-            adapter = cluster.adapter
-            for hostid in hostids:
-                host = session.query(ClusterHost).filter_by(id=hostid).first()
-                if not host:
-                    continue
-
-                log_dir = os.path.join(
-                    setting.INSTALLATION_LOGDIR,
-                    '%s.%s' % (host.hostname, clusterid))
-                logging.info('clean log dir %s', log_dir)
-                shutil.rmtree(log_dir, True)
-                session.query(LogProgressingHistory).filter(
-                    LogProgressingHistory.pathname.startswith(
-                        '%s/' % log_dir)).delete(
-                    synchronize_session='fetch')
-
-                logging.info('reinstall host %s', hostid)
-                manager.reinstall_host(
-                    hostid,
-                    os_version=adapter.os,
-                    target_system=adapter.target_system)
-                if host.state and host.state.state != 'UNINITIALIZED':
-                    session.query(ClusterHost).filter_by(
-                        id=hostid).update({
-                            'mutable': False
-                        }, synchronize_session='fetch')
-                    session.query(HostState).filter_by(
-                        id=hostid).update({
-                            'state': 'INSTALLING',
-                            'progress': 0.0,
-                            'message': '',
-                            'severity': 'INFO'
-                        }, synchronize_session='fetch')
-
-            if set(all_hostids) == set(hostids):
-                logging.info('reinstall cluster %s',
-                             clusterid)
-                if cluster.state and cluster.state != 'UNINITIALIZED':
-                    session.query(Cluster).filter_by(
-                        id=clusterid).update({
-                            'mutable': False
-                        }, synchronize_session='fetch')
-                    session.query(ClusterState).filter_by(
-                        id=clusterid).update({
-                            'state': 'INSTALLING',
-                            'progress': 0.0,
-                            'message': '',
-                            'severity': 'INFO'
-                        }, synchronize_session='fetch')
-
-    manager.sync()
+    cluster_hosts = util.get_clusters_from_str(flags.OPTIONS.clusters)
+    if flags.OPTIONS.async:
+        celery.send_task('compass.tasks.clean_installing_progress',
+                         (cluster_hosts,))
+    else:
+        clean_installing_progress.clean_installing_progress(cluster_hosts)


 @app_manager.command
-def reinstall_hosts():
+def reinstall_clusters():
     """Reinstall hosts in clusters.

     .. note::
        The hosts are defined in --clusters.
        The clusters flag is as clusterid:hostname1,hostname2,...;...
     """
-    clusters = _get_clusters()
-    _reinstall_hosts(clusters)
-    os.system('service rsyslog restart')
-
-
-def _get_fake_switch_machines(switch_ips, switch_machines):
-    """Helper function to get fake switch machines."""
-    missing_flags = False
-    if not flags.OPTIONS.fake_switches_vendor:
-        print 'the flag --fake_switches_vendor should be specified'
-        missing_flags = True
-
-    if not flags.OPTIONS.fake_switches_file:
-        print 'the flag --fake_switches_file should be specified.'
-        print 'each line in fake_switches_files presents one machine'
-        print 'the format of each line is <%s>,<%s>,<%s>,<%s>' % (
-            'switch ip as xxx.xxx.xxx.xxx',
-            'switch port as xxx12',
-            'vlan as 1',
-            'mac as xx:xx:xx:xx:xx:xx')
-        missing_flags = True
-
-    if missing_flags:
-        return False
-
-    try:
-        with open(flags.OPTIONS.fake_switches_file) as switch_file:
-            for line in switch_file:
-                line = line.strip()
-                switch_ip, switch_port, vlan, mac = line.split(',', 3)
-                if switch_ip not in switch_ips:
-                    switch_ips.append(switch_ip)
-
-                switch_machines.setdefault(switch_ip, []).append({
-                    'mac': mac,
-                    'port': switch_port,
-                    'vlan': int(vlan)
-                })
-
-    except Exception as error:
-        logging.error('failed to parse file %s',
-                      flags.OPTIONS.fake_switches_file)
-        logging.exception(error)
-        return False
-
-    return True
+    cluster_hosts = util.get_clusters_from_str(flags.OPTIONS.clusters)
+    if flags.OPTIONS.async:
+        celery.send_task('compass.tasks.reinstall', (cluster_hosts,))
+    else:
+        reinstall.reinstall(cluster_hosts)


 @app_manager.command
-def set_fake_switch_machine():
-    """Set fake switches and machines.
+def deploy_clusters():
+    """Deploy hosts in clusters.

     .. note::
-       --fake_switches_vendor is the vendor name for all fake switches.
-       the default value is 'huawei'
-       --fake_switches_file is the filename which stores all fake switches
-       and fake machines.
-       each line in fake_switches_files presents one machine.
-       the format of each line <switch_ip>,<switch_port>,<vlan>,<mac>.
+       The hosts are defined in --clusters.
+       The clusters flag is as clusterid:hostname1,hostname2,...;...
     """
-    # TODO(xiaodong): Move the main code to config manager.
-    switch_ips = []
-    switch_machines = {}
-    vendor = flags.OPTIONS.fake_switches_vendor
-    credential = {
-        'version' : 'v2c',
-        'community' : 'public',
-    }
-    if not _get_fake_switch_machines(switch_ips, switch_machines):
+    cluster_hosts = util.get_clusters_from_str(flags.OPTIONS.clusters)
+    if flags.OPTIONS.async:
+        celery.send_task('compass.tasks.deploy', (cluster_hosts,))
+    else:
+        deploy.deploy(cluster_hosts)
+
+
+@app_manager.command
+def set_switch_machines():
+    """Set switches and machines.
+
+    .. note::
+       --switch_machines_file is the filename which stores all switches
+       and machines information.
+       each line in switch_machines_file presents one machine.
+       the format of each line is machine,<switch_ip>,<switch_port>,<vlan>,<mac>
+       or switch,<switch_ip>,<switch_vendor>,<switch_version>,
+       <switch_community>,<switch_state>
+    """
+    if not flags.OPTIONS.switch_machines_file:
+        print 'flag --switch_machines_file is missing'
+        return

-    with database.session() as session:
-        session.query(Switch).delete(synchronize_session='fetch')
-        session.query(Machine).delete(synchronize_session='fetch')
-        for switch_ip in switch_ips:
-            logging.info('add switch %s', switch_ip)
-            switch = Switch(ip=switch_ip, vendor_info=vendor,
-                            credential=credential,
-                            state='under_monitoring')
-            logging.debug('add switch %s', switch_ip)
-            session.add(switch)
-
-            machines = switch_machines[switch_ip]
-            for item in machines:
-                logging.debug('add machine %s', item)
-                machine = Machine(**item)
-                machine.switch = switch
-
-                session.add(machine)
-
-
-def _get_config_properties():
-    """Helper function to get config properties."""
-    if not flags.OPTIONS.search_config_properties:
-        logging.info('the flag --search_config_properties is not specified.')
-        return {}
-
-    search_config_properties = flags.OPTIONS.search_config_properties
-    config_properties = {}
-    for config_property in search_config_properties.split(';'):
-        if not config_property:
-            continue
-
-        if '=' not in config_property:
-            logging.debug('ignore config property %s '
-                          'since there is no = in it.', config_property)
-            continue
-
-        property_name, property_value = config_property.split('=', 1)
-        config_properties[property_name] = property_value
-
-    logging.debug('get search config properties: %s', config_properties)
-    return config_properties
-
-
-def _get_print_properties():
-    """Helper function to get what properties to print."""
-    if not flags.OPTIONS.print_config_properties:
-        logging.info('the flag --print_config_properties is not specified.')
-        return []
-
-    print_config_properties = flags.OPTIONS.print_config_properties
-    config_properties = []
-    for config_property in print_config_properties.split(';'):
-        if not config_property:
-            continue
-
-        config_properties.append(config_property)
-
-    logging.debug('get print config properties: %s', config_properties)
-    return config_properties
-
-
-def _match_config_properties(config, config_properties):
-    """Helper function to check if config properties are matched."""
-    # TODO(xiaodong): Move the code to config manager.
-    ref = config_reference.ConfigReference(config)
-    for property_name, property_value in config_properties.items():
-        config_value = ref.get(property_name)
-        if config_value is None:
-            return False
-
-        if isinstance(config_value, list):
-            found = False
-            for config_value_item in config_value:
-                if str(config_value_item) == str(property_value):
-                    found = True
-
-            if not found:
-                return False
-
-        else:
-            if not str(config_value) == str(property_value):
-                return False
-
-    return True
-
-
-def _print_config_properties(config, config_properties):
-    """Helper function to print config properties."""
-    ref = config_reference.ConfigReference(config)
-    print_properties = []
-    for property_name in config_properties:
-        config_value = ref.get(property_name)
-        if config_value is None:
-            logging.error('did not find %s in %s',
-                          property_name, config)
-            continue
-
-        print_properties.append('%s=%s' % (property_name, config_value))
-
-    print ';'.join(print_properties)
+    switches, switch_machines = util.get_switch_machines_from_file(
+        flags.OPTIONS.switch_machines_file)
+    with database.session():
+        manager = config_manager.ConfigManager()
+        manager.update_switch_and_machines(switches, switch_machines)


 @app_manager.command
-def search_hosts():
-    """Search hosts by properties.
+def search_cluster_hosts():
+    """Search cluster hosts by properties.

     .. note::
-       --search_config_properties defines what properties are used to search.
-       the format of search_config_properties is as
+       --search_cluster_properties defines what properties are used to search.
+       the format of search_cluster_properties is as
        <property_name>=<property_value>;... If no search properties are set,
        It will return properties of all hosts.
-       --print_config_properties defines what properties to print.
-       the format of print_config_properties is as
+       --print_cluster_properties defines what properties to print.
+       the format of print_cluster_properties is as
        <property_name>;...
-    """
-    config_properties = _get_config_properties()
-    print_properties = _get_print_properties()
-    with database.session() as session:
-        hosts = session.query(ClusterHost).all()
-        for host in hosts:
-            if _match_config_properties(host.config, config_properties):
-                _print_config_properties(host.config, print_properties)
-
-
-@app_manager.command
-def search_clusters():
-    """Search clusters by properties.
-
-    .. note::
-       --search_config_properties defines what properties are used to search.
-       the format of search_config_properties is as
+       --search_host_properties defines what properties are used to search.
+       the format of search_host_properties is as
        <property_name>=<property_value>;... If no search properties are set,
        It will return properties of all hosts.
-       --print_config_properties defines what properties to print.
-       the format of print_config_properties is as
+       --print_host_properties defines what properties to print.
+       the format of print_host_properties is as
        <property_name>;...
+
     """
-    config_properties = _get_config_properties()
-    print_properties = _get_print_properties()
-    with database.session() as session:
-        clusters = session.query(Cluster).all()
-        for cluster in clusters:
-            if _match_config_properties(cluster.config, config_properties):
-                _print_config_properties(cluster.config, print_properties)
+    cluster_properties = util.get_properties_from_str(
+        flags.OPTIONS.search_cluster_properties)
+    cluster_properties_name = util.get_properties_name_from_str(
+        flags.OPTIONS.print_cluster_properties)
+    host_properties = util.get_properties_from_str(
+        flags.OPTIONS.search_host_properties)
+    host_properties_name = util.get_properties_name_from_str(
+        flags.OPTIONS.print_host_properties)
+    cluster_hosts = util.get_clusters_from_str(flags.OPTIONS.clusters)
+    cluster_properties, cluster_host_properties = search.search(
+        cluster_hosts, cluster_properties,
+        cluster_properties_name, host_properties,
+        host_properties_name)
+    print 'clusters properties:'
+    util.print_properties(cluster_properties)
+    for clusterid, host_properties in cluster_host_properties.items():
+        print 'hosts properties under cluster %s' % clusterid
+        util.print_properties(host_properties)


 if __name__ == "__main__":
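Every rewritten manage_db command above now shares one shape: parse `--clusters` into a cluster_hosts dict, then either enqueue a celery task or run the action in-process depending on `--async`. The commands inline this rather than sharing a helper; distilled, the pattern is a sketch like:

from compass.tasks.client import celery
from compass.utils import flags
from compass.utils import util


def dispatch(task_name, action_func):
    """Hypothetical helper; the commands above inline this pattern.

    task_name/action_func pairs actually used by the diff include
    'compass.tasks.clean_deployment' / clean_deployment.clean_deployment
    and 'compass.tasks.deploy' / deploy.deploy.
    """
    cluster_hosts = util.get_clusters_from_str(flags.OPTIONS.clusters)
    if flags.OPTIONS.async:
        # hand the work to a celery worker and return immediately
        celery.send_task(task_name, (cluster_hosts,))
    else:
        # run the action synchronously in this process
        action_func(cluster_hosts)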
@@ -1,113 +1,76 @@
 #!/usr/bin/python
 """main script to poll machines which are connected to the switches."""
-import daemon
+import functools
 import lockfile
 import logging
-import sys
-import signal
-import time

-from compass.actions import poll_switch
+from multiprocessing import Pool
+
+from compass.actions import poll_switch
+from compass.actions import util
 from compass.db import database
-from compass.db.model import Switch
 from compass.tasks.client import celery
+from compass.utils import daemonize
 from compass.utils import flags
 from compass.utils import logsetting
 from compass.utils import setting_wrapper as setting


-flags.add('switchids',
-          help='comma seperated switch ids',
+flags.add('switch_ips',
+          help='comma separated switch ips',
           default='')
 flags.add_bool('async',
                help='run in async mode',
                default=True)
 flags.add_bool('once',
                help='run once or forever',
                default=False)
 flags.add('thread_pool_size',
           help='thread pool size when run in noasync mode',
           default='4')
 flags.add('run_interval',
           help='run interval in seconds',
           default=setting.POLLSWITCH_INTERVAL)
 flags.add_bool('daemonize',
                help='run as daemon',
                default=False)


-BUSY = False
-KILLED = False
+def pollswitches(switch_ips):
+    """poll switch"""
+    poll_switch_ips = []
+    with database.session():
+        poll_switch_ips = util.update_switch_ips(switch_ips)

-def handle_term(signum, frame):
-    global BUSY
-    global KILLED
-    logging.info('Caught signal %s', signum)
-    KILLED = True
-    if not BUSY:
-        sys.exit(0)
+    if flags.OPTIONS.async:
+        for poll_switch_ip in poll_switch_ips:
+            celery.send_task(
+                'compass.tasks.pollswitch',
+                (poll_switch_ip,)
+            )

+    else:
+        try:
+            pool = Pool(processes=int(flags.OPTIONS.thread_pool_size))
+            for poll_switch_ip in poll_switch_ips:
+                pool.apply_async(
+                    poll_switch.poll_switch,
+                    (poll_switch_ip,)
+                )

-def main(argv):
-    global BUSY
-    global KILLED
-    switchids = [int(switchid) for switchid in flags.OPTIONS.switchids.split(',') if switchid]
-    signal.signal(signal.SIGTERM, handle_term)
-    signal.signal(signal.SIGHUP, handle_term)
+            pool.close()
+            pool.join()
+        except Exception as error:
+            logging.error('failed to poll switches %s',
+                          poll_switch_ips)
+            logging.exception(error)

-    while True:
-        BUSY = True
-        with database.session() as session:
-            switch_ips = {}
-            switches = session.query(Switch).all()
-            for switch in switches:
-                switch_ips[switch.id] = switch.ip
-            if not switchids:
-                poll_switchids = [switch.id for switch in switches]
-            else:
-                poll_switchids = switchids
-            logging.info('poll switches to get machines mac: %s',
-                         poll_switchids)
-            for switchid in poll_switchids:
-                if switchid not in switch_ips:
-                    logging.error('there is no switch ip for switch %s',
-                                  switchid)
-                    continue
-                if flags.OPTIONS.async:
-                    celery.send_task('compass.tasks.pollswitch',
-                                     (switch_ips[switchid],))
-                else:
-                    try:
-                        poll_switch.poll_switch(switch_ips[switchid])
-                    except Exception as error:
-                        logging.error('failed to poll switch %s',
-                                      switch_ips[switchid])
-
-        BUSY = False
-        if KILLED:
-            logging.info('exit poll switch loop')
-            break
-
-        if flags.OPTIONS.once:
-            logging.info('finish poll switch')
-            break
-
-        if flags.OPTIONS.run_interval > 0:
-            logging.info('will rerun poll switch after %s seconds',
-                         flags.OPTIONS.run_interval)
-            time.sleep(flags.OPTIONS.run_interval)
-        else:
-            logging.info('rerun poll switch imediately')


 if __name__ == '__main__':
     flags.init()
     logsetting.init()
-    logging.info('run poll_switch: %s', sys.argv)
-    if flags.OPTIONS.daemonize:
-        with daemon.DaemonContext(
-            pidfile=lockfile.FileLock('/var/run/poll_switch.pid'),
-            stderr=open('/tmp/poll_switch_err.log', 'w+'),
-            stdout=open('/tmp/poll_switch_out.log', 'w+')
-        ):
-            logging.info('run poll switch as daemon')
-            main(sys.argv)
-    else:
-        main(sys.argv)
+    logging.info('run poll_switch')
+    daemonize.daemonize(
+        functools.partial(
+            pollswitches,
+            [switch_ip
+             for switch_ip in flags.OPTIONS.switch_ips.split(',')
+             if switch_ip]),
+        flags.OPTIONS.run_interval,
+        pidfile=lockfile.FileLock('/var/run/poll_switch.pid'),
+        stderr=open('/tmp/poll_switch_err.log', 'w+'),
+        stdout=open('/tmp/poll_switch_out.log', 'w+'))
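Both long-running scripts now hand their run loop to `compass.utils.daemonize`, a module this diff does not show. Judging only from the inline loop it replaces (the `--once`/`--run_interval` handling and the optional `daemon.DaemonContext`), a plausible sketch of its contract is:

import time

import daemon  # python-daemon, as in the replaced inline code

from compass.utils import flags


def daemonize(callback, run_interval, **daemon_context_kwargs):
    """Hypothetical sketch of compass.utils.daemonize.daemonize.

    Assumed behavior inferred from the loop this commit deletes;
    the real implementation may differ.
    """
    def _run():
        while True:
            callback()
            if flags.OPTIONS.once:
                break
            if run_interval > 0:
                time.sleep(run_interval)

    if flags.OPTIONS.daemonize:
        # detach and redirect stdio before entering the loop
        with daemon.DaemonContext(**daemon_context_kwargs):
            _run()
    else:
        _run()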
@@ -1,109 +1,53 @@
 #!/usr/bin/python
 """main script to run as service to update hosts installing progress."""
+import functools
 import lockfile
 import logging
-import signal
-import sys
-import time
-import daemon

-from compass.actions import progress_update
-from compass.db import database
-from compass.db.model import Cluster
+from compass.actions import update_progress
 from compass.tasks.client import celery
+from compass.utils import daemonize
 from compass.utils import flags
 from compass.utils import logsetting
 from compass.utils import setting_wrapper as setting
+from compass.utils import util


-flags.add('clusterids',
-          help='comma seperated cluster ids',
+flags.add('clusters',
+          help=(
+              'clusters to clean, the format is as '
+              'clusterid:hostname1,hostname2,...;...'),
           default='')
 flags.add_bool('async',
-               help='ryn in async mode',
+               help='run in async mode',
               default=True)
 flags.add_bool('once',
                help='run once or forever',
                default=False)
 flags.add('run_interval',
           help='run interval in seconds',
           default=setting.PROGRESS_UPDATE_INTERVAL)
 flags.add_bool('daemonize',
                help='run as daemon',
                default=False)


-BUSY = False
-KILLED = False
-
-def handle_term(signum, frame):
-    global BUSY
-    global KILLED
-    logging.info('Caught signal %s', signum)
-    KILLED = True
-    if not BUSY:
-        sys.exit(0)
-
-
-def main(argv):
+def progress_update(cluster_hosts):
+    """entry function."""
-    global BUSY
-    global KILLED
-    clusterids = [
-        int(clusterid) for clusterid in flags.OPTIONS.clusterids.split(',')
-        if clusterid
-    ]
-    signal.signal(signal.SIGINT, handle_term)
-
-    while True:
-        BUSY = True
-        with database.session() as session:
-            if not clusterids:
-                clusters = session.query(Cluster).all()
-                update_clusterids = [cluster.id for cluster in clusters]
-            else:
-                update_clusterids = clusterids
-
-        logging.info('update progress for clusters: %s', update_clusterids)
-        for clusterid in update_clusterids:
-            if flags.OPTIONS.async:
-                celery.send_task('compass.tasks.progress_update', (clusterid,))
-            else:
-                try:
-                    progress_update.update_progress(clusterid)
-                except Exception as error:
-                    logging.error('failed to update progress for cluster %s',
-                                  clusterid)
-                    logging.exception(error)
-                    pass
-
-        BUSY = False
-        if KILLED:
-            logging.info('exit progress update loop')
-            break
-
-        if flags.OPTIONS.once:
-            logging.info('trigger installer finsished')
-            break
-
-        if flags.OPTIONS.run_interval > 0:
-            logging.info('will rerun the trigger install after %s',
-                         flags.OPTIONS.run_interval)
-            time.sleep(flags.OPTIONS.run_interval)
-        else:
-            logging.info('rerun the trigger installer immediately')
+    if flags.OPTIONS.async:
+        celery.send_task('compass.tasks.update_progress', (cluster_hosts,))
+    else:
+        try:
+            update_progress.update_progress(cluster_hosts)
+        except Exception as error:
+            logging.error('failed to update progress for cluster_hosts: %s',
+                          cluster_hosts)
+            logging.exception(error)


 if __name__ == '__main__':
     flags.init()
     logsetting.init()
-    logging.info('run progress update: %s', sys.argv)
-    if flags.OPTIONS.daemonize:
-        with daemon.DaemonContext(
-            pidfile=lockfile.FileLock('/var/run/progress_update.pid'),
-            stderr=open('/tmp/progress_update_err.log', 'w+'),
-            stdout=open('/tmp/progress_update_out.log', 'w+')
-        ):
-            logging.info('run progress update as daemon')
-            main(sys.argv)
-    else:
-        main(sys.argv)
+    logging.info('run progress update')
+    daemonize.daemonize(
+        functools.partial(
+            progress_update,
+            util.get_clusters_from_str(flags.OPTIONS.clusters)),
+        flags.OPTIONS.run_interval,
+        pidfile=lockfile.FileLock('/var/run/progress_update.pid'),
+        stderr=open('/tmp/progress_update_err.log', 'w+'),
+        stdout=open('/tmp/progress_update_out.log', 'w+'))
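Note how `functools.partial` bridges the two interfaces here: `daemonize.daemonize` wants a zero-argument callable to invoke each cycle, while `progress_update` takes the parsed cluster_hosts dict, so the argument is bound once at startup. In isolation:

import functools


def progress_update(cluster_hosts):
    print 'updating progress for %s' % cluster_hosts


# bind the argument once; the result is callable with no parameters,
# which is what a generic run loop expects
callback = functools.partial(progress_update, {1: ['host1', 'host2']})
callback()  # prints: updating progress for {1: ['host1', 'host2']}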
bin/runserver.py (23 lines changed; normal file → executable file)
@@ -1,3 +1,22 @@
 #!/usr/bin/python
-from baseplate import app
-app.run(host = '0.0.0.0',debug = True)
+"""main script to start an instance of compass server."""
+import logging
+
+from compass.api import app
+from compass.utils import flags
+from compass.utils import logsetting
+
+
+flags.add('server_host',
+          help='server host address',
+          default='0.0.0.0')
+flags.add_bool('debug',
+               help='run in debug mode',
+               default=True)
+
+
+if __name__ == '__main__':
+    flags.init()
+    logsetting.init()
+    logging.info('run server')
+    app.run(host=flags.OPTIONS.server_host, debug=flags.OPTIONS.debug)
@@ -1,51 +0,0 @@
-#!/usr/bin/python
-
-import logging
-import sys
-
-from compass.db import database
-from compass.db.model import Cluster
-from compass.tasks.client import celery
-from compass.utils import flags
-from compass.utils import logsetting
-from compass.actions import trigger_install
-
-
-flags.add('clusterids',
-          help='comma seperated cluster ids',
-          default='')
-flags.add_bool('async',
-               help='ryn in async mode')
-
-
-def main(argv):
-    flags.init()
-    logsetting.init()
-    clusterids = [
-        int(clusterid) for clusterid in flags.OPTIONS.clusterids.split(',')
-        if clusterid
-    ]
-    with database.session() as session:
-        if not clusterids:
-            clusters = session.query(Cluster).all()
-            trigger_clusterids = [cluster.id for cluster in clusters]
-        else:
-            trigger_clusterids = clusterids
-
-        logging.info('trigger installer for clusters: %s',
-                     trigger_clusterids)
-        for clusterid in trigger_clusterids:
-            hosts = session.query(
-                ClusterHost).filter_by(
-                cluster_id=clsuterid).all()
-            hostids = [host.id for host in hosts]
-            if flags.OPTIONS.async:
-                celery.send_task('compass.tasks.trigger_install',
-                                 (clusterid, hostids))
-            else:
-                trigger_install.trigger_install(clusterid, hostids)
-
-
-if __name__ == '__main__':
-    main(sys.argv)
compass/actions/clean_deployment.py (new file, 28 lines)
@@ -0,0 +1,28 @@
+"""Module to clean deployment of a given cluster
+
+.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from compass.actions import util
+from compass.config_management.utils.config_manager import ConfigManager
+from compass.db import database
+
+
+def clean_deployment(cluster_hosts):
+    """Clean deployment of clusters.
+
+    :param cluster_hosts: clusters and hosts in each cluster to clean.
+    :type cluster_hosts: dict of int to list of int
+
+    .. note::
+        The function should be called out of database session.
+    """
+    logging.debug('clean cluster_hosts: %s', cluster_hosts)
+    with database.session():
+        cluster_hosts, os_versions, target_systems = (
+            util.update_cluster_hosts(cluster_hosts))
+        manager = ConfigManager()
+        manager.clean_cluster_and_hosts(
+            cluster_hosts, os_versions, target_systems)
+        manager.sync()
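The docstring pins down the action's contract: `cluster_hosts` maps a cluster id to a list of host ids, and the caller must not already hold a database session, since the action opens its own. A usage sketch (the ids here are hypothetical):

from compass.actions import clean_deployment

# {clusterid: [hostid, ...]} per the docstring; ids are made up here
cluster_hosts = {1: [1, 2], 2: []}

# called outside of database.session(), as the note requires
clean_deployment.clean_deployment(cluster_hosts)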
compass/actions/clean_installing_progress.py (new file, 29 lines)
@@ -0,0 +1,29 @@
+"""Module to clean installing progress of a given cluster
+
+.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from compass.actions import util
+from compass.config_management.utils.config_manager import ConfigManager
+from compass.db import database
+
+
+def clean_installing_progress(cluster_hosts):
+    """Clean installing progress of clusters.
+
+    :param cluster_hosts: clusters and hosts in each cluster to clean.
+    :type cluster_hosts: dict of int to list of int
+
+    .. note::
+        The function should be called out of database session.
+    """
+    logging.debug('clean installing progress of cluster_hosts: %s',
+                  cluster_hosts)
+    with database.session():
+        cluster_hosts, os_versions, target_systems = (
+            util.update_cluster_hosts(cluster_hosts))
+        manager = ConfigManager()
+        manager.clean_cluster_and_hosts_installing_progress(
+            cluster_hosts, os_versions, target_systems)
+        manager.sync()
@@ -1,19 +1,20 @@
 """Compass Command Line Interface"""

 import sys
 import os
 from subprocess import Popen

 from compass.actions.health_check import check
 from compass.utils.util import pretty_print

-ACTION_MAP = {"check": "apache celery dhcp dns hds misc os_installer "
-                       "package_installer squid tftp".split(" "),
-              "refresh": "db sync".split(" "),
-              }
+ACTION_MAP = {
+    "check": "apache celery dhcp dns hds misc os_installer "
+             "package_installer squid tftp".split(" "),
+    "refresh": "db sync".split(" "),
+}


-class BootCLI:
+class BootCLI(object):
+    """CLI to do compass check."""

     def __init__(self):
         return
@@ -33,7 +34,8 @@ class BootCLI:
         method = "self.run_" + action + "(module)"
         eval(method)

-    def get_action(self, args):
+    @classmethod
+    def get_action(cls, args):
         """
         This method returns an action type.
         For 'compass check dhcp' command, it will return 'check'.
@@ -44,7 +46,8 @@ class BootCLI:
             return args[1]
         return None

-    def get_module(self, action, args):
+    @classmethod
+    def get_module(cls, action, args):
         """
         This method returns a module.
         For 'compass check dhcp' command, it will return 'dhcp'.
@@ -66,19 +69,21 @@ class BootCLI:
         if module is None:
             pretty_print("Starting: Compass Health Check",
                          "==============================")
-            c = check.BootCheck()
-            res = c.run()
+            chk = check.BootCheck()
+            res = chk.run()
             self.output_check_result(res)

         else:
             pretty_print("Checking Module: %s" % module,
                          "============================")
-            c = check.BootCheck()
-            method = "c.check_" + module + "()"
+            chk = check.BootCheck()
+            method = "chk.check_" + module + "()"
             res = eval(method)
             print "\n".join(msg for msg in res[1])

-    def output_check_result(self, result):
+    @classmethod
+    def output_check_result(cls, result):
         """output check result."""
         if result == {}:
             return
         pretty_print("\n",
@@ -86,7 +91,6 @@ class BootCLI:
                      "* Compass Health Check Report *",
                      "===============================")
         successful = True
-        num = 1
         for key in result.keys():
             if result[key][0] == 0:
                 successful = False
@@ -101,7 +105,9 @@ class BootCLI:
                   "deploying!"
             sys.exit(1)

-    def run_refresh(self, action=None):
+    @classmethod
+    def run_refresh(cls, action=None):
         """Run refresh."""
         ## TODO: replace refresh.sh with refresh.py
         if action is None:
             pretty_print("Refreshing Compass...",
@@ -117,7 +123,9 @@ class BootCLI:
         Popen(['/opt/compass/bin/manage_db.py sync_from_installers'],
               shell=True)

-    def print_help(self, module_help=""):
+    @classmethod
+    def print_help(cls, module_help=""):
         """print help."""
         if module_help == "":
             pretty_print("usage\n=====",
                          "compass <refresh|check>",
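cli.py keeps its string-building dispatch (`method = "chk.check_" + module + "()"` followed by `res = eval(method)`); semantically this is plain dynamic attribute lookup, equivalent to:

from compass.actions.health_check import check

module = 'dhcp'  # e.g. what get_module returns for 'compass check dhcp'

# equivalent to the eval-based dispatch in run_check above
chk = check.BootCheck()
res = getattr(chk, 'check_' + module)()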
compass/actions/deploy.py (new file, 28 lines)
@@ -0,0 +1,28 @@
+"""Module to deploy a given cluster
+
+.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
+"""
+import logging
+
+from compass.actions import util
+from compass.config_management.utils.config_manager import ConfigManager
+from compass.db import database
+
+
+def deploy(cluster_hosts):
+    """Deploy clusters.
+
+    :param cluster_hosts: clusters and hosts in each cluster to deploy.
+    :type cluster_hosts: dict of int to list of int
+
+    .. note::
+        The function should be called out of database session.
+    """
+    logging.debug('deploy cluster_hosts: %s', cluster_hosts)
+    with database.session():
+        cluster_hosts, os_versions, target_systems = (
+            util.update_cluster_hosts(cluster_hosts))
+        manager = ConfigManager()
+        manager.install_cluster_and_hosts(
+            cluster_hosts, os_versions, target_systems)
+        manager.sync()
@@ -1,12 +1,10 @@
 """Base class for Compass Health Check"""

-import compass.utils.setting_wrapper as setting
-import utils as health_check_utils
-
-from compass.utils.util import pretty_print
+from compass.actions.health_check import utils as health_check_utils
+from compass.utils import setting_wrapper as setting


-class BaseCheck:
+class BaseCheck(object):
+    """health check base class."""

     def __init__(self):
         self.config = setting
@@ -15,8 +13,10 @@ class BaseCheck:
         self.dist, self.version, self.release = health_check_utils.get_dist()

     def _set_status(self, code, message):
+        """set status"""
         self.code = code
         self.messages.append(message)

     def get_status(self):
+        """get status"""
         return (self.code, self.messages)
@@ -1,21 +1,25 @@
 """Main Entry Point of Compass Health Check"""

-import check_apache as apache
-import check_celery as celery
-import check_dhcp as dhcp
-import check_dns as dns
-import check_hds as hds
-import check_os_installer as os_installer
-import check_package_installer as package_installer
-import check_squid as squid
-import check_tftp as tftp
-import check_misc as misc
-import base
+from compass.actions.health_check import check_apache as apache
+from compass.actions.health_check import check_celery as celery
+from compass.actions.health_check import check_dhcp as dhcp
+from compass.actions.health_check import check_dns as dns
+from compass.actions.health_check import check_hds as hds
+from compass.actions.health_check import (
+    check_os_installer as os_installer)
+from compass.actions.health_check import (
+    check_package_installer as package_installer)
+from compass.actions.health_check import check_squid as squid
+from compass.actions.health_check import check_tftp as tftp
+from compass.actions.health_check import check_misc as misc
+from compass.actions.health_check import base


 class BootCheck(base.BaseCheck):
+    """health check for all components"""

     def run(self):
+        """do health check"""
         status = {}
         status['apache'] = self.check_apache()
         status['celery'] = self.check_celery()
@@ -31,41 +35,51 @@ class BootCheck(base.BaseCheck):
         return status

     def check_apache(self):
+        """do apache health check"""
         checker = apache.ApacheCheck()
         return checker.run()

     def check_celery(self):
+        """do celery health check"""
         checker = celery.CeleryCheck()
         return checker.run()

     def check_dhcp(self):
+        """do dhcp health check"""
         checker = dhcp.DhcpCheck()
         return checker.run()

     def check_dns(self):
+        """do dns health check"""
         checker = dns.DnsCheck()
         return checker.run()

     def check_hds(self):
+        """do hds health check"""
         checker = hds.HdsCheck()
         return checker.run()

     def check_os_installer(self):
+        """do os installer health check"""
         checker = os_installer.OsInstallerCheck()
         return checker.run()

     def check_package_installer(self):
+        """do package installer health check"""
         checker = package_installer.PackageInstallerCheck()
         return checker.run()

     def check_squid(self):
+        """do squid health check"""
         checker = squid.SquidCheck()
         return checker.run()

     def check_tftp(self):
+        """do tftp health check"""
         checker = tftp.TftpCheck()
         return checker.run()

     def check_misc(self):
+        """do misc health check"""
         checker = misc.MiscCheck()
         return checker.run()
@@ -1,22 +1,18 @@
 """Health Check module for Apache service"""

-import os
-import re
-import commands
+import socket
 import urllib2

-from socket import *
-
-import utils as health_check_utils
-import base
-import logging
+from compass.actions.health_check import base
+from compass.actions.health_check import utils as health_check_utils


 class ApacheCheck(base.BaseCheck):
-
     """apache server health check class."""
     NAME = "Apache Check"

     def run(self):
+        """do the healthcheck"""
         if self.dist in ("centos", "redhat", "fedora", "scientific linux"):
             apache_service = 'httpd'
         else:
@@ -32,8 +28,7 @@ class ApacheCheck(base.BaseCheck):
         return (self.code, self.messages)

     def check_apache_conf(self, apache_service):
-        """
-        Validates if Apache settings.
+        """Validates if Apache settings.

        :param apache_service : service type of apache, os dependent.
                                e.g. httpd or apache2
@@ -63,7 +58,7 @@ class ApacheCheck(base.BaseCheck):
             apache_service)
         if not serv_err_msg == "":
             self._set_status(0, serv_err_msg)
-        if 'http' != getservbyport(80):
+        if 'http' != socket.getservbyport(80):
             self._set_status(
                 0,
                 "[%s]Error: Apache is not listening on port 80."
@ -1,19 +1,20 @@
"""Health Check module for Celery"""

import os
import re
import commands

import base
import utils as health_check_utils
from celery.task.control import inspect

from compass.actions.health_check import base
from compass.actions.health_check import utils as health_check_utils


class CeleryCheck(base.BaseCheck):

    """celery health check class."""
    NAME = "Celery Check"

    def run(self):
        """do health check"""
        self.check_compass_celery_setting()
        print "[Done]"
        self.check_celery_backend()
@ -28,10 +29,11 @@ class CeleryCheck(base.BaseCheck):
        """Validates Celery settings"""

        print "Checking Celery setting......",
        SETTING_MAP = {'logfile': 'CELERY_LOGFILE',
                       'configdir': 'CELERYCONFIG_DIR',
                       'configfile': 'CELERYCONFIG_FILE',
                       }
        setting_map = {
            'logfile': 'CELERY_LOGFILE',
            'configdir': 'CELERYCONFIG_DIR',
            'configfile': 'CELERYCONFIG_FILE',
        }

        res = health_check_utils.validate_setting('Celery',
                                                  self.config,
@ -61,9 +63,10 @@ class CeleryCheck(base.BaseCheck):
            self._set_status(0, res)

        unset = []
        for item in ['logfile', 'configdir', 'configfile']:
            if eval(item) == "":
                unset.append(SETTING_MAP[item])
        for item in [logfile, configdir, configfile]:
            if item == "":
                unset.append(setting_map[item])

        if len(unset) != 0:
            self._set_status(0,
                             "[%s]Error: Unset celery settings: %s"
@ -94,13 +97,16 @@ class CeleryCheck(base.BaseCheck):
                0,
                "[%s]Error: No running Celery workers were found."
                % self.NAME)
        except IOError as e:
        except IOError as error:
            self._set_status(
                0,
                "[%s]Error: Failed to connect to the backend: %s"
                % (self.NAME, str(e)))
                % (self.NAME, str(error)))
            from errno import errorcode
            if len(e.args) > 0 and errorcode.get(e.args[0]) == 'ECONNREFUSED':
            if (
                len(error.args) > 0 and
                errorcode.get(error.args[0]) == 'ECONNREFUSED'
            ):
                self.messages.append(
                    "[%s]Error: RabbitMQ server isn't running"
                    % self.NAME)
@ -4,18 +4,18 @@ import os
import re
import commands
import xmlrpclib
import sys
import socket

from socket import *

import base
from compass.actions.health_check import base


class DhcpCheck(base.BaseCheck):
    """dhcp health check class."""

    NAME = "DHCP Check"

    def run(self):
        """do health check"""
        installer = self.config.OS_INSTALLER
        method_name = "self.check_" + installer + "_dhcp()"
        return eval(method_name)
@ -24,10 +24,10 @@ class DhcpCheck(base.BaseCheck):
        """Checks if Cobbler has taken over DHCP service"""

        try:
            self.remote = xmlrpclib.Server(
            remote = xmlrpclib.Server(
                self.config.COBBLER_INSTALLER_URL,
                allow_none=True)
            self.token = self.remote.login(
            remote.login(
                *self.config.COBBLER_INSTALLER_TOKEN)
        except:
            self._set_status(
@ -36,11 +36,11 @@ class DhcpCheck(base.BaseCheck):
                "the tokens provided in the config file" % self.NAME)
            return (self.code, self.messages)

        cobbler_settings = self.remote.get_settings()
        cobbler_settings = remote.get_settings()
        if cobbler_settings['manage_dhcp'] == 0:
            self.messages.append(
                "[%s]Info: DHCP service is not managed by Compass"
                % self.NAME)
                "[%s]Info: DHCP service is "
                "not managed by Compass" % self.NAME)
            return (self.code, self.messages)
        self.check_cobbler_dhcp_template()
        print "[Done]"
@ -57,68 +57,76 @@ class DhcpCheck(base.BaseCheck):

        print "Checking DHCP template......",
        if os.path.exists("/etc/cobbler/dhcp.template"):
            VAR_MAP = {"match_next_server": False,
                       "match_subnet": False,
                       "match_filename": False,
                       "match_range": False,
                       }
            var_map = {
                "match_next_server": False,
                "match_subnet": False,
                "match_filename": False,
                "match_range": False,
            }

            ip_regex = re.compile('^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
            ip_regex = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')

            f = open("/etc/cobbler/dhcp.template")
            for line in f.readlines():
            dhcp_template = open("/etc/cobbler/dhcp.template")
            for line in dhcp_template.readlines():
                if line.find("next_server") != -1:
                    elmlist = line.split(" ")
                    for elm in elmlist:
                        if ";" in elm:
                            elm = elm[:-2]

                        if "$next_server" in elm or ip_regex.match(elm):
                            VAR_MAP["match_next_server"] = True
                            var_map["match_next_server"] = True

                elif line.find("subnet") != -1 and line.find("{") != -1:
                    elmlist = line.split(" ")
                    for elm in elmlist:
                        if ip_regex.match(elm):
                            if elm[-1] == "0" and "255" not in elm:
                                VAR_MAP["match_subnet"] = True
                                var_map["match_subnet"] = True
                            elif elm[-1] != "0":
                                self.messages.append(
                                    "[%s]Error: Subnet should be set "
                                    "in the form of 192.168.0.0 in"
                                    "/etc/cobbler/dhcp.template"
                                    % self.NAME)
                                    "/etc/cobbler/dhcp.template" % self.NAME)

                elif line.find("filename") != -1:
                    VAR_MAP["match_filename"] = True
                    var_map["match_filename"] = True
                elif line.find("range dynamic-bootp") != -1:
                    elmlist = line.split(" ")
                    ip_count = 0
                    for elm in elmlist:
                        if ";" in elm and "\n" in elm:
                            elm = elm[:-2]

                        if ip_regex.match(elm):
                            ip_count += 1

                    if ip_count != 2:
                        self.messages.append(
                            "[%s]Error: DHCP range should be set "
                            "between two IP addresses in "
                            "/etc/cobbler/dhcp.template" % self.NAME)
                    else:
                        VAR_MAP["match_range"] = True
                        var_map["match_range"] = True

            dhcp_template.close()
            fails = []
            for var in var_map.keys():
                if var_map[var] is False:
                    fails.append(var)

            if len(fails) != 0:
                self._set_status(
                    0,
                    "[%s]Info: DHCP template file "
                    "failed components: %s" % (
                        self.NAME, ' '.join(failed for failed in fails)))

            f.close()
            failed = []
            for var in VAR_MAP.keys():
                if VAR_MAP[var] is False:
                    failed.append(var)
            if len(failed) != 0:
                self._set_status(0,
                                 "[%s]Info: DHCP template file "
                                 "failed components: %s"
                                 % (self.NAME, ' '.join(f for f in failed)))
        else:
            self._set_status(0,
                             "[%s]Error: DHCP template file doesn't exist, "
                             "health check failed." % self.NAME)
            self._set_status(
                0,
                "[%s]Error: DHCP template file doesn't exist, "
                "health check failed." % self.NAME)
        return True

    def check_dhcp_service(self):
@ -128,11 +136,12 @@ class DhcpCheck(base.BaseCheck):
        if not 'dhcp' in commands.getoutput('ps -ef'):
            self._set_status(
                0,
                "[%s]Error: dhcp service does not seem to be running"
                % self.NAME)
        if getservbyport(67) != 'bootps':
                "[%s]Error: dhcp service does not "
                "seem to be running" % self.NAME)

        if socket.getservbyport(67) != 'bootps':
            self._set_status(
                0,
                "[%s]Error: bootps is not listening on port 67"
                % self.NAME)
                "[%s]Error: bootps is not listening "
                "on port 67" % self.NAME)
        return True
@ -1,20 +1,19 @@
"""Health Check module for DNS service"""

import os
import re
import xmlrpclib
import commands
import os
import socket
import xmlrpclib

from socket import *

import base
from compass.actions.health_check import base


class DnsCheck(base.BaseCheck):

    """dns health check class."""
    NAME = "DNS Check"

    def run(self):
        """do health check"""
        installer = self.config.OS_INSTALLER
        method_name = "self.check_" + installer + "_dns()"
        return eval(method_name)
@ -23,10 +22,10 @@ class DnsCheck(base.BaseCheck):
        """Checks if Cobbler has taken over DNS service"""

        try:
            self.remote = xmlrpclib.Server(
            remote = xmlrpclib.Server(
                self.config.COBBLER_INSTALLER_URL,
                allow_none=True)
            self.token = self.remote.login(
            remote.login(
                *self.config.COBBLER_INSTALLER_TOKEN)
        except:
            self._set_status(0,
@ -35,7 +34,7 @@ class DnsCheck(base.BaseCheck):
                             % self.NAME)
            return (self.code, self.messages)

        cobbler_settings = self.remote.get_settings()
        cobbler_settings = remote.get_settings()
        if cobbler_settings['manage_dns'] == 0:
            self.messages.append('[DNS]Info: DNS is not managed by Compass')
            return (self.code, self.messages)
@ -54,49 +53,58 @@ class DnsCheck(base.BaseCheck):

        print "Checking DNS template......",
        if os.path.exists("/etc/cobbler/named.template"):
            VAR_MAP = {"match_port": False,
                       "match_allow_query": False,
                       }
            f = open("/etc/cobbler/named.template")
            host_ip = gethostbyname(gethostname())
            var_map = {
                "match_port": False,
                "match_allow_query": False,
            }
            named_template = open("/etc/cobbler/named.template")
            host_ip = socket.gethostbyname(socket.gethostname())
            missing_query = []
            for line in f.readlines():
            for line in named_template.readlines():
                if "listen-on port 53" in line and host_ip in line:
                    VAR_MAP["match_port"] = True
                    var_map["match_port"] = True

                if "allow-query" in line:
                    for subnet in ["127.0.0.0/8"]:
                        if not subnet in line:
                            missing_query.append(subnet)
            f.close()

            if VAR_MAP["match_port"] is False:
            named_template.close()

            if var_map["match_port"] is False:
                self.messages.append(
                    "[%s]Error: named service port and/or IP is "
                    "misconfigured in /etc/cobbler/named.template"
                    % self.NAME)
                    "[%s]Error: named service port "
                    "and/or IP is misconfigured in "
                    "/etc/cobbler/named.template" % self.NAME)

            if len(missing_query) != 0:
                self.messages.append(
                    "[%s]Error: Missing allow_query values in "
                    "/etc/cobbler/named.template:%s"
                    % (self.Name,
                       ', '.join(subnet for subnet in missing_query)))
                    "/etc/cobbler/named.template:%s" % (
                        self.NAME,
                        ', '.join(subnet for subnet in missing_query)))
            else:
                VAR_MAP["match_allow_query"] = True
                var_map["match_allow_query"] = True

            failed = []
            for var in VAR_MAP.keys():
                if VAR_MAP[var] is False:
                    failed.append(var)
            if len(failed) != 0:
            fails = []
            for var in var_map.keys():
                if var_map[var] is False:
                    fails.append(var)

            if len(fails) != 0:
                self._set_status(
                    0,
                    "[%s]Info: DNS template failed components: %s"
                    % (self.NAME, ' '.join(f for f in failed)))
                    "[%s]Info: DNS template failed components: "
                    "%s" % (
                        self.NAME,
                        ' '.join(failed for failed in fails)))

        else:
            self._set_status(
                0,
                "[%s]Error: named template file doesn't exist, "
                "health check failed." % self.NAME)

        return True

    def check_dns_service(self):
@ -106,12 +114,13 @@ class DnsCheck(base.BaseCheck):
        if not 'named' in commands.getoutput('ps -ef'):
            self._set_status(
                0,
                "[%s]Error: named service does not seem to be running"
                % self.NAME)
                "[%s]Error: named service does not seem to be "
                "running" % self.NAME)

        if getservbyport(53) != 'domain':
        if socket.getservbyport(53) != 'domain':
            self._set_status(
                0,
                "[%s]Error: domain service is not listening on port 53"
                % self.NAME)
                "[%s]Error: domain service is not listening on port "
                "53" % self.NAME)

        return None
@ -1,17 +1,14 @@
"""Health Check module for Hardware Discovery"""

import os
import re

import base
import utils as health_check_utils
from compass.actions.health_check import base
from compass.actions.health_check import utils as health_check_utils


class HdsCheck(base.BaseCheck):

    """hds health check class"""
    NAME = "HDS Check"

    def run(self):
        """do health check"""
        if self.dist in ("centos", "redhat", "fedora", "scientific linux"):
            pkg_type = "yum"
        else:
@ -20,7 +17,7 @@ class HdsCheck(base.BaseCheck):
            pkg_module = __import__(pkg_type)
        except:
            self.messages.append("[%s]Error: No module named %s, "
                                 "please install it first."
                                 "please install it first."
                                 % (self.NAME, pkg_module))
        method_name = 'self.check_' + pkg_type + '_snmp(pkg_module)'
        eval(method_name)
@ -56,6 +53,7 @@ class HdsCheck(base.BaseCheck):
        return True

    def check_apt_snmp(self, pkg_module):
        """do apt health check"""
        ## TODO: add ubuntu package check here
        return None

@ -1,34 +1,29 @@
"""Miscellaneous Health Check for Compass"""

import os
import re
import commands
import base
import utils as health_check_utils
from compass.actions.health_check import base
from compass.actions.health_check import utils as health_check_utils


class MiscCheck(base.BaseCheck):

    """health check for misc"""
    NAME = "Miscellaneous Check"

    MISC_MAPPING = {
        "yum": "rsyslog ntp iproute openssh-clients python git wget "
               "python-setuptools python-netaddr python-flask "
               "python-flask-sqlalchemy python-amqplib amqp "
               "python-paramiko python-mock mod_wsgi httpd squid "
               "dhcp bind rsync yum-utils xinetd tftp-server gcc "
               "net-snmp-utils net-snmp python-daemon".split(" "),

        "pip": "flask-script flask-restful celery six discover "
               "unittest2 chef".replace("-", "_").split(" "),

        "disable": "iptables ip6tables".split(" "),

        "enable": "httpd squid xinetd dhcpd named sshd rsyslog cobblerd "
                  "ntpd compassd".split(" "),
        "yum": "rsyslog ntp iproute openssh-clients python git wget "
               "python-setuptools python-netaddr python-flask "
               "python-flask-sqlalchemy python-amqplib amqp "
               "python-paramiko python-mock mod_wsgi httpd squid "
               "dhcp bind rsync yum-utils xinetd tftp-server gcc "
               "net-snmp-utils net-snmp python-daemon".split(" "),
        "pip": "flask-script flask-restful celery six discover "
               "unittest2 chef".replace("-", "_").split(" "),
        "disable": "iptables ip6tables".split(" "),
        "enable": "httpd squid xinetd dhcpd named sshd rsyslog cobblerd "
                  "ntpd compassd".split(" "),
    }

    def run(self):
        """do health check"""
        self.check_linux_dependencies()
        print "[Done]"
        self.check_pip_dependencies()
@ -191,9 +186,9 @@ class MiscCheck(base.BaseCheck):
        """Check if SELinux is disabled"""

        print "Checking Selinux......",
        f = open("/etc/selinux/config")
        selinux = open("/etc/selinux/config")
        disabled = False
        for line in f.readlines():
        for line in selinux.readlines():
            if "SELINUX=disabled" in line:
                disabled = True
                break
@ -203,4 +198,5 @@ class MiscCheck(base.BaseCheck):
                "[%s]Selinux is not disabled, "
                "please disable it in /etc/selinux/config." % self.NAME)

        selinux.close()
        return True
@ -1,17 +1,17 @@
"""Compass Health Check module for OS Installer"""

import os
import re
import xmlrpclib

import base
from compass.actions.health_check import base


class OsInstallerCheck(base.BaseCheck):

    """os installer health check"""
    NAME = "OS Installer Check"

    def run(self):
        """do health check"""
        installer = self.config.OS_INSTALLER
        method_name = 'self.' + installer + '_check()'
        return eval(method_name)
@ -20,10 +20,10 @@ class OsInstallerCheck(base.BaseCheck):
        """Runs cobbler check from xmlrpc client"""

        try:
            self.remote = xmlrpclib.Server(
            remote = xmlrpclib.Server(
                self.config.COBBLER_INSTALLER_URL,
                allow_none=True)
            self.token = self.remote.login(
            token = remote.login(
                *self.config.COBBLER_INSTALLER_TOKEN)
        except:
            self.code = 0
@ -37,7 +37,7 @@ class OsInstallerCheck(base.BaseCheck):
                "is properly configured" % self.NAME)
            return (self.code, self.messages)

        check_result = self.remote.check(self.token)
        check_result = remote.check(token)

        for index, message in enumerate(check_result):
            if "SELinux" in message:
@ -47,18 +47,18 @@ class OsInstallerCheck(base.BaseCheck):
        for error_msg in check_result:
            self.messages.append("[%s]Error: " % self.NAME + error_msg)

        if len(self.remote.get_distros()) == 0:
        if len(remote.get_distros()) == 0:
            self._set_status(0,
                             "[%s]Error: No Cobbler distros found" % self.NAME)

        if len(self.remote.get_profiles()) == 0:
        if len(remote.get_profiles()) == 0:
            self._set_status(0,
                             "[%s]Error: No Cobbler profiles found"
                             % self.NAME)

        found_ppa = False
        if len(self.remote.get_repos()) != 0:
            for repo in self.remote.get_repos():
        if len(remote.get_repos()) != 0:
            for repo in remote.get_repos():
                if 'ppa_repo' in repo['mirror']:
                    found_ppa = True
                    break
@ -67,48 +67,61 @@ class OsInstallerCheck(base.BaseCheck):
                "[%s]Error: No repository ppa_repo found"
                % self.NAME)

        PATH_MAP = {'match_kickstart': ('/var/lib/cobbler/kickstarts/',
                                        ['default.ks', ]
                                        ),
                    'match_snippets': ('/var/lib/cobbler/snippets/',
                                       [
                                           'chef',
                                           'chef-validator.pem',
                                           'client.rb',
                                           'first-boot.json',
                                           'kickstart_done',
                                           'kickstart_start',
                                           'network_config',
                                           'ntp.conf',
                                           'partition_disks',
                                           'partition_select',
                                           'post_anamon',
                                           'post_install_network_config',
                                           'pre_anamon',
                                           'pre_install_network_config',
                                           'rsyslogchef',
                                           'rsyslogconf',
                                           'yum.conf',
                                       ]
                                       ),
                    'match_ks_mirror': ('/var/www/cobbler/',
                                        ['ks_mirror']
                                        ),
                    'match_repo_mirror': ('/var/www/cobbler/',
                                          ['repo_mirror/ppa_repo']
                                          ),
                    'match_iso': ('/var/lib/cobbler/', ['iso']),
                    }
        path_map = {
            'match_kickstart': (
                '/var/lib/cobbler/kickstarts/',
                ['default.ks', ]
            ),
            'match_snippets': (
                '/var/lib/cobbler/snippets/',
                [
                    'chef',
                    'chef-validator.pem',
                    'client.rb',
                    'first-boot.json',
                    'kickstart_done',
                    'kickstart_start',
                    'network_config',
                    'ntp.conf',
                    'partition_disks',
                    'partition_select',
                    'post_anamon',
                    'post_install_network_config',
                    'pre_anamon',
                    'pre_install_network_config',
                    'rsyslogchef',
                    'rsyslogconf',
                    'yum.conf',
                ]
            ),
            'match_ks_mirror': (
                '/var/www/cobbler/',
                ['ks_mirror']
            ),
            'match_repo_mirror': (
                '/var/www/cobbler/',
                ['repo_mirror/ppa_repo']
            ),
            'match_iso': (
                '/var/lib/cobbler/',
                ['iso']
            ),
        }
        not_exists = []
        for key in PATH_MAP.keys():
            for path in PATH_MAP[key][1]:
                if not os.path.exists(PATH_MAP[key][0] + path):
                    not_exists.append(PATH_MAP[key][0] + path)
        for key in path_map.keys():
            for path in path_map[key][1]:
                if not os.path.exists(path_map[key][0] + path):
                    not_exists.append(path_map[key][0] + path)

        if len(not_exists) != 0:
            self._set_status(
                0,
                "[%s]Error: These locations do not exist: %s"
                % (self.NAME, ', '.join(item for item in not_exists)))
                "[%s]Error: These locations do not exist: "
                "%s" % (
                    self.NAME,
                    ', '.join(item for item in not_exists)
                )
            )

        if self.code == 1:
            self.messages.append(
@ -1,19 +1,19 @@
"""Health Check module for Package Installer"""

import os
import re
import requests

import base
import utils as health_check_utils
import setting as health_check_setting
from compass.actions.health_check import base
from compass.actions.health_check import utils as health_check_utils
from compass.actions.health_check import setting as health_check_setting


class PackageInstallerCheck(base.BaseCheck):

    """package installer health check class."""
    NAME = "Package Installer Check"

    def run(self):
        """do health check"""
        installer = self.config.PACKAGE_INSTALLER
        method_name = "self." + installer + "_check()"
        return eval(method_name)
@ -21,15 +21,16 @@ class PackageInstallerCheck(base.BaseCheck):
    def chef_check(self):
        """Checks chef setting, cookbooks, databags and roles"""

        CHEFDATA_MAP = {'CookBook': health_check_setting.COOKBOOKS,
                        'DataBag': health_check_setting.DATABAGS,
                        'Role': health_check_setting.ROLES,
                        }
        chef_data_map = {
            'CookBook': health_check_setting.COOKBOOKS,
            'DataBag': health_check_setting.DATABAGS,
            'Role': health_check_setting.ROLES,
        }

        total_missing = []
        for data_type in CHEFDATA_MAP.keys():
        for data_type in chef_data_map.keys():
            total_missing.append(self.check_chef_data(data_type,
                                                      CHEFDATA_MAP[data_type]))
                                                      chef_data_map[data_type]))
        print "[Done]"

        missing = False
@ -46,16 +47,19 @@ class PackageInstallerCheck(base.BaseCheck):
                    ', '.join(missed for missed in item[1])))
            self._set_status(
                0,
                "[%s]Error: Missing modules on chef server: %s. "
                % (self.NAME, ' ;'.join(message for message in messages)))
                "[%s]Error: Missing modules on chef server: "
                "%s." % (
                    self.NAME,
                    ' ;'.join(message for message in messages)))

        self.check_chef_config_dir()
        print "[Done]"
        if self.code == 1:
            self.messages.append(
                "[%s]Info: Package installer health check "
                "has completed. No problems found, all systems go."
                % self.NAME)
                "has completed. No problems found, all systems "
                "go." % self.NAME)

        return (self.code, self.messages)

    def check_chef_data(self, data_type, github_url):
@ -79,26 +83,28 @@ class PackageInstallerCheck(base.BaseCheck):

            return self.get_status()

        self.api_ = chef.autoconfigure()
        api = chef.autoconfigure()

        github = set([item['name']
                      for item in
                      requests.get(github_url).json()])
        github = set([
            item['name']
            for item in requests.get(github_url).json()
        ])
        if data_type == 'CookBook':
            local = set(os.listdir('/var/chef/cookbooks'))
        elif data_type == 'Role':
            local = set([name
                         for
                         name, item
                         in
                         chef.Role.list(api=self.api_).iteritems()])
            github = set([item['name'].replace(".rb", "")
                          for item in
                          requests.get(github_url).json()])
            local = set([
                name for name, item in chef.Role.list(api=api).iteritems()
            ])
            github = set([
                item['name'].replace(".rb", "")
                for item in requests.get(github_url).json()
            ])
        else:
            local = set([item
                         for item in
                         eval('chef.' + data_type + '.list(api = self.api_)')])
            local = set([
                item for item in eval(
                    'chef.' + data_type + '.list(api=api)'
                )
            ])
        diff = github - local

        if len(diff) <= 0:
@ -1,21 +1,20 @@
"""Health Check module for Squid service"""

import os
import re
import commands
import pwd
import socket

from socket import *

import base
import utils as health_check_utils
from compass.actions.health_check import base
from compass.actions.health_check import utils as health_check_utils


class SquidCheck(base.BaseCheck):

    """Squid health check class."""
    NAME = "Squid Check"

    def run(self):
        """do health check"""
        self.check_squid_files()
        print "[Done]"
        self.check_squid_service()
@ -30,10 +29,11 @@ class SquidCheck(base.BaseCheck):
        """Validates squid config, cache directory and ownership"""

        print "Checking Squid Files......",
        VAR_MAP = {'match_squid_conf': False,
                   'match_squid_cache': False,
                   'match_squid_ownership': False,
                   }
        var_map = {
            'match_squid_conf': False,
            'match_squid_cache': False,
            'match_squid_ownership': False,
        }

        conf_err_msg = health_check_utils.check_path(
            self.NAME,
@ -46,22 +46,25 @@ class SquidCheck(base.BaseCheck):
                "[%s]Error: squid.conf has incorrect "
                "file permissions" % self.NAME)
        else:
            VAR_MAP['match_squid_conf'] = True
            var_map['match_squid_conf'] = True

        squid_path_err_msg = health_check_utils.check_path(
            self.NAME,
            '/var/squid/')
            self.NAME, '/var/squid/')
        if not squid_path_err_msg == "":
            self.set_stauts(0, squid_path_err_msg)
        elif health_check_utils.check_path(self.NAME,
                                           '/var/squid/cache') != "":
            self._set_status(0, squid_path_err_msg)
        elif health_check_utils.check_path(
            self.NAME,
            '/var/squid/cache'
        ) != "":
            self._set_status(
                0,
                health_check_utils.check_path(
                    self.NAME,
                    '/var/squid/cache'))
                    '/var/squid/cache'
                )
            )
        else:
            VAR_MAP['match_squid_cache'] = True
            var_map['match_squid_cache'] = True
        uid = os.stat('/var/squid/').st_uid
        gid = os.stat('/var/squid/').st_gid
        if uid != gid or pwd.getpwuid(23).pw_name != 'squid':
@ -70,16 +73,21 @@ class SquidCheck(base.BaseCheck):
                "[%s]Error: /var/squid directory ownership "
                "misconfigured" % self.NAME)
        else:
            VAR_MAP['match_squid_ownership'] = True
            var_map['match_squid_ownership'] = True

        failed = []
        for key in VAR_MAP.keys():
            if VAR_MAP[key] is False:
                failed.append(key)
        if len(failed) != 0:
        fails = []
        for key in var_map.keys():
            if var_map[key] is False:
                fails.append(key)

        if len(fails) != 0:
            self.messages.append(
                "[%s]Info: Failed components for squid config: %s"
                % (self.NAME, ', '.join(item for item in failed)))
                "[%s]Info: Failed components for squid config: "
                "%s" % (
                    self.NAME,
                    ', '.join(item for item in fails)
                )
            )
        return True

    def check_squid_service(self):
@ -89,18 +97,20 @@ class SquidCheck(base.BaseCheck):
        if not 'squid' in commands.getoutput('ps -ef'):
            self._set_status(
                0,
                "[%s]Error: squid service does not seem running"
                % self.NAME)
                "[%s]Error: squid service does not seem "
                "running" % self.NAME)

        try:
            if 'squid' != getservbyport(3128):
            if 'squid' != socket.getservbyport(3128):
                self._set_status(
                    0,
                    "[%s]Error: squid is not listening on 3128"
                    % self.NAME)
                    "[%s]Error: squid is not listening on "
                    "3128" % self.NAME)

        except:
            self._set_status(
                0,
                "[%s]Error: No service is listening on 3128, "
                "squid failed" % self.NAME)

        return True
@ -1,21 +1,19 @@
"""Health Check module for TFTP service"""

import os
import re
import sys
import xmlrpclib
import commands
from socket import *
import socket

import base
import utils as health_check_utils
from compass.actions.health_check import base
from compass.actions.health_check import utils as health_check_utils


class TftpCheck(base.BaseCheck):

    """tftp health check class"""
    NAME = "TFTP Check"

    def run(self):
        """do health check"""
        installer = self.config.OS_INSTALLER
        method_name = "self.check_" + installer + "_tftp()"
        return eval(method_name)
@ -29,10 +27,10 @@ class TftpCheck(base.BaseCheck):
        """

        try:
            self.remote = xmlrpclib.Server(
            remote = xmlrpclib.Server(
                self.config.COBBLER_INSTALLER_URL,
                allow_none=True)
            self.token = self.remote.login(
            remote.login(
                *self.config.COBBLER_INSTALLER_TOKEN)
        except:
            self._set_status(
@ -41,7 +39,7 @@ class TftpCheck(base.BaseCheck):
                " provided in the config file" % self.NAME)
            return (self.code, self.messages)

        cobbler_settings = self.remote.get_settings()
        cobbler_settings = remote.get_settings()
        if cobbler_settings['manage_tftp'] == 0:
            self.messages.append(
                '[TFTP]Info: tftp service is not managed by Compass')
@ -79,7 +77,7 @@ class TftpCheck(base.BaseCheck):
        if not serv_err_msg == "":
            self._set_status(0, serv_err_msg)

        if 'tftp' != getservbyport(69):
        if 'tftp' != socket.getservbyport(69):
            self._set_status(
                0,
                "[%s]Error: tftp doesn't seem to be listening "
@ -1,6 +1,12 @@
"""Health Check Settings"""

# Chef data on github
COOKBOOKS="https://api.github.com/repos/stackforge/compass-adapters/contents/chef/cookbooks"
ROLES="https://api.github.com/repos/stackforge/compass-adapters/contents/chef/roles"
DATABAGS="https://api.github.com/repos/stackforge/compass-adapters/contents/chef/databags"
COOKBOOKS = (
    "https://api.github.com/repos/stackforge"
    "/compass-adapters/contents/chef/cookbooks")
ROLES = (
    "https://api.github.com/repos/stackforge"
    "/compass-adapters/contents/chef/roles")
DATABAGS = (
    "https://api.github.com/repos/stackforge"
    "/compass-adapters/contents/chef/databags")
@ -27,8 +27,8 @@ def validate_setting(module, setting, param):
def get_dist():
    """Returns the operating system related information"""

    os, version, release = platform.linux_distribution()
    return (os.lower().strip(), version, release.lower().strip())
    os_version, version, release = platform.linux_distribution()
    return (os_version.lower().strip(), version, release.lower().strip())


def check_path(module_name, path):
@ -43,8 +43,9 @@ def check_path(module_name, path):
    """
    err_msg = ""
    if not os.path.exists(path):
        err_msg = "[%s]Error: %s does not exsit, "
        "please check your configurations." % (module_name, path)
        err_msg = (
            "[%s]Error: %s does not exsit, "
            "please check your configurations.") % (module_name, path)
    return err_msg


@ -60,8 +61,9 @@ def check_service_running(module_name, service_name):
    """
    err_msg = ""
    if not service_name in commands.getoutput('ps -ef'):
        err_msg = "[%s]Error: %s is not running." \
                  % (module_name, service_name)
        err_msg = "[%s]Error: %s is not running." % (
            module_name, service_name)

    return err_msg


@ -73,9 +75,10 @@ def check_chkconfig(service_name):
    :type service_name : string

    """
    on = False
    chk_on = False
    for service in os.listdir('/etc/rc3.d/'):
        if service_name in service and 'S' in service:
            on = True
            chk_on = True
            break
    return on

    return chk_on
@ -21,82 +21,87 @@ def poll_switch(ip_addr, req_obj='mac', oper="SCAN"):
    :type oper: str, should be one of ['SCAN', 'GET', 'SET']

    .. note::
       The function should be called inside database session scope.
       The function should be called out of database session scope.

    """
    UNDERMONITORING = 'under_monitoring'
    UNREACHABLE = 'unreachable'
    under_monitoring = 'under_monitoring'
    unreachable = 'unreachable'

    if not ip_addr:
        logging.error('No switch IP address is provided!')
        return

    #Retrieve vendor info from switch table
    session = database.current_session()
    switch = session.query(Switch).filter_by(ip=ip_addr).first()
    logging.info("pollswitch: %s", switch)
    if not switch:
        logging.error('no switch found for %s', ip_addr)
        return
    with database.session() as session:
        #Retrieve vendor info from switch table
        switch = session.query(Switch).filter_by(ip=ip_addr).first()
        logging.info("pollswitch: %s", switch)
        if not switch:
            logging.error('no switch found for %s', ip_addr)
            return

    credential = switch.credential
    logging.info("pollswitch: credential %r", credential)
    vendor = switch.vendor
    prev_state = switch.state
    hdmanager = HDManager()
        credential = switch.credential
        logging.info("pollswitch: credential %r", credential)
        vendor = switch.vendor
        prev_state = switch.state
        hdmanager = HDManager()

    vendor, vstate, err_msg = hdmanager.get_vendor(ip_addr, credential)
    if not vendor:
        switch.state = vstate
        switch.err_msg = err_msg
        logging.info("*****error_msg: %s****", switch.err_msg)
        logging.error('no vendor found or match switch %s', switch)
        return
        vendor, vstate, err_msg = hdmanager.get_vendor(ip_addr, credential)
        if not vendor:
            switch.state = vstate
            switch.err_msg = err_msg
            logging.info("*****error_msg: %s****", switch.err_msg)
            logging.error('no vendor found or match switch %s', switch)
            return

    switch.vendor = vendor
        switch.vendor = vendor

    # Start to poll switch's mac address.....
    logging.debug('hdmanager learn switch from %s %s %s %s %s',
                  ip_addr, credential, vendor, req_obj, oper)
    results = []

    try:
        results = hdmanager.learn(ip_addr, credential, vendor, req_obj, oper)
    except:
        switch.state = UNREACHABLE
        switch.err_msg = "SNMP walk for querying MAC addresses timedout"
        return

    logging.info("pollswitch %s result: %s", switch, results)
    if not results:
        logging.error('no result learned from %s %s %s %s %s',
        # Start to poll switch's mac address.....
        logging.debug('hdmanager learn switch from %s %s %s %s %s',
                      ip_addr, credential, vendor, req_obj, oper)
        results = []

    switch_id = switch.id
    filter_ports = session.query(SwitchConfig.filter_port)\
        .filter(SwitchConfig.ip == Switch.ip)\
        .filter(Switch.id == switch_id).all()
    logging.info("***********filter posts are %s********", filter_ports)
    if filter_ports:
        #Get all ports from tuples into list
        filter_ports = [i[0] for i in filter_ports]
        try:
            results = hdmanager.learn(
                ip_addr, credential, vendor, req_obj, oper)
        except Exception as error:
            logging.exception(error)
            switch.state = unreachable
            switch.err_msg = "SNMP walk for querying MAC addresses timedout"
            return

    for entry in results:
        mac = entry['mac']
        port = entry['port']
        vlan = entry['vlan']
        if port in filter_ports:
            continue
        logging.info("pollswitch %s result: %s", switch, results)
        if not results:
            logging.error('no result learned from %s %s %s %s %s',
                          ip_addr, credential, vendor, req_obj, oper)
            return

        machine = session.query(Machine).filter_by(mac=mac, port=port,
                                                   switch_id=switch_id).first()
        if not machine:
            machine = Machine(mac=mac, port=port, vlan=vlan)
            session.add(machine)
        machine.switch = switch
        switch_id = switch.id
        filter_ports = session.query(
            SwitchConfig.filter_port).filter(
            SwitchConfig.ip == Switch.ip).filter(
            Switch.id == switch_id).all()
        logging.info("***********filter posts are %s********", filter_ports)
        if filter_ports:
            #Get all ports from tuples into list
            filter_ports = [i[0] for i in filter_ports]

    logging.debug('update switch %s state to under monitoring', switch)
    if prev_state != UNDERMONITORING:
        #Update error message in db
        switch.err_msg = ""
    switch.state = UNDERMONITORING
        for entry in results:
            mac = entry['mac']
            port = entry['port']
            vlan = entry['vlan']
            if port in filter_ports:
                continue

            machine = session.query(Machine).filter_by(
                mac=mac, port=port, switch_id=switch_id).first()
            if not machine:
                machine = Machine(mac=mac, port=port, vlan=vlan)
                session.add(machine)
            machine.switch = switch

        logging.debug('update switch %s state to under monitoring', switch)
        if prev_state != under_monitoring:
            #Update error message in db
            switch.err_msg = ""

        switch.state = under_monitoring
@ -1,61 +0,0 @@
"""Module to update status and installing progress of the given cluster.

.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging

from compass.db import database
from compass.db.model import Cluster
from compass.log_analyzor import progress_calculator
from compass.utils import setting_wrapper as setting


def update_progress(clusterid):
    """Update status and installing progress of the given cluster.

    :param clusterid: the id of the cluster to get the progress.
    :type clusterid: int

    .. note::
       The function should be called out of the database session scope.
       In the function, it will update the database cluster_state and
       host_state table for the deploying cluster and hosts.

       The function will also query log_progressing_history table to get
       the lastest installing progress and the position of log it has
       processed in the last run. The function uses these information to
       avoid recalculate the progress from the beginning of the log file.
       After the progress got updated, these information will be stored back
       to the log_progressing_history for next time run.
    """
    os_version = ''
    target_system = ''
    hostids = []
    with database.session() as session:
        cluster = session.query(Cluster).filter_by(id=clusterid).first()
        if not cluster:
            logging.error('no cluster found for %s', clusterid)
            return

        if not cluster.adapter:
            logging.error('there is no adapter for cluster %s', clusterid)
            return

        os_version = cluster.adapter.os
        target_system = cluster.adapter.target_system
        if not cluster.state:
            logging.error('there is no state for cluster %s', clusterid)
            return

        if cluster.state.state != 'INSTALLING':
            logging.error('the state %s is not in installing for cluster %s',
                          cluster.state.state, clusterid)
            return

        hostids = [host.id for host in cluster.hosts]

    progress_calculator.update_progress(setting.OS_INSTALLER,
                                        os_version,
                                        setting.PACKAGE_INSTALLER,
                                        target_system,
                                        clusterid, hostids)
28
compass/actions/reinstall.py
Normal file
28
compass/actions/reinstall.py
Normal file
@ -0,0 +1,28 @@
"""Module to reinstall a given cluster

.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging

from compass.actions import util
from compass.config_management.utils.config_manager import ConfigManager
from compass.db import database


def reinstall(cluster_hosts):
    """Reinstall clusters.

    :param cluster_hosts: clusters and hosts in each cluster to reinstall.
    :type cluster_hosts: dict of int to list of int

    .. note::
       The function should be called out of database session.
    """
    logging.debug('reinstall cluster_hosts: %s', cluster_hosts)
    with database.session():
        cluster_hosts, os_versions, target_systems = (
            util.update_cluster_hosts(cluster_hosts))
        manager = ConfigManager()
        manager.reinstall_cluster_and_hosts(
            cluster_hosts, os_versions, target_systems)
        manager.sync()
32
compass/actions/search.py
Normal file
32
compass/actions/search.py
Normal file
@ -0,0 +1,32 @@
"""Module to search configs of given clusters

.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging

from compass.actions import util
from compass.config_management.utils.config_manager import ConfigManager
from compass.db import database


def search(cluster_hosts, cluster_propreties_match,
           cluster_properties_name, host_properties_match,
           host_properties_name):
    """search clusters.

    :param cluster_hosts: clusters and hosts in each cluster to search.
    :type cluster_hosts: dict of int to list of int

    .. note::
       The function should be called out of database session.
    """
    logging.debug('search cluster_hosts: %s', cluster_hosts)
    with database.session():
        cluster_hosts, os_versions, target_systems = (
            util.update_cluster_hosts(cluster_hosts))
        manager = ConfigManager()
        return manager.filter_cluster_and_hosts(
            cluster_hosts, cluster_propreties_match,
            cluster_properties_name, host_properties_match,
            host_properties_name, os_versions,
            target_systems)
@ -1,92 +0,0 @@
"""Module to deploy a given cluster

.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
import os
import os.path
import shutil

from compass.db import database
from compass.db.model import Cluster, ClusterState, HostState
from compass.db.model import LogProgressingHistory
from compass.config_management.utils.config_manager import ConfigManager
from compass.utils import setting_wrapper as setting


def trigger_install(clusterid, hostids=[]):
    """Deploy a given cluster.

    :param clusterid: the id of the cluster to deploy.
    :type clusterid: int
    :param hostids: the ids of the hosts to deploy.
    :type hostids: list of int

    .. note::
       The function should be called in database session.
    """
    logging.debug('trigger install cluster %s hosts %s',
                  clusterid, hostids)
    session = database.current_session()
    cluster = session.query(Cluster).filter_by(id=clusterid).first()
    if not cluster:
        logging.error('no cluster found for %s', clusterid)
        return

    adapter = cluster.adapter
    if not adapter:
        logging.error('no proper adapter found for cluster %s', cluster.id)
        return

    if cluster.mutable:
        logging.error('ignore installing cluster %s since it is mutable',
                      cluster)
        return

    if not cluster.state:
        cluster.state = ClusterState()

    cluster.state.state = 'INSTALLING'
    cluster.state.progress = 0.0
    cluster.state.message = ''
    cluster.state.severity = 'INFO'

    all_hostids = [host.id for host in cluster.hosts]
    update_hostids = []
    for host in cluster.hosts:
        if host.id not in hostids:
            logging.info('ignore installing %s since it is not in %s',
                         host, hostids)
            continue

        if host.mutable:
            logging.error('ignore installing %s since it is mutable',
                          host)
            continue

        log_dir = os.path.join(
            setting.INSTALLATION_LOGDIR,
            '%s.%s' % (host.hostname, clusterid))
        logging.info('clean log dir %s', log_dir)
        shutil.rmtree(log_dir, True)
        session.query(LogProgressingHistory).filter(
            LogProgressingHistory.pathname.startswith(
                '%s/' % log_dir)).delete(
            synchronize_session='fetch')

        if not host.state:
            host.state = HostState()

        host.state.state = 'INSTALLING'
        host.state.progress = 0.0
        host.state.message = ''
        host.state.severity = 'INFO'
        update_hostids.append(host.id)

    os.system('service rsyslog restart')

    manager = ConfigManager()
    manager.update_cluster_and_host_configs(
        clusterid, all_hostids, update_hostids,
        adapter.os, adapter.target_system)
    manager.sync()
74
compass/actions/update_progress.py
Normal file
74
compass/actions/update_progress.py
Normal file
@ -0,0 +1,74 @@
"""Module to update status and installing progress of the given cluster.

.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging

from compass.actions import util
from compass.log_analyzor import progress_calculator
from compass.db import database
from compass.utils import setting_wrapper as setting


def _cluster_filter(cluster):
    """filter cluster."""
    if not cluster.state:
        logging.error('there is no state for cluster %s',
                      cluster.id)
        return False

    if cluster.state.state != 'INSTALLING':
        logging.error('the cluster %s state %s is not installing',
                      cluster.id, cluster.state.state)
        return False

    return True


def _host_filter(host):
    """filter host."""
    if not host.state:
        logging.error('there is no state for host %s',
                      host.id)
        return False

    if host.state.state != 'INSTALLING':
        logging.error('the host %s state %s is not installing',
                      host.id, host.state.state)
        return False

    return True


def update_progress(cluster_hosts):
    """Update status and installing progress of the given cluster.

    :param cluster_hosts: clusters and hosts in each cluster to update.
    :type cluster_hosts: dict of int to list of int

    .. note::
       The function should be called out of the database session scope.
       In the function, it will update the database cluster_state and
       host_state table for the deploying cluster and hosts.

       The function will also query log_progressing_history table to get
       the lastest installing progress and the position of log it has
       processed in the last run. The function uses these information to
       avoid recalculate the progress from the beginning of the log file.
       After the progress got updated, these information will be stored back
       to the log_progressing_history for next time run.
    """
    logging.debug('update installing progress of cluster_hosts: %s',
                  cluster_hosts)
    os_versions = {}
    target_systems = {}
    with database.session():
        cluster_hosts, os_versions, target_systems = (
            util.update_cluster_hosts(
                cluster_hosts, _cluster_filter, _host_filter))

    progress_calculator.update_progress(setting.OS_INSTALLER,
                                        os_versions,
                                        setting.PACKAGE_INSTALLER,
                                        target_systems,
                                        cluster_hosts)
73
compass/actions/util.py
Normal file
73
compass/actions/util.py
Normal file
@ -0,0 +1,73 @@
"""Module to provide util for actions

.. moduleauthor:: Xiaodong Wang ,xiaodongwang@huawei.com>
"""
import logging

from compass.db import database
from compass.db.model import Switch
from compass.db.model import Cluster


def update_switch_ips(switch_ips):
    """get updated switch ips."""
    session = database.current_session()
    switches = session.query(Switch).all()
    if switch_ips:
        return [
            switch.ip for switch in switches
            if switch.ip in switch_ips
        ]
    else:
        return [switch.ip for switch in switches]


def update_cluster_hosts(cluster_hosts,
                         cluster_filter=None, host_filter=None):
    """get updated clusters and hosts per cluster from cluster hosts."""
    session = database.current_session()
    os_versions = {}
    target_systems = {}
    updated_cluster_hosts = {}
    clusters = session.query(Cluster).all()
    for cluster in clusters:
        if cluster_hosts and cluster.id not in cluster_hosts:
            logging.debug('ignore cluster %s sinc it is not in %s',
                          cluster.id, cluster_hosts)
            continue

        adapter = cluster.adapter
        if not cluster.adapter:
            logging.error('there is no adapter for cluster %s',
                          cluster.id)
            continue

        if cluster_filter and not cluster_filter(cluster):
            logging.debug('filter cluster %s', cluster.id)
            continue

        updated_cluster_hosts[cluster.id] = []
        os_versions[cluster.id] = adapter.os
        target_systems[cluster.id] = adapter.target_system

        if (
            cluster.id not in cluster_hosts or
            not cluster_hosts[cluster.id]
        ):
            hostids = [host.id for host in cluster.hosts]
        else:
            hostids = cluster_hosts[cluster.id]

        for host in cluster.hosts:
            if host.id not in hostids:
                logging.debug('ignore host %s which is not in %s',
                              host.id, hostids)
                continue

            if host_filter and not host_filter(host):
                logging.debug('filter host %s', host.id)
                continue

            updated_cluster_hosts[cluster.id].append(host.id)

    return (updated_cluster_hosts, os_versions, target_systems)
@ -1,7 +1,9 @@
__all__ = ['Flask', 'SQLAlchemy', 'compass_api']

from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.debug = True

import compass.api.api
from compass.api import api as compass_api
@ -88,10 +88,10 @@ class SwitchList(Resource):
|
||||
""" Get Ip prefex as pattern used to query switches.
|
||||
Switches' Ip addresses need to match this pattern.
|
||||
"""
|
||||
count = int(prefix/8)
|
||||
count = int(prefix / 8)
|
||||
if count == 0:
|
||||
count = 1
|
||||
return network.rsplit('.', count)[0]+'.'
|
||||
return network.rsplit('.', count)[0] + '.'
|
||||
|
||||
from netaddr import IPNetwork, IPAddress
|
||||
|
||||
@ -103,8 +103,8 @@ class SwitchList(Resource):
|
||||
result_set = []
|
||||
if limit:
|
||||
result_set = session.query(ModelSwitch).filter(
|
||||
ModelSwitch.ip.startswith(ip_filter)).limit(limit)\
|
||||
.all()
|
||||
ModelSwitch.ip.startswith(ip_filter)).limit(
|
||||
limit).all()
|
||||
else:
|
||||
result_set = session.query(ModelSwitch).filter(
|
||||
ModelSwitch.ip.startswith(ip_filter)).all()
|
||||
@ -160,7 +160,7 @@ class SwitchList(Resource):
|
||||
error_msg = "Invalid IP address format!"
|
||||
return errors.handle_invalid_usage(
|
||||
errors.UserInvalidUsage(error_msg)
|
||||
)
|
||||
)
|
||||
|
||||
new_switch = {}
|
||||
with database.session() as session:
|
||||
@ -188,9 +188,12 @@ class SwitchList(Resource):
|
||||
celery.send_task("compass.tasks.pollswitch", (ip_addr,))
|
||||
logging.info('new switch added: %s', new_switch)
|
||||
return util.make_json_response(
|
||||
202, {"status": "accepted",
|
||||
"switch": new_switch}
|
||||
)
|
||||
202,
|
||||
{
|
||||
"status": "accepted",
|
||||
"switch": new_switch
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class Switch(Resource):
|
||||
@ -213,7 +216,7 @@ class Switch(Resource):
|
||||
|
||||
return errors.handle_not_exist(
|
||||
errors.ObjectDoesNotExist(error_msg)
|
||||
)
|
||||
)
|
||||
|
||||
switch_res['id'] = switch.id
|
||||
switch_res['ip'] = switch.ip
|
||||
@ -332,7 +335,7 @@ class MachineList(Resource):
|
||||
return errors.UserInvalidUsage(
|
||||
errors.UserInvalidUsage(error_msg)
|
||||
)
|
||||
#TODO: supporte query filtered port
|
||||
#TODO: support query filtered port
|
||||
if filter_clause:
|
||||
machines = session.query(ModelMachine)\
|
||||
.filter(and_(*filter_clause)).all()
@ -438,15 +441,14 @@ class Cluster(Resource):
cluster_resp = {}
resp = {}
with database.session() as session:
cluster = session.query(ModelCluster)\
.filter_by(id=cluster_id)\
.first()
cluster = session.query(
ModelCluster).filter_by(id=cluster_id).first()
logging.debug('cluster is %s', cluster)
if not cluster:
error_msg = 'Cannot find the cluster with id=%s' % cluster_id
return errors.handle_not_exist(
errors.ObjectDoesNotExist(error_msg)
)
)

if resource:
# List resource details
@ -517,9 +519,12 @@ class Cluster(Resource):
}

return util.make_json_response(
200, {"status": "OK",
"cluster": cluster_resp}
)
200,
{
"status": "OK",
"cluster": cluster_resp
}
)

def put(self, cluster_id, resource):
"""
@ -538,25 +543,28 @@ class Cluster(Resource):
}
request_data = json.loads(request.data)
with database.session() as session:
cluster = session.query(ModelCluster).filter_by(id=cluster_id)\
.first()
cluster = session.query(
ModelCluster).filter_by(id=cluster_id).first()

if not cluster:
error_msg = 'You are trying to update a non-existing cluster!'
return errors.handle_not_exist(
errors.ObjectDoesNotExist(error_msg)
)
)

if resource not in request_data:
error_msg = "Invalid resource name '%s'" % resource
return errors.handle_invalid_usage(
errors.UserInvalidUsage(error_msg))
errors.UserInvalidUsage(error_msg)
)

value = request_data[resource]

if resource not in resources.keys():
error_msg = "Invalid resource name '%s'" % resource
return errors.handle_invalid_usage(
errors.UserInvalidUsage(error_msg))
errors.UserInvalidUsage(error_msg)
)

validate_func = resources[resource]['validator']
module = globals()['util']
@ -564,14 +572,18 @@ class Cluster(Resource):

if is_valid:
column = resources[resource]['column']
session.query(ModelCluster).filter_by(id=cluster_id)\
.update({column: json.dumps(value)})
session.query(
ModelCluster).filter_by(id=cluster_id).update(
{column: json.dumps(value)}
)
else:
return errors.handle_mssing_input(
errors.InputMissingError(msg))
errors.InputMissingError(msg)
)

return util.make_json_response(
200, {"status": "OK"})
200, {"status": "OK"}
)
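Cluster.put dispatches validation through a table that maps each resource name to a validator function looked up by name at runtime (the validators appear to live in compass.api.util). A minimal sketch of that dispatch, with an illustrative resource entry and validator:

def is_valid_security_config(value):
    # illustrative validator, not Compass's real one
    if isinstance(value, dict):
        return True, ''
    return False, 'security config must be a dict'


RESOURCES = {
    'security': {'validator': 'is_valid_security_config',
                 'column': 'security_config'},
}


def validate(resource, value):
    """Look the validator up by name and apply it."""
    validate_func = RESOURCES[resource]['validator']
    return globals()[validate_func](value)


print validate('security', {'server_credentials': {}})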


@app.route("/clusters", methods=['GET'])
@ -595,11 +607,13 @@ def list_clusters():
.all()
elif state == 'installing':
# The deployment of this cluster is in progress.
clusters = session.query(ModelCluster)\
.filter(ModelCluster.id == ClusterState.id,
or_(ClusterState.state == 'INSTALLING',
ClusterState.state == 'UNINITIALIZED'))\
.all()
clusters = session.query(
ModelCluster).filter(
ModelCluster.id == ClusterState.id,
or_(
ClusterState.state == 'INSTALLING',
ClusterState.state == 'UNINITIALIZED'
)).all()
elif state == 'failed':
# The deployment of this cluster has failed.
clusters = session.query(ModelCluster)\
@ -648,20 +662,21 @@ def execute_cluster_action(cluster_id):
failed_machines = []
for host in hosts:
# Check if machine exists
machine = session.query(ModelMachine).filter_by(id=host)\
.first()
machine = session.query(
ModelMachine).filter_by(id=host).first()
if not machine:
error_msg = "Machine id=%s does not exist!" % host
return errors.handle_not_exist(
errors.ObjectDoesNotExist(error_msg)
)
clusterhost = session.query(ModelClusterHost)\
.filter_by(machine_id=host)\
.first()
)

clusterhost = session.query(
ModelClusterHost).filter_by(machine_id=host).first()
if clusterhost:
# Machine is already used
failed_machines.append(clusterhost.machine_id)
continue

# Add the available machine to available_machines list
available_machines.append(machine)

@ -672,7 +687,8 @@ def execute_cluster_action(cluster_id):
error_msg = "Conflict!"
return errors.handle_duplicate_object(
errors.ObjectDuplicateError(error_msg), value
)
)

for machine, host in zip(available_machines, hosts):
host = ModelClusterHost(cluster_id=cluster_id,
machine_id=machine.id)
@ -688,8 +704,8 @@ def execute_cluster_action(cluster_id):
200, {
"status": "OK",
"cluster_hosts": cluseter_hosts
}
)
}
)

def _remove_hosts(cluster_id, hosts):
"""Remove existing cluster host from the cluster"""
@ -698,9 +714,9 @@ def execute_cluster_action(cluster_id):
with database.session() as session:
failed_hosts = []
for host_id in hosts:
host = session.query(ModelClusterHost)\
.filter_by(id=host_id, cluster_id=cluster_id)\
.first()
host = session.query(
ModelClusterHost).filter_by(
id=host_id, cluster_id=cluster_id).first()

if not host:
failed_hosts.append(host_id)
@ -719,7 +735,7 @@ def execute_cluster_action(cluster_id):
}
return errors.handle_not_exist(
errors.ObjectDoesNotExist(error_msg), value
)
)

filter_clause = []
for host_id in hosts:
@ -733,17 +749,17 @@ def execute_cluster_action(cluster_id):
200, {
"status": "OK",
"cluster_hosts": removed_hosts
}
)
}
)

def _replace_all_hosts(cluster_id, hosts):
"""Remove all existing hosts from the cluster and add new ones"""

with database.session() as session:
# Delete all existing hosts of the cluster
session.query(ModelClusterHost)\
.filter_by(cluster_id=cluster_id).delete()
session.flush()
session.query(ModelClusterHost).filter_by(
cluster_id=cluster_id).delete()

return _add_hosts(cluster_id, hosts)

def _deploy(cluster_id, hosts):
@ -754,21 +770,23 @@ def execute_cluster_action(cluster_id):
with database.session() as session:
if not hosts:
# Deploy all hosts in the cluster
cluster_hosts = session.query(ModelClusterHost)\
.filter_by(cluster_id=cluster_id).all()
cluster_hosts = session.query(
ModelClusterHost).filter_by(cluster_id=cluster_id).all()

if not cluster_hosts:
# No host belongs to this cluster
error_msg = ('Cannot find any host in cluster id=%s' %
cluster_id)
error_msg = (
'Cannot find any host in cluster id=%s' % cluster_id)
return errors.handle_not_exist(
errors.ObjectDoesNotExist(error_msg))

for host in cluster_hosts:
if not host.mutable:
# The host is not allowed to be modified
error_msg = ("The host id=%s is not allowed to be "
"modified now!") % host.id
error_msg = (
'The host id=%s is not allowed to be '
'modified now!'
) % host.id
return errors.UserInvalidUsage(
errors.UserInvalidUsage(error_msg))

@ -785,23 +803,26 @@ def execute_cluster_action(cluster_id):
deploy_hosts_info.append(host_info)

# Lock cluster hosts and its cluster
session.query(ModelClusterHost).filter_by(cluster_id=cluster_id)\
.update({'mutable': False})
session.query(ModelCluster).filter_by(id=cluster_id)\
.update({'mutable': False})
session.query(ModelClusterHost).filter_by(
cluster_id=cluster_id).update({'mutable': False})
session.query(ModelCluster).filter_by(
id=cluster_id).update({'mutable': False})

# Clean up cluster_state and host_state table
session.query(ClusterState).filter_by(id=cluster_id).delete()
for host_id in hosts:
session.query(HostState).filter_by(id=host_id).delete()

celery.send_task("compass.tasks.trigger_install", (cluster_id, hosts))
celery.send_task("compass.tasks.deploy", ({cluster_id: hosts},))
return util.make_json_response(
202, {"status": "accepted",
"deployment": {
"cluster": deploy_cluster_info,
"hosts": deploy_hosts_info
}})
202, {
"status": "accepted",
"deployment": {
"cluster": deploy_cluster_info,
"hosts": deploy_hosts_info
}
}
)
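The deploy hunk also changes the task invocation: instead of trigger_install with (cluster_id, hosts), a single {cluster_id: hosts} dict is sent to compass.tasks.deploy. A hedged sketch of the sender side (the broker URL is illustrative; the worker-side task must accept the one dict argument):

from celery import Celery

celery = Celery(broker='amqp://localhost//')

# one positional argument: a dict mapping cluster id to its host ids
cluster_hosts = {1: [2, 3, 5]}
celery.send_task("compass.tasks.deploy", (cluster_hosts,))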

request_data = None
with database.session() as session:
@ -810,7 +831,8 @@ def execute_cluster_action(cluster_id):
error_msg = 'Cluster id=%s does not exist!'
return errors.handle_not_exist(
errors.ObjectDoesNotExist(error_msg)
)
)

if not cluster.mutable:
# The cluster cannot be deployed again
error_msg = ("The cluster id=%s is not allowed to "
@ -836,7 +858,7 @@ def execute_cluster_action(cluster_id):
else:
return errors.handle_invalid_usage(
errors.UserInvalidUsage('%s action is not supported!' % action)
)
)


class ClusterHostConfig(Resource):
@ -849,8 +871,8 @@ class ClusterHostConfig(Resource):
"""
config_res = {}
with database.session() as session:
host = session.query(ModelClusterHost).filter_by(id=host_id)\
.first()
host = session.query(
ModelClusterHost).filter_by(id=host_id).first()
if not host:
# The host does not exist.
error_msg = "The host id=%s does not exist!" % host_id
@ -871,12 +893,13 @@ class ClusterHostConfig(Resource):
:param host_id: the unique identifier of the host
"""
with database.session() as session:
host = session.query(ModelClusterHost).filter_by(id=host_id)\
.first()
host = session.query(
ModelClusterHost).filter_by(id=host_id).first()
if not host:
error_msg = "The host id=%s does not exist!" % host_id
return errors.handle_not_exist(
errors.ObjectDoesNotExist(error_msg))

logging.debug("cluster config put request.data %s", request.data)
request_data = json.loads(request.data)
if not request_data:
@ -928,8 +951,8 @@ class ClusterHostConfig(Resource):
"""
available_delete_keys = ['roles']
with database.session() as session:
host = session.query(ModelClusterHost).filter_by(id=host_id)\
.first()
host = session.query(
ModelClusterHost).filter_by(id=host_id).first()
if not host:
error_msg = "The host id=%s does not exist!" % host_id
return errors.handle_not_exist(
@ -965,12 +988,13 @@ class ClusterHost(Resource):
"""
host_res = {}
with database.session() as session:
host = session.query(ModelClusterHost).filter_by(id=host_id)\
.first()
host = session.query(
ModelClusterHost).filter_by(id=host_id).first()
if not host:
error_msg = "The host id=%s does not exist!" % host_id
return errors.handle_not_exist(
errors.ObjectDoesNotExist(error_msg))

host_res['hostname'] = host.hostname
host_res['mutable'] = host.mutable
host_res['id'] = host.id
@ -1002,19 +1026,20 @@ def list_clusterhosts():
with database.session() as session:
hosts = None
if hostname and clustername:
hosts = session.query(ModelClusterHost).join(ModelCluster)\
.filter(ModelClusterHost.hostname == hostname,
ModelCluster.name == clustername)\
.all()
hosts = session.query(
ModelClusterHost).join(ModelCluster).filter(
ModelClusterHost.hostname == hostname,
ModelCluster.name == clustername).all()

elif hostname:
hosts = session.query(ModelClusterHost)\
.filter_by(hostname=hostname).all()
hosts = session.query(
ModelClusterHost).filter_by(hostname=hostname).all()
elif clustername:
cluster = session.query(ModelCluster)\
.filter_by(name=clustername).first()
cluster = session.query(
ModelCluster).filter_by(name=clustername).first()
if cluster:
hosts = cluster.hosts

else:
hosts = session.query(ModelClusterHost).all()

@ -1051,6 +1076,7 @@ def list_adapter(adapter_id):
error_msg = "Adapter id=%s does not exist!" % adapter_id
return errors.handle_not_exist(
errors.ObjectDoesNotExist(error_msg))

adapter_res['name'] = adapter.name
adapter_res['os'] = adapter.os
adapter_res['id'] = adapter.id
@ -1058,6 +1084,7 @@ def list_adapter(adapter_id):
adapter_res['link'] = {
"href": "/".join((endpoint, str(adapter.id))),
"rel": "self"}

return util.make_json_response(
200, {"status": "OK",
"adapter": adapter_res})
@ -1071,19 +1098,19 @@ def list_adapter_roles(adapter_id):
"""
roles_list = []
with database.session() as session:
adapter_q = session.query(Adapter)\
.filter_by(id=adapter_id).first()
adapter_q = session.query(
Adapter).filter_by(id=adapter_id).first()
if not adapter_q:
error_msg = "Adapter id=%s does not exist!" % adapter_id
return errors.handle_not_exist(
errors.ObjectDoesNotExist(error_msg))

roles = session.query(Role, Adapter)\
.filter(Adapter.id == adapter_id,
Adapter.target_system == Role.target_system)\
.all()
roles = session.query(
Role, Adapter).filter(
Adapter.id == adapter_id,
Adapter.target_system == Role.target_system).all()

for role, adapter in roles:
for role, _ in roles:
role_res = {}
role_res['name'] = role.name
role_res['description'] = role.description
@ -1227,19 +1254,19 @@ class DashboardLinks(Resource):
config = host.config
if ('has_dashboard_roles' in config and
config['has_dashboard_roles']):
ip = config.get(
ip_addr = config.get(
'networking', {}).get(
'interfaces', {}).get(
'management', {}).get(
'ip', '')
roles = config.get('roles', [])
for role in roles:
links[role] = 'http://%s' % ip
links[role] = 'http://%s' % ip_addr

return util.make_json_response(
200, {"status": "OK",
"dashboardlinks": links}
)
)


TABLES = {
@ -1269,7 +1296,8 @@ def export_csv(tname):
error_msg = "Table '%s' is not supported to export or wrong table name"
return util.handle_invalid_usage(
errors.UserInvalidUsage(error_msg)
)
)

table = TABLES[tname]['name']
colnames = TABLES[tname]['columns']
t_headers = []
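export_csv drives the CSV export from a TABLES registry keyed by table name; a minimal sketch of that registry shape, with illustrative entries and columns:

TABLES = {
    'machine': {'name': 'machine', 'columns': ['id', 'mac', 'port', 'vlan']},
    'cluster': {'name': 'cluster', 'columns': ['id', 'name', 'state']},
}


def table_headers(tname):
    """Return the CSV header row for a registered table."""
    if tname not in TABLES:
        raise KeyError("table '%s' is not supported to export" % tname)
    return TABLES[tname]['columns']


print table_headers('machine')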

@ -6,6 +6,7 @@ from compass.api import util
class ObjectDoesNotExist(Exception):
"""Define the exception for referring to a non-existing object"""
def __init__(self, message):
super(ObjectDoesNotExist, self).__init__(message)
self.message = message

def __str__(self):
@ -15,6 +16,7 @@ class ObjectDoesNotExist(Exception):
class UserInvalidUsage(Exception):
"""Define the exception for faulty usage by users"""
def __init__(self, message):
super(UserInvalidUsage, self).__init__(message)
self.message = message

def __str__(self):
@ -24,6 +26,7 @@ class UserInvalidUsage(Exception):
class ObjectDuplicateError(Exception):
"""Define the duplicated object exception"""
def __init__(self, message):
super(ObjectDuplicateError, self).__init__(message)
self.message = message

def __str__(self):
@ -33,6 +36,7 @@ class ObjectDuplicateError(Exception):
class InputMissingError(Exception):
"""Define the insufficient input exception"""
def __init__(self, message):
super(InputMissingError, self).__init__(message)
self.message = message

def __str__(self):
@ -42,6 +46,7 @@ class InputMissingError(Exception):
class MethodNotAllowed(Exception):
"""Define the exception raised when an invalid method is called"""
def __init__(self, message):
super(MethodNotAllowed, self).__init__(message)
self.message = message

def __str__(self):
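All of these exception classes follow one shape: store the message, echo it from __str__, and let an API-level handler turn it into a JSON error response. A standalone usage sketch (the __str__ body here is an assumption, since the diff does not show it):

class ObjectDoesNotExist(Exception):
    """Define the exception for referring to a non-existing object"""
    def __init__(self, message):
        super(ObjectDoesNotExist, self).__init__(message)
        self.message = message

    def __str__(self):
        return repr(self.message)


try:
    raise ObjectDoesNotExist('switch id=42 does not exist')
except ObjectDoesNotExist as error:
    # a handler such as errors.handle_not_exist would wrap this message
    # in a JSON response; here we just print it
    print error.message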

@ -8,7 +8,7 @@ import simplejson as json

from compass.api import app

api = Api(app)
API = Api(app)


def make_json_response(status_code, data):
@ -31,7 +31,7 @@ def make_csv_response(status_code, csv_data, fname):

def add_resource(*args, **kwargs):
"""Add resource"""
api.add_resource(*args, **kwargs)
API.add_resource(*args, **kwargs)


def is_valid_ip(ip_address):
@ -39,9 +39,9 @@ def is_valid_ip(ip_address):
if not ip_address:
return False

regex = ('^(([0-9]|[1-9][0-9]|1[0-9]{2}|[1-2][0-4][0-9]|25[0-5])\.)'
'{3}'
'([0-9]|[1-9][0-9]|1[0-9]{2}|[1-2][0-4][0-9]|25[0-5])')
regex = (r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|[1-2][0-4][0-9]|25[0-5])\.)'
r'{3}'
r'([0-9]|[1-9][0-9]|1[0-9]{2}|[1-2][0-4][0-9]|25[0-5])')

if re.match(regex, ip_address):
return True
@ -55,10 +55,10 @@ def is_valid_ipnetowrk(ip_network):
if not ip_network:
return False

regex = ('^(([0-9]|[1-9][0-9]|1[0-9]{2}|[1-2][0-4][0-9]|25[0-5])\.)'
'{3}'
'([0-9]|[1-9][0-9]|1[0-9]{2}|[1-2][0-4][0-9]|25[0-5])'
'((\/[0-9]|\/[1-2][0-9]|\/[1-3][0-2]))$')
regex = (r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|[1-2][0-4][0-9]|25[0-5])\.)'
r'{3}'
r'([0-9]|[1-9][0-9]|1[0-9]{2}|[1-2][0-4][0-9]|25[0-5])'
r'((\/[0-9]|\/[1-2][0-9]|\/[1-3][0-2]))$')

if re.match(regex, ip_network):
return True
@ -93,6 +93,7 @@ def is_valid_gateway(ip_addr):


def _is_valid_nameservers(value):
"""Validate the format of nameservers."""
if value:
nameservers = value.strip(",").split(",")
for elem in nameservers:
@ -282,7 +283,7 @@ def valid_host_config(config):
validator = None
try:
validator = valid_format[key]
except:
except Exception:
continue
else:
value = flat_config[key]
@ -327,6 +328,7 @@ def update_dict_value(searchkey, dictionary):


def is_valid_keys(expected, input_dict, section=""):
"""Validate keys"""
excepted_keys = set(expected.keys())
input_keys = set(input_dict.keys())
if excepted_keys != input_keys:
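The regex hunks above only add the r'' prefix: in a raw string every backslash reaches the regex engine untouched, so escapes like \. and \/ mean exactly what they show. A standalone sketch of the IPv4 check (a trailing $ is added here so the sketch matches end to end):

import re

IP_REGEX = (r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|[1-2][0-4][0-9]|25[0-5])\.)'
            r'{3}'
            r'([0-9]|[1-9][0-9]|1[0-9]{2}|[1-2][0-4][0-9]|25[0-5])$')


def is_valid_ip(ip_address):
    """Return True if ip_address looks like a dotted-quad IPv4 address."""
    if not ip_address:
        return False
    return re.match(IP_REGEX, ip_address) is not None


assert is_valid_ip('192.168.10.1')
assert not is_valid_ip('10.0.0.')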

@ -55,18 +55,18 @@ VAR_PERCENTAGE = 15
ROLES_LIST = [['os-dashboard']]

PRESET_VALUES = {
'NAMESERVERS':'192.168.10.1',
'NTP_SERVER':'192.168.10.1',
'GATEWAY':'192.168.10.1',
'PROXY':'http://192.168.10.1:3128',
'NAMESERVERS': '192.168.10.1',
'NTP_SERVER': '192.168.10.1',
'GATEWAY': '192.168.10.1',
'PROXY': 'http://192.168.10.1:3128',
}
print os.environ.get("NAMESERVERS")
for v in PRESET_VALUES:
if os.environ.get(v):
PRESET_VALUES[v]=os.environ.get(v)
print ( v + PRESET_VALUES[v] + " is set by env variables")
PRESET_VALUES[v] = os.environ.get(v)
print (v + PRESET_VALUES[v] + " is set by env variables")
else:
print (PRESET_VALUES[v])
print (PRESET_VALUES[v])

# get apiclient object.
client = Client(COMPASS_SERVER_URL)
@ -86,7 +86,7 @@ print 'add a switch status: %s resp: %s' % (status, resp)
if status < 400:
switch = resp['switch']
else:
status, resp = client.get_switches()
status, resp = client.get_switches()
print 'get all switches status: %s resp: %s' % (status, resp)
switch = None
for switch in resp['switches']:
@ -205,7 +205,8 @@ print 'set networking config to cluster %s status: %s, resp: %s' % (


# set partition of each host in cluster
status, resp = client.set_partition(cluster_id,
status, resp = client.set_partition(
cluster_id,
home_percentage=HOME_PERCENTAGE,
tmp_partition_percentage=TMP_PERCENTAGE,
var_partition_percentage=VAR_PERCENTAGE)
@ -232,14 +233,17 @@ print 'deploy cluster %s status: %s, resp: %s' % (cluster_id, status, resp)


# get installing progress.
timeout = time.time() + 60*90
timeout = time.time() + 60 * 90
while True:
status, resp = client.get_cluster_installing_progress(cluster_id)
print 'get cluster %s installing progress status: %s, resp: %s' % (
cluster_id, status, resp)
progress = resp['progress']
if (progress['state'] not in ['UNINITIALIZED', 'INSTALLING'] or
progress['percentage'] >= 1.0) or time.time() > timeout:
if (
progress['state'] not in ['UNINITIALIZED', 'INSTALLING'] or
progress['percentage'] >= 1.0 or
time.time() > timeout
):
break

for host_id in host_ids:
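The reformatted condition above is a poll-until-done-or-timeout loop. The same shape in isolation, with a fake progress source standing in for client.get_cluster_installing_progress:

import itertools
import time

_TICKS = itertools.count(1)


def get_progress():
    """Stand-in for the apiclient installing-progress call."""
    tick = next(_TICKS)
    state = 'INSTALLING' if tick < 3 else 'READY'
    return {'state': state, 'percentage': min(tick / 3.0, 1.0)}


timeout = time.time() + 60 * 90
while True:
    progress = get_progress()
    if (
        progress['state'] not in ['UNINITIALIZED', 'INSTALLING'] or
        progress['percentage'] >= 1.0 or
        time.time() > timeout
    ):
        break
    time.sleep(0.1)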

@ -1,3 +1,25 @@
"""modules to read/write cluster/host config from installers.

.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
__all__ = [
'chefhandler', 'cobbler',
'get_os_installer_by_name',
'get_os_installer',
'register_os_installer',
'get_package_installer_by_name',
'get_package_installer',
'register_package_installer',
]


from compass.config_management.installers.os_installer import (
get_installer_by_name as get_os_installer_by_name,
get_installer as get_os_installer,
register as register_os_installer)
from compass.config_management.installers.package_installer import (
get_installer_by_name as get_package_installer_by_name,
get_installer as get_package_installer,
register as register_package_installer)
from compass.config_management.installers.plugins import chefhandler
from compass.config_management.installers.plugins import cobbler
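With __all__ and the aliased imports above, callers can take the whole installer API from the package itself, the way ConfigManager does later in this commit. A usage sketch (requires a configured Compass environment):

from compass.config_management import installers

package_installer = installers.get_package_installer()
os_installer = installers.get_os_installer(
    package_installer=package_installer)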

@ -8,10 +8,6 @@ class Installer(object):
"""Interface for installer."""
NAME = 'installer'

def __init__(self):
raise NotImplementedError(
'%s is not implemented' % self.__class__.__name__)

def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, self.NAME)

@ -19,24 +15,10 @@ class Installer(object):
"""virtual method to sync installer."""
pass

def reinstall_host(self, hostid, config, **kwargs):
"""virtual method to reinstall specific host."""
pass

def get_global_config(self, **kwargs):
"""virtual method to get global config."""
return {}

def clean_cluster_config(self, clusterid, config, **kwargs):
"""virtual method to clean cluster config.

:param clusterid: the id of the cluster to cleanup.
:type clusterid: int
:param config: cluster configuration to cleanup.
:type config: dict
"""
pass

def get_cluster_config(self, clusterid, **kwargs):
"""virtual method to get cluster config.

@ -47,16 +29,6 @@ class Installer(object):
"""
return {}

def clean_host_config(self, hostid, config, **kwargs):
"""virtual method to clean host config.

:param hostid: the id of the host to cleanup.
:type hostid: int
:param config: host configuration to cleanup.
:type config: dict
"""
pass

def get_host_config(self, hostid, **kwargs):
"""virtual method to get host config.

@ -67,27 +39,6 @@ class Installer(object):
"""
return {}

def clean_host_configs(self, host_configs, **kwargs):
"""Wrapper method to clean hosts' configs.

:param host_configs: dict of host id to host configuration as dict
"""
for hostid, host_config in host_configs.items():
self.clean_host_config(hostid, host_config, **kwargs)

def get_host_configs(self, hostids, **kwargs):
"""Wrapper method to get hosts' configs.

:param hostids: ids of the hosts' configuration.
:type hostids: list of int

:returns: dict of host id to host configuration as dict.
"""
host_configs = {}
for hostid in hostids:
host_configs[hostid] = self.get_host_config(hostid, **kwargs)
return host_configs

def update_global_config(self, config, **kwargs):
"""virtual method to update global config.

@ -116,10 +67,66 @@ class Installer(object):
"""
pass

def update_host_configs(self, host_configs, **kwargs):
"""Wrapper method to updaet hosts' configs.
def clean_host_installing_progress(
self, hostid, config, **kwargs
):
"""virtual method to clean host installing progress.

:param host_configs: dict of host id to host configuration as dict
:param hostid: the id of host to clean the log.
:type hostid: int
:param config: host configuration.
:type config: dict
"""
for hostid, config in host_configs.items():
self.update_host_config(hostid, config, **kwargs)
pass

def clean_cluster_installing_progress(
self, clusterid, config, **kwargs
):
"""virtual method to clean cluster installing progress.

:param clusterid: the id of cluster to clean the log.
:type clusterid: int
:param config: cluster configuration.
:type config: dict
"""
pass

def reinstall_host(self, hostid, config, **kwargs):
"""virtual method to reinstall specific host.

:param hostid: the id of the host to reinstall.
:type hostid: int
:param config: host configuration to reinstall
:type config: dict
"""
pass

def reinstall_cluster(self, clusterid, config, **kwargs):
"""virtual method to reinstall specific cluster.

:param clusterid: the id of the cluster to reinstall.
:type clusterid: int
:param config: cluster configuration to reinstall
:type config: dict
"""
pass

def clean_host_config(self, hostid, config, **kwargs):
"""virtual method to clean host config.

:param hostid: the id of the host to cleanup.
:type hostid: int
:param config: host configuration to cleanup.
:type config: dict
"""
pass

def clean_cluster_config(self, clusterid, config, **kwargs):
"""virtual method to clean cluster config.

:param clusterid: the id of the cluster to cleanup.
:type clusterid: int
:param config: cluster configuration to cleanup.
:type config: dict
"""
pass
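A concrete installer only overrides the hooks it cares about; every other method inherits the no-op or empty-dict default above. A hedged sketch (DummyInstaller and the module path are illustrative assumptions):

from compass.config_management.installers import installer


class DummyInstaller(installer.Installer):
    """Minimal installer that only records host config updates."""
    NAME = 'dummy'

    def __init__(self):
        # deliberately not calling the base __init__, which raises
        self.host_configs_ = {}

    def update_host_config(self, hostid, config, **kwargs):
        self.host_configs_[hostid] = config

    def get_host_config(self, hostid, **kwargs):
        return self.host_configs_.get(hostid, {})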

@ -112,6 +112,7 @@ class Installer(package_installer.Installer):
NAME = 'chef'

def __init__(self, **kwargs):
super(Installer, self).__init__(**kwargs)
import chef
self.installer_url_ = setting.CHEF_INSTALLER_URL
self.global_databag_name_ = setting.CHEF_GLOBAL_DATABAG_NAME
@ -224,7 +225,7 @@ class Installer(package_installer.Installer):
logging.debug('databag item is removed for cluster %s '
'config %s target_system %s',
clusterid, config, target_system)
except Exception as error:
except Exception:
logging.debug('no databag item to delete for cluster %s '
'config %s target_system %s',
clusterid, config, target_system)
@ -261,7 +262,7 @@ class Installer(package_installer.Installer):
logging.debug('client is removed for host %s '
'config %s target_system %s',
hostid, config, target_system)
except Exception as error:
except Exception:
logging.debug('no client to delete for host %s '
'config %s target_system %s',
hostid, config, target_system)
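The hunks above drop the unused `as error` binding; the cleanup logic itself stays best-effort: a delete that fails because the object is already gone is logged and ignored. The same shape in isolation:

import logging


def remove_quietly(delete_func, what):
    """Run a delete callable, treating 'already gone' as success."""
    try:
        delete_func()
        logging.debug('%s is removed', what)
    except Exception:
        logging.debug('no %s found to remove', what)


remove_quietly(lambda: None, 'node host1.1')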
@ -274,12 +275,12 @@ class Installer(package_installer.Installer):
self._get_node_name(
config['hostname'], config['clusterid'], target_system),
api=self.api_
)
)
node.delete()
logging.debug('node is removed for host %s '
'config %s target_system %s',
hostid, config, target_system)
except Exception as error:
except Exception:
logging.debug('no node to delete for host %s '
'config %s target_system %s',
hostid, config, target_system)
@ -301,7 +302,8 @@ class Installer(package_installer.Installer):
clusterid = config['clusterid']
bag = self._get_databag(target_system)
global_bag_item = dict(self._get_global_databag_item(bag))
bag_item = self._get_cluster_databag_item(bag, clusterid, target_system)
bag_item = self._get_cluster_databag_item(
bag, clusterid, target_system)
bag_item_dict = dict(bag_item)
util.merge_dict(bag_item_dict, global_bag_item, False)
translated_config = TO_HOST_TRANSLATORS[target_system].translate(

@ -1,6 +1,8 @@
"""os installer cobbler plugin"""
import functools
import logging
import os.path
import shutil
import xmlrpclib

from compass.config_management.installers import os_installer
@ -112,6 +114,7 @@ class Installer(os_installer.Installer):
NAME = 'cobbler'

def __init__(self, **kwargs):
super(Installer, self).__init__()
# the connection is created when cobbler installer is initialized.
self.remote_ = xmlrpclib.Server(
setting.COBBLER_INSTALLER_URL,
@ -148,6 +151,7 @@ class Installer(os_installer.Installer):
"""Sync cobbler to catch up the latest update config."""
logging.debug('sync %s', self)
self.remote_.sync(self.token_)
os.system('service rsyslog restart')

def _get_modify_system(self, profile, config, **kwargs):
"""get modified system config."""
@ -176,7 +180,9 @@ class Installer(os_installer.Installer):
{'name': os_version})
return profile_found[0]

def _get_system_name(self, config):
@classmethod
def _get_system_name(cls, config):
"""get system name"""
return '%s.%s' % (
config['hostname'], config['clusterid'])

@ -188,7 +194,7 @@ class Installer(os_installer.Installer):
sys_name, self.token_)
logging.debug('using existing system %s for %s',
sys_id, sys_name)
except Exception as e:
except Exception:
if create_if_not_exists:
sys_id = self.remote_.new_system(self.token_)
logging.debug('create new system %s for %s',
@ -204,9 +210,9 @@ class Installer(os_installer.Installer):
try:
self.remote_.remove_system(sys_name, self.token_)
logging.debug('system %s is removed', sys_name)
except Exception as error:
except Exception:
logging.debug('no system %s found to remove', sys_name)


def _save_system(self, sys_id):
"""save system config update."""
self.remote_.save_system(sys_id, self.token_)
@ -224,12 +230,30 @@ class Installer(os_installer.Installer):

def clean_host_config(self, hostid, config, **kwargs):
"""clean host config."""
self.clean_host_installing_progress(
hostid, config, **kwargs)
self._clean_system(config)

@classmethod
def _clean_log(cls, system_name):
"""clean log"""
log_dir = os.path.join(
setting.INSTALLATION_LOGDIR,
system_name)
shutil.rmtree(log_dir, True)

def clean_host_installing_progress(
self, hostid, config, **kwargs
):
"""clean host installing progress."""
self._clean_log(self._get_system_name(config))

def reinstall_host(self, hostid, config, **kwargs):
"""reinstall host."""
sys_id = self._get_system(config, False)
if sys_id:
self.clean_host_installing_progress(
hostid, config, **kwargs)
self._netboot_enabled(sys_id)

def update_host_config(self, hostid, config, **kwargs):
@ -246,5 +270,4 @@ class Installer(os_installer.Installer):
self._save_system(sys_id)



os_installer.register(Installer)
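os_installer.register(Installer) at the bottom is the plugin-registry pattern: each module registers its class under its NAME so callers can instantiate it by name. A self-contained sketch of that pattern (not Compass's exact implementation):

INSTALLERS = {}


def register(installer_class):
    """Map the plugin's NAME to its class."""
    INSTALLERS[installer_class.NAME] = installer_class


def get_installer_by_name(name, **kwargs):
    return INSTALLERS[name](**kwargs)


class FakeInstaller(object):
    NAME = 'fake'

    def __init__(self, **kwargs):
        self.kwargs = kwargs


register(FakeInstaller)
print get_installer_by_name('fake').NAME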

@ -1,3 +1,15 @@
"""modules to provide providers to read/write cluster/host config

.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
__all__ = [
'db_config_provider', 'file_config_provider', 'mix_config_provider',
'get_provider', 'get_provider_by_name', 'register_provider',
]


from compass.config_management.providers.config_provider import (
get_provider, get_provider_by_name, register_provider)
from compass.config_management.providers.plugins import db_config_provider
from compass.config_management.providers.plugins import file_config_provider
from compass.config_management.providers.plugins import mix_config_provider

@ -4,17 +4,17 @@
"""
import logging

from abc import ABCMeta

from compass.utils import setting_wrapper as setting


class ConfigProvider(object):
"""Interface for config provider"""
__metaclass__ = ABCMeta

NAME = 'config_provider'

def __init__(self):
raise NotImplementedError('%s is not implemented' % self)

def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, self.NAME)

@ -35,6 +35,24 @@ class ConfigProvider(object):
"""
return {}

def update_adapters(self, adapters, roles_per_target_system):
"""Virtual method to update adapters.

:param adapters: adapters to update
:type adapters: list of dict
:param roles_per_target_system: roles per target_system to update
:type roles_per_target_system: dict of str to dict.
"""
pass

def update_switch_filters(self, switch_filters):
"""Virtual method to update switch filters.

:param switch_filters: switch filters to update.
:type switch_filters: list of dict
"""
pass

def get_host_config(self, hostid):
"""Virtual method to get host config.

@ -45,19 +63,6 @@ class ConfigProvider(object):
"""
return {}

def get_host_configs(self, hostids):
"""Wrapper method to get hosts' configs.

:param hostids: ids of the hosts to get configuration.
:type hostids: list of int

:returns: dict mapping each hostid to host configuration as dict.
"""
configs = {}
for hostid in hostids:
configs[hostid] = self.get_host_config(hostid)
return configs

def update_global_config(self, config):
"""Virtual method to update global config.

@ -86,14 +91,88 @@ class ConfigProvider(object):
"""
pass

def update_host_configs(self, configs):
"""Wrapper method to update host configs.
def clean_host_config(self, hostid):
"""Virtual method to clean host config.

:param configs: dict mapping host id to host configuration as dict.
:type configs: dict of (int, dict)
:param hostid: the id of the host to clean.
:type hostid: int
"""
for hostname, config in configs.items():
self.update_host_config(hostname, config)
pass

def reinstall_host(self, hostid):
"""Virtual method to reinstall host.

:param hostid: the id of the host to reinstall.
:type hostid: int.
"""
pass

def reinstall_cluster(self, clusterid):
"""Virtual method to reinstall cluster.

:param clusterid: the id of the cluster to reinstall.
:type clusterid: int
"""
pass

def clean_host_installing_progress(self, hostid):
"""Virtual method to clean host installing progress.

:param hostid: the id of the host to clean the installing progress
:type hostid: int
"""
pass

def clean_cluster_installing_progress(self, clusterid):
"""Virtual method to clean cluster installing progress.

:param clusterid: the id of the cluster to clean installing progress
:type clusterid: int
"""
pass

def clean_cluster_config(self, clusterid):
"""Virtual method to clean cluster config

:param clusterid: the id of the cluster to clean
:type clusterid: int
"""
pass

def get_cluster_hosts(self, clusterid):
"""Virtual method to get hosts of given cluster.

:param clusterid: the id of the cluster
:type clusterid: int
"""
return []

def get_clusters(self):
"""Virtual method to get cluster list."""
return []

def get_switch_and_machines(self):
"""Virtual method to get switches and machines.

:returns: switches as list, machines per switch as dict of str to list
"""
return ([], {})

def update_switch_and_machines(
self, switches, switch_machines
):
"""Virtual method to update switches and machines.

:param switches: switches to update
:type switches: list of dict.
:param switch_machines: machines of each switch to update
:type switch_machines: dict of str to list of dict.
"""
pass

def sync(self):
"""Virtual method to sync data in provider."""
pass


PROVIDERS = {}

@ -2,10 +2,16 @@

.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
import os.path

from compass.config_management.providers import config_provider
from compass.config_management.utils import config_filter
from compass.db import database
from compass.db.model import Adapter, Role, SwitchConfig, Switch, Machine
from compass.db.model import Cluster, ClusterHost
from compass.db.model import ClusterState, HostState, LogProgressingHistory
from compass.utils import setting_wrapper as setting


CLUSTER_ALLOWS = ['*']
@ -49,13 +55,188 @@ class DBProvider(config_provider.ConfigProvider):
return {}

def update_host_config(self, hostid, config):
"""Update hsot config to db."""
"""Update host config to db."""
session = database.current_session()
host = session.query(ClusterHost).filter_by(id=hostid).first()
if not host:
return

filtered_config = self.HOST_FILTER.filter(config)
host.config = filtered_config

def update_adapters(self, adapters, roles_per_target_system):
"""Update adapter config to db."""
session = database.current_session()
session.query(Adapter).delete()
session.query(Role).delete()
for adapter in adapters:
session.add(Adapter(**adapter))

for _, roles in roles_per_target_system.items():
for role in roles:
session.add(Role(**role))

def update_switch_filters(self, switch_filters):
"""update switch filters."""
session = database.current_session()
switch_filter_tuples = set([])
session.query(SwitchConfig).delete(synchronize_session='fetch')
for switch_filter in switch_filters:
switch_filter_tuple = tuple(switch_filter.values())
if switch_filter_tuple in switch_filter_tuples:
logging.debug('ignore adding switch filter: %s',
switch_filter)

continue
else:
logging.debug('add switch filter: %s', switch_filter)
switch_filter_tuples.add(switch_filter_tuple)

session.add(SwitchConfig(**switch_filter))

def clean_host_config(self, hostid):
"""clean host config."""
self.clean_host_installing_progress(hostid)
session = database.current_session()
session.query(ClusterHost).filter_by(
id=hostid).delete(synchronize_session='fetch')
session.query(HostState).filter_by(
id=hostid).delete(synchronize_session='fetch')

def reinstall_host(self, hostid):
"""reinstall host."""
session = database.current_session()
host = session.query(ClusterHost).filter_by(id=hostid).first()
if not host:
return

log_dir = os.path.join(
setting.INSTALLATION_LOGDIR,
'%s.%s' % (host.hostname, host.cluster_id),
'')
session.query(LogProgressingHistory).filter(
LogProgressingHistory.pathname.startswith(
log_dir)).delete(synchronize_session='fetch')
if not host.state:
host.state = HostState()

host.mutable = False
host.state.state = 'INSTALLING'
host.state.progress = 0.0
host.state.message = ''
host.state.severity = 'INFO'

def reinstall_cluster(self, clusterid):
"""reinstall cluster."""
session = database.current_session()
cluster = session.query(Cluster).filter_by(id=clusterid).first()
if not cluster:
return

if not cluster.state:
cluster.state = ClusterState()

cluster.state.state = 'INSTALLING'
cluster.mutable = False
cluster.state.progress = 0.0
cluster.state.message = ''
cluster.state.severity = 'INFO'

def clean_cluster_installing_progress(self, clusterid):
"""clean cluster installing progress."""
session = database.current_session()
cluster = session.query(Cluster).filter_by(id=clusterid).first()
if not cluster:
return

if cluster.state and cluster.state.state != 'UNINITIALIZED':
cluster.mutable = False
cluster.state.state = 'INSTALLING'
cluster.state.progress = 0.0
cluster.state.message = ''
cluster.state.severity = 'INFO'

def clean_host_installing_progress(self, hostid):
"""clean host installing progress."""
session = database.current_session()
host = session.query(ClusterHost).filter_by(id=hostid).first()
if not host:
return

log_dir = os.path.join(
setting.INSTALLATION_LOGDIR,
'%s.%s' % (host.hostname, host.cluster_id),
'')
session.query(LogProgressingHistory).filter(
LogProgressingHistory.pathname.startswith(
log_dir)).delete(synchronize_session='fetch')
if host.state and host.state.state != 'UNINITIALIZED':
host.mutable = False
host.state.state = 'INSTALLING'
host.state.progress = 0.0
host.state.message = ''
host.state.severity = 'INFO'

def clean_cluster_config(self, clusterid):
"""clean cluster config."""
session = database.current_session()
session.query(Cluster).filter_by(
id=clusterid).delete(synchronize_session='fetch')
session.query(ClusterState).filter_by(
id=clusterid).delete(synchronize_session='fetch')

def get_cluster_hosts(self, clusterid):
"""get cluster hosts"""
session = database.current_session()
hosts = session.query(ClusterHost).filter_by(
cluster_id=clusterid).all()
return [host.id for host in hosts]

def get_clusters(self):
"""get clusters"""
session = database.current_session()
clusters = session.query(Cluster).all()
return [cluster.id for cluster in clusters]

def get_switch_and_machines(self):
"""get switches and machines"""
session = database.current_session()
switches = session.query(Switch).all()
switches_data = []
switch_machines_data = {}
for switch in switches:
switches_data.append({
'ip': switch.ip,
'vendor_info': switch.vendor_info,
'credential': switch.credential,
'state': switch.state,
})
switch_machines_data[switch.ip] = []
for machine in switch.machines:
switch_machines_data[switch.ip].append({
'mac': machine.mac,
'port': machine.port,
'vlan': machine.vlan,
})

return switches_data, switch_machines_data

def update_switch_and_machines(
self, switches, switch_machines
):
"""update switches and machines"""
session = database.current_session()
session.query(Switch).delete(synchronize_session='fetch')
session.query(Machine).delete(synchronize_session='fetch')
for switch_data in switches:
switch = Switch(**switch_data)
logging.info('add switch %s', switch)
session.add(switch)
for machine_data in switch_machines.get(switch.ip, []):
machine = Machine(**machine_data)
logging.info('add machine %s under %s', machine, switch)
machine.switch = switch
session.add(machine)


config_provider.register_provider(DBProvider)
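update_switch_filters above deduplicates filter dicts by their value tuple before inserting rows. The same idea in isolation (sorting the items first makes the key independent of dict ordering, a small hardening over tuple(values())):

switch_filters = [
    {'ip': '10.145.88.1', 'filter_port': 'ae.*'},
    {'ip': '10.145.88.1', 'filter_port': 'ae.*'},  # duplicate, skipped
    {'ip': '10.145.88.2', 'filter_port': 'eth.*'},
]

seen = set()
unique_filters = []
for switch_filter in switch_filters:
    key = tuple(sorted(switch_filter.items()))
    if key in seen:
        continue
    seen.add(key)
    unique_filters.append(switch_filter)

print unique_filters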

@ -43,5 +43,55 @@ class MixProvider(config_provider.ConfigProvider):
"""update host config."""
self.host_provider_.update_host_config(hostid, config)

def update_adapters(self, adapters, roles_per_target_system):
"""update adapters."""
self.host_provider_.update_adapters(
adapters, roles_per_target_system)

def update_switch_filters(self, switch_filters):
"""update switch filters"""
self.host_provider_.update_switch_filters(switch_filters)

def clean_host_config(self, hostid):
"""clean host config."""
self.host_provider_.clean_host_config(hostid)

def reinstall_host(self, hostid):
"""reinstall host config"""
self.host_provider_.reinstall_host(hostid)

def reinstall_cluster(self, clusterid):
"""reinstall cluster"""
self.host_provider_.reinstall_cluster(clusterid)

def clean_host_installing_progress(self, hostid):
"""clean host installing progress"""
self.host_provider_.clean_host_installing_progress(hostid)

def clean_cluster_installing_progress(self, clusterid):
"""clean cluster installing progress"""
self.host_provider_.clean_cluster_installing_progress(clusterid)

def clean_cluster_config(self, clusterid):
"""clean cluster config"""
self.host_provider_.clean_cluster_config(clusterid)

def get_cluster_hosts(self, clusterid):
"""get cluster hosts."""
return self.host_provider_.get_cluster_hosts(clusterid)

def get_clusters(self):
"""get clusters"""
return self.host_provider_.get_clusters()

def get_switch_and_machines(self):
"""get switch and machines."""
return self.host_provider_.get_switch_and_machines()

def update_switch_and_machines(self, switches, switch_machines):
"""update switch and machines."""
self.host_provider_.update_switch_and_machines(
switches, switch_machines)


config_provider.register_provider(MixProvider)
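MixProvider is pure delegation: it composes another provider and forwards every call, so read/write responsibilities can be split across backends. The shape in miniature, with illustrative classes:

class HostProvider(object):
    """Illustrative backend that actually owns the data."""
    def get_clusters(self):
        return [1, 2]


class MixProvider(object):
    """Forwards each call to the composed host provider."""
    def __init__(self, host_provider):
        self.host_provider_ = host_provider

    def get_clusters(self):
        return self.host_provider_.get_clusters()


print MixProvider(HostProvider()).get_clusters()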

@ -7,12 +7,13 @@ them to provider and installers.
import functools
import logging

from compass.config_management.installers import os_installer
from compass.config_management.installers import package_installer
from compass.config_management.providers import config_provider
from compass.config_management import installers
from compass.config_management import providers
from compass.config_management.utils import config_merger_callbacks
from compass.config_management.utils.config_merger import ConfigMapping
from compass.config_management.utils.config_merger import ConfigMerger
from compass.config_management.utils.config_reference import ConfigReference
from compass.utils import setting_wrapper as setting
from compass.utils import util


@ -94,11 +95,11 @@ class ConfigManager(object):
"""

def __init__(self):
self.config_provider_ = config_provider.get_provider()
self.config_provider_ = providers.get_provider()
logging.debug('got config provider: %s', self.config_provider_)
self.package_installer_ = package_installer.get_installer()
self.package_installer_ = installers.get_package_installer()
logging.debug('got package installer: %s', self.package_installer_)
self.os_installer_ = os_installer.get_installer(
self.os_installer_ = installers.get_os_installer(
package_installer=self.package_installer_)
logging.debug('got os installer: %s', self.os_installer_)

@ -112,8 +113,11 @@ class ConfigManager(object):
{'name': '...', 'os': '...', 'target_system': '...'}
"""
oses = self.os_installer_.get_oses()
logging.debug('got oses %s from %s', oses, self.os_installer_)
target_systems_per_os = self.package_installer_.get_target_systems(
oses)
logging.debug('got target_systems per os from %s: %s',
self.package_installer_, target_systems_per_os)
adapters = []
for os_version, target_systems in target_systems_per_os.items():
for target_system in target_systems:
@ -138,6 +142,8 @@ class ConfigManager(object):
{'name': '...', 'description': '...', 'target_system': '...'}
"""
roles = self.package_installer_.get_roles(target_system)
logging.debug('got target system %s roles %s from %s',
target_system, roles, self.package_installer_)
return [
{
'name': role,
@ -146,6 +152,57 @@ class ConfigManager(object):
} for role, description in roles.items()
]

def update_adapters_from_installers(self):
"""update adapters from installers."""
adapters = self.get_adapters()
target_systems = set()
roles_per_target_system = {}
for adapter in adapters:
target_systems.add(adapter['target_system'])

for target_system in target_systems:
roles_per_target_system[target_system] = self.get_roles(
target_system)

logging.debug('update adapters %s and '
'roles per target system %s to %s',
adapters, roles_per_target_system,
self.config_provider_)
self.config_provider_.update_adapters(
adapters, roles_per_target_system)

def update_switch_filters(self):
"""Update switch filter from setting.SWITCHES"""
if not hasattr(setting, 'SWITCHES'):
logging.info('no switch configs to set')
return

switch_filters = util.get_switch_filters(setting.SWITCHES)
logging.debug('update switch filters %s to %s',
switch_filters, self.config_provider_)
self.config_provider_.update_switch_filters(switch_filters)

def get_switch_and_machines(self):
"""Get switches and machines"""
switches, machines_per_switch = (
self.config_provider_.get_switch_and_machines())
logging.debug('got switches %s from %s',
switches, self.config_provider_)
logging.debug('got machines per switch %s from %s',
machines_per_switch, self.config_provider_)
return (switches, machines_per_switch)

def update_switch_and_machines(
self, switches, switch_machines
):
"""Update switches and machines."""
logging.debug('update switches %s to %s',
switches, self.config_provider_)
logging.debug('update switch machines %s to %s',
switch_machines, self.config_provider_)
self.config_provider_.update_switch_and_machines(
switches, switch_machines)

def get_global_config(self, os_version, target_system):
"""Get global config."""
config = self.config_provider_.get_global_config()
@ -169,9 +226,15 @@ class ConfigManager(object):
def update_global_config(self, config, os_version, target_system):
"""update global config."""
logging.debug('update global config: %s', config)
logging.debug('update global config to %s',
self.config_provider_)
self.config_provider_.update_global_config(config)
logging.debug('update global config to %s',
self.os_installer_)
self.os_installer_.update_global_config(
config, os_version=os_version, target_system=target_system)
logging.debug('update global config to %s',
self.package_installer_)
self.package_installer_.update_global_config(
config, os_version=os_version, target_system=target_system)

@ -197,29 +260,20 @@ class ConfigManager(object):
util.merge_dict(config, package_config)
return config

def clean_cluster_config(self, clusterid, os_version, target_system):
config = self.config_provider_.get_cluster_config(clusterid)
logging.debug('got cluster %s config from %s: %s',
clusterid, self.config_provider_, config)
self.os_installer_.clean_cluster_config(
clusterid, config, os_version=os_version,
target_system=target_system)
logging.debug('clean cluster %s config in %s',
clusterid, self.os_installer_)
self.package_installer_.clean_cluster_config(
clusterid, config, os_version=os_version,
target_system=target_system)
logging.debug('clean cluster %s config in %s',
clusterid, self.package_installer_)

def update_cluster_config(self, clusterid, config,
os_version, target_system):
"""update cluster config."""
logging.debug('update cluster %s config: %s', clusterid, config)
logging.debug('update cluster %s config to %s',
clusterid, self.config_provider_)
self.config_provider_.update_cluster_config(clusterid, config)
logging.debug('update cluster %s config to %s',
clusterid, self.os_installer_)
self.os_installer_.update_cluster_config(
clusterid, config, os_version=os_version,
target_system=target_system)
logging.debug('update cluster %s config to %s',
clusterid, self.package_installer_)
self.package_installer_.update_cluster_config(
clusterid, config, os_version=os_version,
target_system=target_system)
@ -259,16 +313,19 @@ class ConfigManager(object):
config = self.config_provider_.get_host_config(hostid)
logging.debug('got host %s config from %s: %s',
hostid, self.config_provider_, config)
logging.debug('clean host %s config in %s',
hostid, self.config_provider_)
self.config_provider_.clean_host_config(hostid)
logging.debug('clean host %s config in %s',
hostid, self.os_installer_)
self.os_installer_.clean_host_config(
hostid, config, os_version=os_version,
target_system=target_system)
logging.debug('clean host %s config in %s',
hostid, self.os_installer_)
hostid, self.package_installer_)
self.package_installer_.clean_host_config(
hostid, config, os_version=os_version,
target_system=target_system)
logging.debug('clean host %s config in %s',
hostid, self.package_installer_)

def clean_host_configs(self, hostids, os_version, target_system):
"""clean hosts' configs."""
@ -280,28 +337,126 @@ class ConfigManager(object):
config = self.config_provider_.get_host_config(hostid)
logging.debug('got host %s config from %s: %s',
hostid, self.config_provider_, config)
logging.debug('reinstall host %s in %s',
hostid, self.config_provider_)
self.config_provider_.reinstall_host(hostid)
logging.debug('reinstall host %s in %s',
hostid, self.os_installer_)
self.os_installer_.reinstall_host(
hostid, config, os_version=os_version,
target_system=target_system)
logging.debug('reinstall host %s in %s',
hostid, self.os_installer_)
hostid, self.package_installer_)
self.package_installer_.reinstall_host(
hostid, config, os_version=os_version,
target_system=target_system)
logging.debug('clean host %s in %s',
hostid, self.package_installer_)

def reinstall_cluster(self, clusterid, os_version, target_system):
"""reinstall cluster."""
config = self.config_provider_.get_cluster_config(clusterid)
logging.debug('got cluster %s config from %s: %s',
clusterid, self.config_provider_, config)
logging.debug('reinstall cluster %s in %s',
clusterid, self.config_provider_)
self.config_provider_.reinstall_cluster(clusterid)
logging.debug('reinstall cluster %s in %s',
clusterid, self.os_installer_)
self.os_installer_.reinstall_cluster(
clusterid, config, os_version=os_version,
target_system=target_system)
logging.debug('reinstall cluster %s in %s',
clusterid, self.package_installer_)
self.package_installer_.reinstall_cluster(
clusterid, config, os_version=os_version,
target_system=target_system)

def reinstall_hosts(self, hostids, os_version, target_system):
"""reinstall hosts."""
for hostid in hostids:
self.reinstall_host(hostid, os_version, target_system)

def update_host_config(self, hostid, config, os_version, target_system):
def clean_host_installing_progress(self, hostid,
os_version, target_system):
"""clean host installing progress."""
config = self.config_provider_.get_host_config(hostid)
logging.debug('got host %s config from %s: %s',
hostid, self.config_provider_, config)
logging.debug('clean host %s installing progress in %s',
hostid, self.config_provider_)
self.config_provider_.clean_host_installing_progress(hostid)
logging.debug('clean host %s installing progress in %s',
hostid, self.os_installer_)
self.os_installer_.clean_host_installing_progress(
hostid, config, os_version=os_version,
target_system=target_system)
logging.debug('clean host %s installing progress in %s',
hostid, self.package_installer_)
self.package_installer_.clean_host_installing_progress(
hostid, config, os_version=os_version,
target_system=target_system)

def clean_hosts_installing_progress(self, hostids,
os_version, target_system):
"""clean hosts installing progress."""
for hostid in hostids:
self.clean_host_installing_progress(
hostid, os_version, target_system)

def clean_cluster_installing_progress(self, clusterid,
os_version, target_system):
"""clean cluster installing progress."""
config = self.config_provider_.get_cluster_config(clusterid)
logging.debug('got cluster %s config from %s: %s',
clusterid, self.config_provider_, config)
logging.debug('clean cluster %s installing progress in %s',
clusterid, self.config_provider_)
self.config_provider_.clean_cluster_installing_progress(clusterid)
logging.debug('clean cluster %s installing progress in %s',
clusterid, self.os_installer_)
self.os_installer_.clean_cluster_installing_progress(
clusterid, config, os_version=os_version,
target_system=target_system)
logging.debug('clean cluster %s installing progress in %s',
clusterid, self.package_installer_)
self.package_installer_.clean_cluster_installing_progress(
clusterid, config, os_version=os_version,
target_system=target_system)

def clean_cluster_config(self, clusterid,
os_version, target_system):
"""clean cluster config."""
config = self.config_provider_.get_cluster_config(clusterid)
logging.debug('got cluster %s config from %s: %s',
clusterid, self.config_provider_, config)

logging.debug('clean cluster %s config in %s',
clusterid, self.config_provider_)
self.config_provider_.clean_cluster_config(clusterid)
logging.debug('clean cluster %s config in %s',
clusterid, self.os_installer_)
self.os_installer_.clean_cluster_config(
clusterid, config, os_version=os_version,
target_system=target_system)
logging.debug('clean cluster %s config in %s',
clusterid, self.package_installer_)
self.package_installer_.clean_cluster_config(
clusterid, config, os_version=os_version,
target_system=target_system)

def update_host_config(self, hostid, config,
os_version, target_system):
"""update host config."""
logging.debug('update host %s config: %s', hostid, config)
logging.debug('update host %s config to %s',
hostid, self.config_provider_)
self.config_provider_.update_host_config(hostid, config)
logging.debug('update host %s config to %s',
hostid, self.os_installer_)
self.os_installer_.update_host_config(
hostid, config, os_version=os_version,
target_system=target_system)
logging.debug('update host %s config to %s',
hostid, self.package_installer_)
self.package_installer_.update_host_config(
hostid, config, os_version=os_version,
target_system=target_system)
@ -312,40 +467,153 @@ class ConfigManager(object):
self.update_host_config(
hostid, host_config, os_version, target_system)

def update_cluster_and_host_configs(self,
clusterid,
hostids,
update_hostids,
os_version,
target_system):
"""update cluster/host configs."""
logging.debug('update cluster %s with all hosts %s and update: %s',
|
||||
clusterid, hostids, update_hostids)
|
||||
def get_cluster_hosts(self, clusterid):
|
||||
"""get cluster hosts."""
|
||||
hostids = self.config_provider_.get_cluster_hosts(clusterid)
|
||||
logging.debug('got hosts of cluster %s from %s: %s',
|
||||
clusterid, self.config_provider_, hostids)
|
||||
return hostids
|
||||
|
||||
global_config = self.get_global_config(os_version, target_system)
|
||||
self.update_global_config(global_config, os_version=os_version,
|
||||
target_system=target_system)
|
||||
def get_clusters(self):
|
||||
"""get clusters"""
|
||||
clusters = self.config_provider_.get_clusters()
|
||||
logging.debug('got clusters from %s: %s',
|
||||
self.config_provider_, clusters)
|
||||
return clusters
|
||||
|
||||
cluster_config = self.get_cluster_config(
|
||||
clusterid, os_version=os_version, target_system=target_system)
|
||||
util.merge_dict(cluster_config, global_config, False)
|
||||
self.update_cluster_config(
|
||||
clusterid, cluster_config, os_version=os_version,
|
||||
target_system=target_system)
|
||||
def filter_cluster_and_hosts(self, cluster_hosts,
|
||||
os_versions, target_systems,
|
||||
cluster_properties_match,
|
||||
cluster_properties_name,
|
||||
host_properties_match,
|
||||
host_properties_name):
|
||||
"""get filtered cluster and hosts configs."""
|
||||
logging.debug('filter cluster_hosts: %s', cluster_hosts)
|
||||
clusters_properties = []
|
||||
cluster_hosts_properties = {}
|
||||
for clusterid, hostids in cluster_hosts.items():
|
||||
cluster_config = self.get_cluster_config(
|
||||
clusterid, os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
cluster_ref = ConfigReference(cluster_config)
|
||||
if cluster_ref.match(cluster_properties_match):
|
||||
clusters_properties.append(
|
||||
cluster_ref.filter(cluster_properties_name))
|
||||
|
||||
host_configs = self.get_host_configs(
|
||||
hostids, os_version=os_version,
|
||||
target_system=target_system)
|
||||
CLUSTER_HOST_MERGER.merge(cluster_config, host_configs)
|
||||
update_host_configs = dict(
|
||||
[(hostid, host_config)
|
||||
for hostid, host_config in host_configs.items()
|
||||
if hostid in update_hostids])
|
||||
self.update_host_configs(
|
||||
update_host_configs, os_version=os_version,
|
||||
target_system=target_system)
|
||||
host_configs = self.get_host_configs(
|
||||
hostids, os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
cluster_hosts_properties[clusterid] = []
|
||||
for _, host_config in host_configs.items():
|
||||
host_ref = ConfigReference(host_config)
|
||||
if host_ref.match(host_properties_match):
|
||||
cluster_hosts_properties[clusterid].append(
|
||||
host_ref.filter(host_properties_name))
|
||||
|
||||
logging.debug('got clsuter properties: %s',
|
||||
clusters_properties)
|
||||
logging.debug('got cluster hosts properties: %s',
|
||||
cluster_hosts_properties)
|
||||
return (clusters_properties, cluster_hosts_properties)
|
||||
|
||||
def reinstall_cluster_and_hosts(self,
|
||||
cluster_hosts,
|
||||
os_versions,
|
||||
target_systems):
|
||||
"""reinstall clusters and hosts of each cluster."""
|
||||
logging.debug('reinstall cluster_hosts: %s', cluster_hosts)
|
||||
for clusterid, hostids in cluster_hosts.items():
|
||||
self.reinstall_hosts(
|
||||
hostids,
|
||||
os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
self.reinstall_cluster(clusterid,
|
||||
os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
|
||||
def clean_cluster_and_hosts(self, cluster_hosts,
|
||||
os_versions, target_systems):
|
||||
"""clean clusters and hosts of each cluster."""
|
||||
logging.debug('clean cluster_hosts: %s', cluster_hosts)
|
||||
for clusterid, hostids in cluster_hosts.items():
|
||||
self.clean_host_configs(hostids,
|
||||
os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
all_hostids = self.get_cluster_hosts(clusterid)
|
||||
if set(all_hostids) == set(hostids):
|
||||
self.clean_cluster_config(
|
||||
clusterid,
|
||||
os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
else:
|
||||
self.clean_cluster_installing_progress(
|
||||
clusterid, os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
|
||||
def clean_cluster_and_hosts_installing_progress(
|
||||
self, cluster_hosts, os_versions, target_systems
|
||||
):
|
||||
"""Clean clusters and hosts of each cluster intalling progress."""
|
||||
logging.debug('clean cluster_hosts installing progress: %s',
|
||||
cluster_hosts)
|
||||
for clusterid, hostids in cluster_hosts.items():
|
||||
self.clean_hosts_installing_progress(
|
||||
hostids, os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
self.clean_cluster_installing_progress(
|
||||
clusterid, os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
|
||||
def install_cluster_and_hosts(self,
|
||||
cluster_hosts,
|
||||
os_versions,
|
||||
target_systems):
|
||||
"""update clusters and hosts of each cluster configs."""
|
||||
logging.debug('update cluster_hosts: %s', cluster_hosts)
|
||||
|
||||
for clusterid, hostids in cluster_hosts.items():
|
||||
global_config = self.get_global_config(
|
||||
os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
self.update_global_config(global_config,
|
||||
os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
cluster_config = self.get_cluster_config(
|
||||
clusterid, os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
util.merge_dict(cluster_config, global_config, False)
|
||||
self.update_cluster_config(
|
||||
clusterid, cluster_config,
|
||||
os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
|
||||
all_hostids = self.get_cluster_hosts(clusterid)
|
||||
host_configs = self.get_host_configs(
|
||||
all_hostids, os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
CLUSTER_HOST_MERGER.merge(cluster_config, host_configs)
|
||||
update_host_configs = dict(
|
||||
[(hostid, host_config)
|
||||
for hostid, host_config in host_configs.items()
|
||||
if hostid in hostids])
|
||||
self.update_host_configs(
|
||||
update_host_configs,
|
||||
os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
self.reinstall_hosts(
|
||||
update_host_configs.keys(),
|
||||
os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
self.reinstall_cluster(clusterid,
|
||||
os_version=os_versions[clusterid],
|
||||
target_system=target_systems[clusterid])
|
||||
|
||||
def sync(self):
|
||||
"""sync os installer and package installer."""
|
||||
logging.info('config manager sync')
|
||||
logging.debug('sync %s', self.config_provider_)
|
||||
self.config_provider_.sync()
|
||||
logging.debug('sync %s', self.os_installer_)
|
||||
self.os_installer_.sync()
|
||||
logging.debug('sync %s', self.package_installer_)
|
||||
self.package_installer_.sync()
|
||||
|
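Every ConfigManager method in the hunks above follows the same fan-out shape: read config from the provider, then apply the identical operation to the config provider, the OS installer, and the package installer in turn. A minimal sketch of that delegation pattern, using hypothetical stand-in backends rather than the real compass classes:

class FanOutManager(object):
    """Sketch: apply one operation to several backends in order."""

    def __init__(self, provider, os_installer, package_installer):
        # the ordering mirrors ConfigManager: provider first, installers after
        self.backends = [provider, os_installer, package_installer]

    def clean_host_config(self, hostid, **kwargs):
        # each backend receives the same call; error handling is the caller's
        for backend in self.backends:
            backend.clean_host_config(hostid, **kwargs)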
@ -221,11 +221,11 @@ class ConfigMapping(object):

util.merge_dict(sub_configs, lower_sub_configs)

values = self._get_values(
values = self._get_values(
ref_key, sub_ref, lower_sub_refs, sub_configs)

logging.debug('%s set values %s to %s',
ref_key, self.to_key_, values)
ref_key, self.to_key_, values)
for lower_key, lower_sub_ref in lower_sub_refs.items():
if lower_key not in values:
logging.error('no key %s in %s', lower_key, values)

@ -172,7 +172,7 @@ def _update_exclusive_roles(bundled_exclusives, lower_roles,


def _assign_roles_by_mins(role_bundles, lower_roles, unassigned_hosts,
bundled_maxs, bundled_mins):
bundled_maxs, bundled_mins):
"""Assign roles to hosts by min restriction."""
available_hosts = deepcopy(unassigned_hosts)
for bundled_role, roles in role_bundles.items():
@ -204,7 +204,7 @@ def _assign_roles_by_maxs(role_bundles, lower_roles, unassigned_hosts,
for bundled_role in role_bundles.keys():
if bundled_maxs[bundled_role] > 0:
available_lists.append(
[bundled_role]*bundled_maxs[bundled_role])
[bundled_role] * bundled_maxs[bundled_role])
else:
default_roles.append(bundled_role)

@ -367,7 +367,7 @@ def assign_noproxy(_upper_ref, _from_key, lower_refs,
logging.error('failed to assign %s[%s] = %s %% %s',
lower_key, to_key, noproxy_pattern, mapping)
raise error


no_proxy = ','.join(no_proxy_list)
host_no_proxy = {}
for lower_key, _ in lower_refs.items():

@ -4,6 +4,8 @@
"""
import fnmatch
import os.path
import re

from copy import deepcopy

from compass.utils import util
@ -292,3 +294,37 @@ class ConfigReference(object):
if ref.config is None:
ref.__init__(value, ref.parent_, ref.parent_key_)
return ref

def match(self, properties_match):
"""Check if config matches the given properties."""
for property_name, property_value in properties_match.items():
config_value = self.get(property_name)
if config_value is None:
return False

if isinstance(config_value, list):
found = False
for config_value_item in config_value:
if re.match(property_value, str(config_value_item)):
found = True

if not found:
return False

else:
if not re.match(property_value, str(config_value)):
return False

return True

def filter(self, properties_name):
"""filter config by properties name."""
filtered_properties = {}
for property_name in properties_name:
config_value = self.get(property_name)
if config_value is None:
continue

filtered_properties[property_name] = config_value

return filtered_properties
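The new match()/filter() helpers treat each expected property value as a regular expression against the stringified config value; for list values, one matching item suffices. A small usage sketch of the same logic over a plain dict (the config contents are made up for illustration):

import re

config = {'os': 'CentOS-6.5', 'roles': ['os-dashboard', 'os-controller']}

# match: the pattern must match the stringified value; for a list,
# at least one item has to match.
assert re.match('CentOS.*', str(config['os']))
assert any(re.match('os-dashboard', str(item)) for item in config['roles'])

# filter: keep only the named properties that are actually present.
properties_name = ['os', 'missing_key']
filtered = dict(
    (name, config[name]) for name in properties_name if name in config)
assert filtered == {'os': 'CentOS-6.5'}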
@ -36,7 +36,7 @@ class KeyTranslator(object):
self.override_conditions_ = override_conditions
self._is_valid()

def __repr__(self):
def __repr__(self):
return (
'%s[translated_keys=%s,from_keys=%s,translated_value=%s,'
'from_values=%s,override=%s,override_conditions=%s]'
@ -57,8 +57,8 @@ class KeyTranslator(object):
elif not callable(translated_key):
raise TypeError(
'translated_keys[%d] type is %s while expected '
'types are str or callable: %s' % (
i, type(translated_key), translated_key))
'types are str or callable: %s' % (
i, type(translated_key), translated_key))

def _is_valid_from_keys(self):
"""Check from keys are valid."""
@ -142,7 +142,7 @@ class KeyTranslator(object):
return translated_keys

def _get_translated_value(self, ref_key, sub_ref,
translated_key, translated_sub_ref):
translated_key, translated_sub_ref):
"""Get translated value."""
if self.translated_value_ is None:
return sub_ref.config

@ -8,7 +8,8 @@ from compass.utils import util

def get_key_from_pattern(
_ref, path, from_pattern='.*',
to_pattern='', **kwargs):
to_pattern='', **kwargs
):
"""Get translated key from pattern"""
match = re.match(from_pattern, path)
if not match:

@ -29,6 +29,14 @@ def init(database_url):
SCOPED_SESSION = scoped_session(SESSION)


def in_session():
"""check if in database session scope."""
if hasattr(SESSION_HOLDER, 'session'):
return True
else:
return False


@contextmanager
def session():
"""
@ -37,11 +45,12 @@ def session():
"""
if hasattr(SESSION_HOLDER, 'session'):
logging.error('we are already in session')
new_session = SESSION_HOLDER.session
raise Exception('session already exists')
else:
new_session = SCOPED_SESSION()
try:
SESSION_HOLDER.session = new_session

try:
yield new_session
new_session.commit()
except Exception as error:
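The database hunk above makes nested session() calls an error instead of silently reusing the outer session, and adds in_session() on the thread-local holder. A self-contained sketch of the same pattern, with a plain object standing in for the SQLAlchemy scoped session:

import threading
from contextlib import contextmanager

SESSION_HOLDER = threading.local()

@contextmanager
def session():
    """Sketch: one session per thread; nesting raises."""
    if hasattr(SESSION_HOLDER, 'session'):
        raise Exception('session already exists')
    SESSION_HOLDER.session = object()  # stands in for SCOPED_SESSION()
    try:
        yield SESSION_HOLDER.session
    finally:
        del SESSION_HOLDER.session

def in_session():
    """True only while some caller on this thread is inside session()."""
    return hasattr(SESSION_HOLDER, 'session')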
@ -5,14 +5,17 @@ A vendor needs to implement abstract methods of base class.
import re
import logging

from abc import ABCMeta

from compass.hdsdiscovery import utils
from compass.hdsdiscovery.error import TimeoutError


class BaseVendor(object):
"""Basic Vendor object"""
__metaclass__ = ABCMeta

def is_this_vendor(self, *args, **kwargs):
def is_this_vendor(self, host, credential, sys_info, **kwargs):
"""Determine if the host is associated with this vendor.
This function must be implemented by vendor itself
"""
@ -24,10 +27,11 @@ class BaseSnmpVendor(BaseVendor):
to determine the vendor of the switch. """

def __init__(self, matched_names):
super(BaseSnmpVendor, self).__init__()
self._matched_names = matched_names

def is_this_vendor(self, host, credential, sys_info):

def is_this_vendor(self, host, credential, sys_info, **kwargs):
"""Determine if the host is associated with this vendor."""
if utils.is_valid_snmp_v2_credential(credential) and sys_info:
for name in self._matched_names:
if re.search(r"\b" + re.escape(name) + r"\b", sys_info,
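BaseSnmpVendor matches the switch's sysDescr string against a list of vendor keywords with word boundaries, so a keyword cannot match inside an unrelated token. A quick illustration of the re.search call above (the IGNORECASE flag is an assumption, since the original flag argument is cut off by the hunk boundary):

import re

sys_info = 'Pica8 XorPlus version 2.0'
matched_names = ['pica8']
# \b anchors keep 'pica8' from matching inside e.g. 'notpica8x'
found = any(
    re.search(r"\b" + re.escape(name) + r"\b", sys_info, re.IGNORECASE)
    for name in matched_names)
assert found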
@ -40,62 +44,66 @@ class BasePlugin(object):
"""Extended by vendor's plugin, which processes request and
retrieve info directly from the switch.
"""
__metaclass__ = ABCMeta

def process_data(self, *args, **kwargs):
def process_data(self, oper='SCAN', **kwargs):
"""Each vendor will have some plugins to do some operations.
Plugin will process request data and return expected result.

:param args: arguments
:param oper: operation function name.
:param kwargs: key-value pairs of arguments
"""
raise NotImplementedError

# At least one of these three functions below must be implemented.
def scan(self, *args, **kwargs):
def scan(self, **kwargs):
"""Get multiple records at once"""
pass

def set(self, *args, **kwargs):
def set(self, **kwargs):
"""Set value to desired variable"""
pass

def get(self, *args, **kwargs):
def get(self, **kwargs):
"""Get one record from a host"""
pass


class BaseSnmpMacPlugin(BasePlugin):
"""Base snmp plugin."""

def __init__(self, host, credential, oid='BRIDGE-MIB::dot1dTpFdbPort',
vlan_oid='Q-BRIDGE-MIB::dot1qPvid'):
super(BaseSnmpMacPlugin, self).__init__()
self.host = host
self.credential = credential
self.oid = oid
self.port_oid = 'ifName'
self.vlan_oid = vlan_oid

def process_data(self, oper='SCAN'):
def process_data(self, oper='SCAN', **kwargs):
func_name = oper.lower()
return getattr(self, func_name)()
return getattr(self, func_name)(**kwargs)

def scan(self):
def scan(self, **kwargs):
results = None
try:
results = utils.snmpwalk_by_cl(self.host, self.credential,
self.oid)
except TimeoutError as e:
except TimeoutError as error:
logging.debug("PluginMac:scan snmpwalk_by_cl failed: %s",
e.message)
error.message)
return None

mac_list = []
for entity in results:
ifIndex = entity['value']
if entity and int(ifIndex):
if_index = entity['value']
if entity and int(if_index):
tmp = {}
mac_numbers = entity['iid'].split('.')
tmp['mac'] = self.get_mac_address(mac_numbers)
tmp['port'] = self.get_port(ifIndex)
tmp['vlan'] = self.get_vlan_id(ifIndex)
tmp['port'] = self.get_port(if_index)
tmp['vlan'] = self.get_vlan_id(if_index)
mac_list.append(tmp)

return mac_list
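In scan() above, each snmpwalk row carries the MAC address encoded as six dotted decimal numbers in entity['iid']. The get_mac_address helper is not shown in this hunk; a plausible implementation, offered only as a sketch, would be:

def get_mac_address(mac_numbers):
    """Sketch: '0.12.41.85.90.187' style digits -> '00:0c:29:55:5a:bb'."""
    # each dotted decimal component is one octet of the MAC address
    return ':'.join('%02x' % int(num) for num in mac_numbers)

assert get_mac_address('0.12.41.85.90.187'.split('.')) == '00:0c:29:55:5a:bb'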
@ -110,9 +118,9 @@ class BaseSnmpMacPlugin(BasePlugin):
result = None
try:
result = utils.snmpget_by_cl(self.host, self.credential, oid)
except TimeoutError as e:
except TimeoutError as error:
logging.debug("[PluginMac:get_vlan_id snmpget_by_cl failed: %s]",
e.message)
error.message)
return None

vlan_id = result.split()[-1]
@ -125,9 +133,9 @@ class BaseSnmpMacPlugin(BasePlugin):
result = None
try:
result = utils.snmpget_by_cl(self.host, self.credential, if_name)
except TimeoutError as e:
except TimeoutError as error:
logging.debug("[PluginMac:get_port snmpget_by_cl failed: %s]",
e.message)
error.message)
return None

# A result may be like "Value: FasterEthernet1/2/34

@ -1,8 +1,11 @@
"""hdsdiscovery module errors
"""
"""hdsdiscovery module errors"""


class TimeoutError(Exception):
"""Timeout error."""

def __init__(self, message):
super(TimeoutError, self).__init__(message)
self.message = message

def __str__(self):

@ -11,7 +11,7 @@ NOTSUPPORTED = 'notsupported'
ERROR = 'error'


class HDManager:
class HDManager(object):
"""Process a request."""

def __init__(self):
@ -42,7 +42,7 @@ class HDManager:
logging.error('no plugin %s to load from %s', req_obj, plugin_dir)
return None

return plugin.process_data(oper)
return plugin.process_data(oper, **kwargs)

def is_valid_vendor(self, host, credential, vendor):
""" Check if vendor is associated with this host and credential
@ -86,7 +86,7 @@ class HDManager:
# TODO(grace): Why do we need to have valid IP?
# a hostname should also work.
if not utils.valid_ip_format(host):
logging.error("host '%s' is not valid IP address!" % host)
logging.error("host '%s' is not valid IP address!", host)
return (None, ERROR, "Invalid IP address %s!" % host)

if not utils.is_valid_snmp_v2_credential(credential):
@ -106,7 +106,7 @@ class HDManager:
and re.match(r'^[^\.]', o)]

logging.debug("[get_vendor][available vendors]: %s ", all_vendors)
logging.debug("[get_vendor] System Information is [%s]" % sys_info)
logging.debug("[get_vendor] System Information is [%s]", sys_info)

# TODO(grace): should not convert to lower. The vendor impl can choose
# to do case-insensitive match
@ -120,7 +120,7 @@ class HDManager:
continue

if instance.is_this_vendor(host, credential, sys_info):
logging.info("[get_vendor]****Found vendor '%s'****" % vname)
logging.info("[get_vendor]****Found vendor '%s'****", vname)
vendor = vname
break

@ -131,12 +131,13 @@ class HDManager:
return (vendor, "Found", "")

def get_sys_info(self, host, credential):
"""get sys info"""
sys_info = None
try:
sys_info = utils.snmpget_by_cl(host,
credential,
self.snmp_sysdescr)
except TimeoutError as e:
return (None, e.message)
except TimeoutError as error:
return (None, error.message)

return (sys_info, "")

@ -17,25 +17,23 @@ def load_module(mod_name, path, host=None, credential=None):
:param str host: switch ip address
:param str credential: credential used to access switch
"""
instance = None
try:
file, path, descr = imp.find_module(mod_name, [path])
if file:
mod = imp.load_module(mod_name, file, path, descr)
mod_file, path, descr = imp.find_module(mod_name, [path])
if mod_file:
mod = imp.load_module(mod_name, mod_file, path, descr)
if host and credential:
instance = getattr(mod, mod.CLASS_NAME)(host, credential)
else:
instance = getattr(mod, mod.CLASS_NAME)()

return instance
except ImportError as exc:
logging.error('No such plugin : %s', mod_name)
logging.exception(exc)

finally:
return instance
return None
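The load_module rewrite above renames the `file` local (it shadowed the builtin) and drops the `return` from the `finally` block, which used to swallow the ImportError path. The Python 2 imp calls follow the standard find/load pairing; a trimmed, self-contained sketch of that pattern:

import imp

def load_plugin(mod_name, search_path):
    """Sketch of imp-based loading; returns None when the module is absent."""
    try:
        mod_file, path, descr = imp.find_module(mod_name, [search_path])
    except ImportError:
        return None
    try:
        # load_module reuses the open file handle from find_module
        return imp.load_module(mod_name, mod_file, path, descr)
    finally:
        if mod_file:
            mod_file.close()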
def ssh_remote_execute(host, username, password, cmd, *args):
def ssh_remote_execute(host, username, password, cmd):
"""SSH to execute script on remote machine

:param host: ip of the remote machine
@ -80,7 +78,7 @@ def valid_ip_format(ip_address):
"""Validate the format of an IP address"""

if not re.match(r'^((([0-2]?\d{0,2}\.){3}([0-2]?\d{0,2}))'
'|(([\da-fA-F]{1,4}:){7}([\da-fA-F]{1,4})))$',
r'|(([\da-fA-F]{1,4}:){7}([\da-fA-F]{1,4})))$',
ip_address):
# check if the IP format matches ipv4 or ipv6 by regex
return False
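The one-character change above adds the r prefix to the second half of the regex. `\d` happens to survive without it only because Python passes unknown escapes through unchanged (pep8/pyflakes still flag it), but recognized escapes silently change meaning, so the raw string is the safe form:

# '\d' passes through unchanged, but '\b' becomes a backspace character:
assert '\\d' == r'\d'      # both are two characters: a style issue only
assert '\b' != r'\b'       # one char vs two chars: a real behavior change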
@ -190,6 +188,7 @@ SNMP_V2_CREDENTIALS = {"version": "", "community": ""}


def is_valid_snmp_v2_credential(credential):
"""check if credential is valid snmp v2 credential."""
if credential.keys() != SNMP_V2_CREDENTIALS.keys():
return False
if credential['version'] != '2c':
@ -199,23 +198,25 @@ def is_valid_snmp_v2_credential(credential):


def is_valid_ssh_credential(credential):
"""check if credential is valid ssh credential."""
if credential.keys() != SSH_CREDENTIALS.keys():
return False
return True


def snmpget_by_cl(host, credential, oid, timeout=8, retries=3):
"""snmpget by credential."""
if not is_valid_snmp_v2_credential(credential):
logging.error("[utils][snmpget_by_cl] Credential %s cannot be used "
"for SNMP request!" % credential)
"for SNMP request!", credential)
return None

version = credential['version']
community = credential['community']
cl = ("snmpget -v %s -c %s -Ob -r %s -t %s %s %s"
% (version, community, retries, timeout, host, oid))
cmd = "snmpget -v %s -c %s -Ob -r %s -t %s %s %s" % (
version, community, retries, timeout, host, oid)
output = None
sub_p = subprocess.Popen(cl, shell=True,
sub_p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = sub_p.communicate()
@ -228,17 +229,18 @@ def snmpget_by_cl(host, credential, oid, timeout=8, retries=3):


def snmpwalk_by_cl(host, credential, oid, timeout=5, retries=3):
"""snmpwalk by credential."""
if not is_valid_snmp_v2_credential(credential):
logging.error("[utils][snmpwalk_by_cl] Credential %s cannot be used "
"for SNMP request!" % credential)
"for SNMP request!", credential)
return None

version = credential['version']
community = credential['community']
cl = ("snmpwalk -v %s -c %s -Cc -r %s -t %s -Ob %s %s"
% (version, community, retries, timeout, host, oid))
cmd = "snmpwalk -v %s -c %s -Cc -r %s -t %s -Ob %s %s" % (
version, community, retries, timeout, host, oid)
output = []
sub_p = subprocess.Popen(cl, shell=True, stdout=subprocess.PIPE)
sub_p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output, err = sub_p.communicate()

if err:
|
||||
def __init__(self):
|
||||
self.__name = "Open vSwitch"
|
||||
|
||||
def is_this_vendor(self, host, credential, sys_info):
|
||||
def is_this_vendor(self, host, credential, sys_info, **kwargs):
|
||||
"""Determine if the hostname is accociated witH this vendor.
|
||||
|
||||
:param host: swtich's IP address
|
||||
|
@ -14,15 +14,15 @@ class Mac(base.BasePlugin):
|
||||
self.host = host
|
||||
self.credential = credential
|
||||
|
||||
def process_data(self, oper="SCAN"):
|
||||
def process_data(self, oper="SCAN", **kwargs):
|
||||
"""Dynamically call the function according 'oper'
|
||||
|
||||
:param oper: operation of data processing
|
||||
"""
|
||||
func_name = oper.lower()
|
||||
return getattr(self, func_name)()
|
||||
return getattr(self, func_name)(**kwargs)
|
||||
|
||||
def scan(self):
|
||||
def scan(self, **kwargs):
|
||||
"""
|
||||
Implemnets the scan method in BasePlugin class. In this module,
|
||||
mac addesses were retrieved by ssh
|
||||
|
@ -151,187 +151,178 @@ class AdapterMatcher(object):
|
||||
"""Get Host Progress from database.
|
||||
|
||||
.. notes::
|
||||
The function should be called out of database session.
|
||||
The function should be called in database session.
|
||||
"""
|
||||
with database.session() as session:
|
||||
host = session.query(
|
||||
ClusterHost).filter_by(
|
||||
id=hostid).first()
|
||||
if not host:
|
||||
logging.error(
|
||||
'there is no host for %s in ClusterHost', hostid)
|
||||
return None, None, None
|
||||
session = database.current_session()
|
||||
host = session.query(
|
||||
ClusterHost).filter_by(
|
||||
id=hostid).first()
|
||||
if not host:
|
||||
logging.error(
|
||||
'there is no host for %s in ClusterHost', hostid)
|
||||
return None, None, None
|
||||
|
||||
if not host.state:
|
||||
logging.error('there is no related HostState for %s',
|
||||
hostid)
|
||||
return host.hostname, None, None
|
||||
if not host.state:
|
||||
logging.error('there is no related HostState for %s',
|
||||
hostid)
|
||||
return host.hostname, None, None
|
||||
|
||||
return (
|
||||
host.hostname,
|
||||
host.state.state,
|
||||
Progress(host.state.progress,
|
||||
host.state.message,
|
||||
host.state.severity))
|
||||
return (
|
||||
host.hostname,
|
||||
host.state.state,
|
||||
Progress(host.state.progress,
|
||||
host.state.message,
|
||||
host.state.severity))
|
||||
|
||||
@classmethod
|
||||
def _update_host_progress(cls, hostid, progress):
|
||||
"""Update host progress to database.
|
||||
|
||||
.. note::
|
||||
The function should be called out of the database session.
|
||||
The function should be called in database session.
|
||||
"""
|
||||
with database.session() as session:
|
||||
host = session.query(
|
||||
ClusterHost).filter_by(id=hostid).first()
|
||||
if not host:
|
||||
logging.error(
|
||||
'there is no host for %s in ClusterHost', hostid)
|
||||
return
|
||||
session = database.current_session()
|
||||
host = session.query(
|
||||
ClusterHost).filter_by(id=hostid).first()
|
||||
if not host:
|
||||
logging.error(
|
||||
'there is no host for %s in ClusterHost', hostid)
|
||||
return
|
||||
|
||||
if not host.state:
|
||||
logging.error(
|
||||
'there is no related HostState for %s', hostid)
|
||||
return
|
||||
if not host.state:
|
||||
logging.error(
|
||||
'there is no related HostState for %s', hostid)
|
||||
return
|
||||
|
||||
if host.state.state != 'INSTALLING':
|
||||
logging.error(
|
||||
'host %s is not in INSTALLING state',
|
||||
hostid)
|
||||
return
|
||||
if host.state.state != 'INSTALLING':
|
||||
logging.error(
|
||||
'host %s is not in INSTALLING state',
|
||||
hostid)
|
||||
return
|
||||
|
||||
if host.state.progress > progress.progress:
|
||||
logging.error(
|
||||
'host %s progress is not increased '
|
||||
'from %s to %s',
|
||||
hostid, host.state, progress)
|
||||
return
|
||||
if host.state.progress > progress.progress:
|
||||
logging.error(
|
||||
'host %s progress is not increased '
|
||||
'from %s to %s',
|
||||
hostid, host.state, progress)
|
||||
return
|
||||
|
||||
if (host.state.progress == progress.progress and
|
||||
host.state.message == progress.message):
|
||||
logging.info(
|
||||
'ignore update host %s progress %s to %s',
|
||||
hostid, progress, host.state)
|
||||
return
|
||||
if (
|
||||
host.state.progress == progress.progress and
|
||||
host.state.message == progress.message
|
||||
):
|
||||
logging.info(
|
||||
'ignore update host %s progress %s to %s',
|
||||
hostid, progress, host.state)
|
||||
return
|
||||
|
||||
if progress.progress >= 1.0:
|
||||
host.state.state = 'READY'
|
||||
host.state.progress = progress.progress
|
||||
host.state.message = progress.message
|
||||
if progress.severity:
|
||||
host.state.severity = progress.severity
|
||||
|
||||
host.state.progress = progress.progress
|
||||
host.state.message = progress.message
|
||||
if host.state.progress >= 1.0:
|
||||
host.state.state = 'READY'
|
||||
|
||||
if progress.severity:
|
||||
host.state.severity = progress.severity
|
||||
if host.state.severity == 'ERROR':
|
||||
host.state.state = 'ERROR'
|
||||
|
||||
if progress.severity == 'ERROR':
|
||||
host.state.state = 'ERROR'
|
||||
if host.state.state != 'INSTALLING':
|
||||
host.mutable = True
|
||||
|
||||
if host.state.state != 'INSTALLING':
|
||||
host.mutable = True
|
||||
logging.debug(
|
||||
'update host %s state %s',
|
||||
hostid, host.state)
|
||||
logging.debug(
|
||||
'update host %s state %s',
|
||||
hostid, host.state)
|
||||
|
||||
@classmethod
|
||||
def _get_cluster_progress(cls, clusterid):
|
||||
"""Get cluster progress from database.
|
||||
|
||||
.. notes::
|
||||
The function should be called out of database session.
|
||||
"""
|
||||
with database.session() as session:
|
||||
cluster = session.query(Cluster).filter_by(id=clusterid).first()
|
||||
if not cluster:
|
||||
logging.error('there is no Cluster for %s', clusterid)
|
||||
return None, None
|
||||
|
||||
if not cluster.state:
|
||||
logging.error('there is no ClusterState for %s', clusterid)
|
||||
return None, None
|
||||
|
||||
return (
|
||||
cluster.state.state,
|
||||
Progress(cluster.state.progress,
|
||||
cluster.state.message,
|
||||
cluster.state.severity))
|
||||
|
||||
@classmethod
|
||||
def _update_cluster_progress(cls, clusterid, progress):
|
||||
def _update_cluster_progress(cls, clusterid):
|
||||
"""Update cluster installing progress to database.
|
||||
|
||||
.. note::
|
||||
The function should be called out of the database session.
|
||||
The function should be called in the database session.
|
||||
"""
|
||||
with database.session() as session:
|
||||
cluster = session.query(
|
||||
Cluster).filter_by(id=clusterid).first()
|
||||
if not cluster:
|
||||
logging.error(
|
||||
'there is no cluster for %s in Cluster',
|
||||
clusterid)
|
||||
return
|
||||
session = database.current_session()
|
||||
cluster = session.query(
|
||||
Cluster).filter_by(id=clusterid).first()
|
||||
if not cluster:
|
||||
logging.error(
|
||||
'there is no cluster for %s in Cluster',
|
||||
clusterid)
|
||||
return
|
||||
|
||||
if not cluster.state:
|
||||
logging.error(
|
||||
'there is no ClusterState for %s',
|
||||
clusterid)
|
||||
if not cluster.state:
|
||||
logging.error(
|
||||
'there is no ClusterState for %s',
|
||||
clusterid)
|
||||
|
||||
if cluster.state.state != 'INSTALLING':
|
||||
logging.error('cluster %s is not in INSTALLING state',
|
||||
clusterid)
|
||||
return
|
||||
if cluster.state.state != 'INSTALLING':
|
||||
logging.error('cluster %s is not in INSTALLING state',
|
||||
clusterid)
|
||||
return
|
||||
|
||||
if progress.progress >= 1.0:
|
||||
cluster.state.state = 'READY'
|
||||
cluster_progress = 0.0
|
||||
cluster_messages = {}
|
||||
cluster_severities = set([])
|
||||
hostids = []
|
||||
for host in cluster.hosts:
|
||||
if host.state:
|
||||
hostids.append(host.id)
|
||||
cluster_progress += host.state.progress
|
||||
if host.state.message:
|
||||
cluster_messages[host.hostname] = host.state.message
|
||||
|
||||
cluster.state.progress = progress.progress
|
||||
cluster.state.message = progress.message
|
||||
if host.state.severity:
|
||||
cluster_severities.add(host.state.severity)
|
||||
|
||||
if progress.severity:
|
||||
cluster.state.severity = progress.severity
|
||||
cluster.state.progress = cluster_progress / len(hostids)
|
||||
cluster.state.message = '\n'.join(
|
||||
[
|
||||
'%s: %s' % (hostname, message)
|
||||
for hostname, message in cluster_messages.items()
|
||||
]
|
||||
)
|
||||
for severity in ['ERROR', 'WARNING', 'INFO']:
|
||||
if severity in cluster_severities:
|
||||
cluster.state.severity = severity
|
||||
break
|
||||
|
||||
if progress.severity == 'ERROR':
|
||||
cluster.state.state = 'ERROR'
|
||||
if cluster.state.progress >= 1.0:
|
||||
cluster.state.state = 'READY'
|
||||
|
||||
if cluster.state.state != 'INSTALLING':
|
||||
cluster.mutable = True
|
||||
if cluster.state.severity == 'ERROR':
|
||||
cluster.state.state = 'ERROR'
|
||||
|
||||
logging.debug(
|
||||
'update cluster %s state %s',
|
||||
clusterid, cluster.state)
|
||||
if cluster.state.state != 'INSTALLING':
|
||||
cluster.mutable = True
|
||||
|
||||
logging.debug(
|
||||
'update cluster %s state %s',
|
||||
clusterid, cluster.state)
|
||||
|
||||
def update_progress(self, clusterid, hostids):
|
||||
"""Update cluster progress and hosts progresses.
|
||||
|
||||
:param clusterid: the cluster id.
|
||||
:type clusterid: int
|
||||
:param hostids: the host ids.
|
||||
:type hostids: list of int
|
||||
:param clusterid: the id of the cluster to update.
|
||||
:type clusterid: int.
|
||||
:param hostids: the ids of the hosts to update.
|
||||
:type hostids: list of int.
|
||||
"""
|
||||
cluster_state, cluster_progress = self._get_cluster_progress(
|
||||
clusterid)
|
||||
if not cluster_progress:
|
||||
logging.error(
|
||||
'nothing to update cluster %s => state %s progress %s',
|
||||
clusterid, cluster_state, cluster_progress)
|
||||
return
|
||||
|
||||
logging.debug('got cluster %s state %s progress %s',
|
||||
clusterid, cluster_state, cluster_progress)
|
||||
host_progresses = {}
|
||||
for hostid in hostids:
|
||||
hostname, host_state, host_progress = self._get_host_progress(
|
||||
hostid)
|
||||
if not hostname or not host_progress:
|
||||
logging.error(
|
||||
'nothing to update host %s => hostname %s '
|
||||
'state %s progress %s',
|
||||
hostid, hostname, host_state, host_progress)
|
||||
continue
|
||||
with database.session():
|
||||
for hostid in hostids:
|
||||
hostname, host_state, host_progress = (
|
||||
self._get_host_progress(hostid))
|
||||
if not hostname or not host_progress:
|
||||
logging.error(
|
||||
'nothing to update host %s => hostname %s '
|
||||
'state %s progress %s',
|
||||
hostid, hostname, host_state, host_progress)
|
||||
continue
|
||||
|
||||
logging.debug('got host %s hostname %s state %s progress %s',
|
||||
hostid, hostname, host_state, host_progress)
|
||||
host_progresses[hostid] = (hostname, host_state, host_progress)
|
||||
logging.debug('got host %s hostname %s state %s progress %s',
|
||||
hostid, hostname, host_state, host_progress)
|
||||
host_progresses[hostid] = (
|
||||
hostname, host_state, host_progress)
|
||||
|
||||
for hostid, host_value in host_progresses.items():
|
||||
hostname, host_state, host_progress = host_value
|
||||
@ -340,35 +331,18 @@ class AdapterMatcher(object):
|
||||
hostname, clusterid, host_progress)
|
||||
self.package_matcher_.update_progress(
|
||||
hostname, clusterid, host_progress)
|
||||
self._update_host_progress(hostid, host_progress)
|
||||
else:
|
||||
logging.error(
|
||||
'there is no need to update host %s '
|
||||
'progress: hostname %s state %s progress %s',
|
||||
hostid, hostname, host_state, host_progress)
|
||||
|
||||
cluster_progress_data = 0.0
|
||||
for _, _, host_progress in host_progresses.values():
|
||||
cluster_progress_data += host_progress.progress
|
||||
with database.session():
|
||||
for hostid in hostids:
|
||||
if hostid not in host_progresses:
|
||||
continue
|
||||
|
||||
cluster_progress.progress = cluster_progress_data / len(hostids)
|
||||
messages = []
|
||||
for _, _, host_progress in host_progresses.values():
|
||||
if host_progress.message:
|
||||
messages.append(host_progress.message)
|
||||
_, _, host_progress = host_progresses[hostid]
|
||||
self._update_host_progress(hostid, host_progress)
|
||||
|
||||
if messages:
|
||||
cluster_progress.message = '\n'.join(messages)
|
||||
|
||||
for severity in ['ERROR', 'WARNING', 'INFO']:
|
||||
cluster_severity = None
|
||||
for _, _, host_progress in host_progresses.values():
|
||||
if host_progress.severity == severity:
|
||||
cluster_severity = severity
|
||||
break
|
||||
|
||||
if cluster_severity:
|
||||
cluster_progress.severity = cluster_severity
|
||||
break
|
||||
|
||||
self._update_cluster_progress(clusterid, cluster_progress)
|
||||
self._update_cluster_progress(clusterid)
|
||||
|
@ -280,9 +280,12 @@ class FileMatcher(object):
|
||||
# total progress should only be updated when the new calculated
|
||||
# progress is greater than the recored total progress or the
|
||||
# progress to update is the same but the message is different.
|
||||
if (total_progress.progress < total_progress_data or
|
||||
(total_progress.progress == total_progress_data and
|
||||
total_progress.message != file_progress.message)):
|
||||
if (
|
||||
total_progress.progress < total_progress_data or (
|
||||
total_progress.progress == total_progress_data and
|
||||
total_progress.message != file_progress.message
|
||||
)
|
||||
):
|
||||
total_progress.progress = total_progress_data
|
||||
total_progress.message = file_progress.message
|
||||
total_progress.severity = file_progress.severity
|
||||
|
@ -2,6 +2,8 @@
|
||||
import logging
|
||||
import re
|
||||
|
||||
from abc import ABCMeta
|
||||
|
||||
from compass.utils import util
|
||||
|
||||
|
||||
@ -29,12 +31,14 @@ class Progress(object):
|
||||
|
||||
class ProgressCalculator(object):
|
||||
"""base class to generate progress."""
|
||||
def __init__(self):
|
||||
raise NotImplementedError(str(self))
|
||||
|
||||
__metaclass__ = ABCMeta
|
||||
|
||||
@classmethod
|
||||
def update_progress(cls, progress_data, message,
|
||||
severity, progress):
|
||||
def update_progress(
|
||||
cls, progress_data, message,
|
||||
severity, progress
|
||||
):
|
||||
"""
|
||||
Update progress with the given progress_data,
|
||||
message and severity.
|
||||
@ -48,9 +52,12 @@ class ProgressCalculator(object):
|
||||
# the progress is only updated when the new progress
|
||||
# is greater than the stored progress or the progress
|
||||
# to update is the same but the message is different.
|
||||
if (progress_data > progress.progress or
|
||||
(progress_data == progress.progress and
|
||||
message != progress.message)):
|
||||
if (
|
||||
progress_data > progress.progress or (
|
||||
progress_data == progress.progress and
|
||||
message != progress.message
|
||||
)
|
||||
):
|
||||
progress.progress = progress_data
|
||||
if message:
|
||||
progress.message = message
|
||||
@ -80,13 +87,14 @@ class IncrementalProgress(ProgressCalculator):
|
||||
|
||||
def __init__(self, min_progress,
|
||||
max_progress, incremental_ratio):
|
||||
super(IncrementalProgress, self).__init__()
|
||||
if not 0.0 <= min_progress <= max_progress <= 1.0:
|
||||
raise IndexError(
|
||||
'%s restriction is not mat: 0.0 <= min_progress(%s)'
|
||||
' <= max_progress(%s) <= 1.0' % (
|
||||
self.__class__.__name__, min_progress, max_progress))
|
||||
|
||||
if not 0.0 <= incremental_ratio <= 1.0:
|
||||
if not 0.0 <= incremental_ratio <= 1.0:
|
||||
raise IndexError(
|
||||
'%s restriction is not mat: '
|
||||
'0.0 <= incremental_ratio(%s) <= 1.0' % (
|
||||
@ -122,6 +130,7 @@ class RelativeProgress(ProgressCalculator):
|
||||
"""class to update progress to the given relative progress."""
|
||||
|
||||
def __init__(self, progress):
|
||||
super(RelativeProgress, self).__init__()
|
||||
if not 0.0 <= progress <= 1.0:
|
||||
raise IndexError(
|
||||
'%s restriction is not mat: 0.0 <= progress(%s) <= 1.0' % (
|
||||
|
@ -246,8 +246,10 @@ ADAPTER_CONFIGURATIONS = [
|
||||
]
|
||||
|
||||
|
||||
def _get_adapter_matcher(os_installer, os_name,
|
||||
package_installer, target_system):
|
||||
def _get_adapter_matcher(
|
||||
os_installer, os_name,
|
||||
package_installer, target_system
|
||||
):
|
||||
"""Get adapter matcher by os name and package installer name."""
|
||||
for configuration in ADAPTER_CONFIGURATIONS:
|
||||
if configuration.match(os_installer, os_name,
|
||||
@ -260,19 +262,20 @@ def _get_adapter_matcher(os_installer, os_name,
|
||||
return None
|
||||
|
||||
|
||||
def update_progress(os_installer, os_name, package_installer, target_system,
|
||||
clusterid, hostids):
|
||||
def update_progress(os_installer, os_names, package_installer, target_systems,
|
||||
cluster_hosts):
|
||||
"""Update adapter installing progress.
|
||||
|
||||
:param os_installer: os installer name
|
||||
:param os_name: os name.
|
||||
:param package_installer: package installer name.
|
||||
:param clusterid: cluster id.
|
||||
:param hostids: hosts ids.
|
||||
:param cluster_hosts: clusters and hosts in each cluster to update.
|
||||
:param cluster_hosts: dict of int to list of int.
|
||||
"""
|
||||
adapter = _get_adapter_matcher(os_installer, os_name,
|
||||
package_installer, target_system)
|
||||
if not adapter:
|
||||
return
|
||||
for clusterid, hostids in cluster_hosts.items():
|
||||
adapter = _get_adapter_matcher(os_installer, os_names[clusterid],
|
||||
package_installer,
|
||||
target_systems[clusterid])
|
||||
if not adapter:
|
||||
continue
|
||||
|
||||
adapter.update_progress(clusterid, hostids)
|
||||
adapter.update_progress(clusterid, hostids)
|
||||
|
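The signature change above is the recurring theme of this commit: single (clusterid, hostids) arguments become a cluster_hosts dict keyed by cluster id, and per-cluster parameters such as os version and target system move into parallel dicts indexed by the same key. The calling convention, sketched with made-up ids and values:

cluster_hosts = {1: [1, 2], 2: [3]}
os_versions = {1: 'CentOS-6.5', 2: 'Ubuntu-12.04'}
target_systems = {1: 'openstack', 2: 'openstack'}

for clusterid, hostids in cluster_hosts.items():
    # every per-cluster parameter is looked up by the same key
    print(clusterid, hostids, os_versions[clusterid],
          target_systems[clusterid])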
@ -4,10 +4,12 @@
"""
from celery.signals import setup_logging

from compass.actions import clean_deployment
from compass.actions import clean_installing_progress
from compass.actions import deploy
from compass.actions import poll_switch
from compass.actions import trigger_install
from compass.actions import progress_update
from compass.db import database
from compass.actions import update_progress
from compass.actions import reinstall
from compass.tasks.client import celery
from compass.utils import flags
from compass.utils import logsetting
@ -24,8 +26,8 @@ def tasks_setup_logging(**_):
setup_logging.connect(tasks_setup_logging)


@celery.task(name="compass.tasks.pollswitch")
def pollswitch(ip_addr, req_obj='mac', oper="SCAN"):
@celery.task(name='compass.tasks.pollswitch')
def pollswitch(ip_addr, req_obj='mac', oper='SCAN'):
"""Query switch and return expected result.

:param ip_addr: switch ip address.
@ -35,28 +37,54 @@ def pollswitch(ip_addr, req_obj='mac', oper="SCAN"):
:param oper: the operation to query the switch (SCAN, GET, SET).
:type oper: str
"""
with database.session():
poll_switch.poll_switch(ip_addr, req_obj='mac', oper="SCAN")
poll_switch.poll_switch(ip_addr, req_obj=req_obj, oper=oper)


@celery.task(name="compass.tasks.trigger_install")
def triggerinstall(clusterid, hostids=[]):
@celery.task(name='compass.tasks.deploy')
def deploy_clusters(cluster_hosts):
"""Deploy the given cluster.

:param clusterid: the id of the cluster to deploy.
:type clusterid: int
:param hostids: the ids of the hosts to deploy.
:type hostids: list of int
:param cluster_hosts: the cluster and hosts of each cluster to deploy.
:type cluster_hosts: dict of int to list of int
"""
with database.session():
trigger_install.trigger_install(clusterid, hostids)
deploy.deploy(cluster_hosts)


@celery.task(name="compass.tasks.progress_update")
def progressupdate(clusterid):
@celery.task(name='compass.tasks.reinstall')
def reinstall_clusters(cluster_hosts):
"""reinstall the given cluster.

:param cluster_hosts: the cluster and hosts of each cluster to reinstall.
:type cluster_hosts: dict of int to list of int
"""
reinstall.reinstall(cluster_hosts)


@celery.task(name='compass.tasks.clean_deployment')
def clean_clusters_deployment(cluster_hosts):
"""clean deployment of the given cluster.

:param cluster_hosts: the cluster and hosts of each cluster to clean.
:type cluster_hosts: dict of int to list of int
"""
clean_deployment.clean_deployment(cluster_hosts)


@celery.task(name='compass.tasks.clean_installing_progress')
def clean_clusters_installing_progress(cluster_hosts):
"""clean installing progress of the given cluster.

:param cluster_hosts: the cluster and hosts of each cluster to clean.
:type cluster_hosts: dict of int to list of int
"""
clean_installing_progress.clean_installing_progress(cluster_hosts)


@celery.task(name='compass.tasks.update_progress')
def update_clusters_progress(cluster_hosts):
"""Calculate the installing progress of the given cluster.

:param clusterid: the id of the cluster to get the installing progress.
:type clusterid: int
:param cluster_hosts: the cluster and hosts of each cluster to update.
:type cluster_hosts: dict of int to list of int
"""
progress_update.update_progress(clusterid)
update_progress.update_progress(cluster_hosts)
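All the new task entry points share one shape: a named @celery.task wrapper that takes cluster_hosts and hands it straight to the matching compass.actions function. A skeleton of that shape, with a hypothetical task name and a placeholder body (the celery import is the repo's own client module, so this runs only inside the codebase):

from compass.tasks.client import celery

@celery.task(name='compass.tasks.example')
def example_task(cluster_hosts):
    """Sketch: thin task wrapper; real work lives in compass.actions."""
    for clusterid, hostids in cluster_hosts.items():
        pass  # delegate to an action function here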
@ -1,3 +1,4 @@
"""integration test for action deploy"""
import chef
import logging
import os
@ -23,19 +24,19 @@ setting.CHEF_INSTALLER_URL = 'https://localhost/'
setting.CONFIG_DIR = '%s/data' % os.path.dirname(os.path.abspath(__file__))


import compass.config_management.installers
import compass.config_management.providers

from compass.actions import trigger_install
from compass.actions import deploy
from compass.db import database
from compass.db.model import Switch, Machine, Cluster, ClusterHost, Adapter, Role
from compass.db.model import Switch, Machine
from compass.db.model import Cluster, ClusterHost, Adapter, Role
from compass.utils import flags
from compass.utils import logsetting


class TestEndToEnd(unittest2.TestCase):
"""Integration test class."""

def _contains(self, origin_config, expected_config):
"""check if expected config is contained in origin config."""
if isinstance(expected_config, dict):
for key, value in expected_config.items():
if not isinstance(origin_config, dict):
@ -60,16 +61,18 @@ class TestEndToEnd(unittest2.TestCase):
return expected_config == origin_config

def _mock_cobbler(self, host_configs):
"""mock cobbler"""
mock_server = Mock()
xmlrpclib.Server = mock_server
mock_server.return_value.login.return_value = ''
mock_server.return_value.login.return_value = ''
mock_server.return_value.sync = Mock()
mock_server.return_value.find_profile = Mock(
side_effect=lambda x: [x['name']])

def _get_system_handle(sys_name, token):
"""mock get_system_handle"""
for i, config in enumerate(host_configs):
if config['name'] == sys_name:
if config['name'] == sys_name:
return i

raise Exception('Not Found %s' % sys_name)
@ -78,6 +81,7 @@ class TestEndToEnd(unittest2.TestCase):
side_effect=_get_system_handle)

def _new_system(token):
"""mock new_system"""
host_configs.append({'name': ''})
return len(host_configs) - 1

@ -85,6 +89,7 @@ class TestEndToEnd(unittest2.TestCase):
side_effect=_new_system)

def _remove_system(sys_name, token):
"""mock remove system"""
for i, config in host_configs:
if config['name'] == sys_name:
del host_configs[i]
@ -98,29 +103,35 @@ class TestEndToEnd(unittest2.TestCase):
mock_server.return_value.save_system = Mock()

def _modify_system(sys_id, key, value, token):
"""mock modify_system"""
host_configs[sys_id][key] = value

mock_server.return_value.modify_system = Mock(
side_effect=_modify_system)

def _check_cobbler(self, host_configs, expected_host_configs):
"""check cobbler config generated correctly"""
self.assertEqual(len(host_configs), len(expected_host_configs))
for i in range(len(host_configs)):
self.assertTrue(
self._contains(host_configs[i], expected_host_configs[i]))

def _mock_chef(self, configs):
"""mock chef"""
chef.autoconfigure = Mock()
chef.DataBag = Mock()

import collections

class _mockDict(collections.Mapping):
"""mock dict class."""

def __init__(in_self, bag, bag_item_name, api):
in_self.bag_item_name_ = bag_item_name
in_self.config_ = configs.get(bag_item_name, {})

def __len__(in_self):
return len(in_self.config_)
return len(in_self.config_)

def __iter__(in_self):
return iter(in_self.config_)
@ -131,10 +142,15 @@ class TestEndToEnd(unittest2.TestCase):
def __setitem__(in_self, name, value):
in_self.config_[name] = value

def __delitem__(in_self, name):
del in_self.config_[name]

def delete(in_self):
"""mock delete"""
del configs[in_self.bag_item_name_]

def save(in_self):
"""mock save"""
configs[in_self.bag_item_name_] = in_self.config_

chef.DataBagItem = Mock(side_effect=_mockDict)
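_mockDict fakes a chef DataBagItem by implementing the collections.Mapping protocol plus the mutating methods the code under test touches, and Mock(side_effect=_mockDict) makes every chef.DataBagItem(...) call return a fresh instance. The minimal version of that trick (names here are illustrative, and collections.Mapping is the Python 2 spelling used by this codebase):

import collections
from mock import Mock

class FakeBagItem(collections.Mapping):
    """Sketch: dict-backed stand-in for a data bag item."""
    def __init__(self, bag, name, api=None):
        self.data = {}
    def __len__(self):
        return len(self.data)
    def __iter__(self):
        return iter(self.data)
    def __getitem__(self, key):
        return self.data[key]

DataBagItem = Mock(side_effect=FakeBagItem)
item = DataBagItem('bag', 'item1')   # each call builds a new FakeBagItem
assert len(item) == 0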
@ -142,19 +158,23 @@ class TestEndToEnd(unittest2.TestCase):
|
||||
chef.Client.return_value.delete = Mock()
|
||||
chef.Node = Mock()
|
||||
chef.Node.return_value.delete = Mock()
|
||||
|
||||
|
||||
def _check_chef(self, configs, expected_configs):
|
||||
"""check chef config is generated correctly."""
|
||||
self.assertTrue(self._contains(configs, expected_configs))
|
||||
|
||||
def _mock_os_installer(self, config_locals):
|
||||
"""mock os installer"""
|
||||
self.os_installer_mock_[setting.OS_INSTALLER](
|
||||
**config_locals['%s_MOCK' % setting.OS_INSTALLER])
|
||||
|
||||
def _mock_package_installer(self, config_locals):
|
||||
"""mock package installer"""
|
||||
self.package_installer_mock_[setting.PACKAGE_INSTALLER](
|
||||
**config_locals['%s_MOCK' % setting.PACKAGE_INSTALLER])
|
||||
|
||||
def _check_os_installer(self, config_locals):
|
||||
"""check os installer generate correct configs"""
|
||||
mock_kwargs = config_locals['%s_MOCK' % setting.OS_INSTALLER]
|
||||
expected_kwargs = config_locals['%s_EXPECTED' % setting.OS_INSTALLER]
|
||||
kwargs = {}
|
||||
@ -163,14 +183,17 @@ class TestEndToEnd(unittest2.TestCase):
|
||||
self.os_installer_checker_[setting.OS_INSTALLER](**kwargs)
|
||||
|
||||
def _check_package_installer(self, config_locals):
|
||||
"""check package installer generate correct configs"""
|
||||
mock_kwargs = config_locals['%s_MOCK' % setting.PACKAGE_INSTALLER]
|
||||
expected_kwargs = config_locals['%s_EXPECTED' % setting.PACKAGE_INSTALLER]
|
||||
expected_kwargs = config_locals[
|
||||
'%s_EXPECTED' % setting.PACKAGE_INSTALLER]
|
||||
kwargs = {}
|
||||
kwargs.update(mock_kwargs)
|
||||
kwargs.update(expected_kwargs)
|
||||
self.package_installer_checker_[setting.PACKAGE_INSTALLER](**kwargs)
|
||||
|
||||
def _test(self, config_filename):
|
||||
"""run the test"""
|
||||
full_path = '%s/data/%s' % (
|
||||
os.path.dirname(os.path.abspath(__file__)),
|
||||
config_filename)
|
||||
@ -180,17 +203,20 @@ class TestEndToEnd(unittest2.TestCase):
|
||||
self._prepare_database(config_locals)
|
||||
self._mock_os_installer(config_locals)
|
||||
self._mock_package_installer(config_locals)
|
||||
cluster_hosts = {}
|
||||
with database.session() as session:
|
||||
clusters = session.query(Cluster).all()
|
||||
for cluster in clusters:
|
||||
clusterid = cluster.id
|
||||
hostids = [host.id for host in cluster.hosts]
|
||||
trigger_install.trigger_install(clusterid, hostids)
|
||||
cluster_hosts[cluster.id] = [
|
||||
host.id for host in cluster.hosts]
|
||||
|
||||
deploy.deploy(cluster_hosts)
|
||||
|
||||
self._check_os_installer(config_locals)
|
||||
self._check_package_installer(config_locals)
|
||||
|
||||
def _prepare_database(self, config_locals):
|
||||
"""prepare database"""
|
||||
with database.session() as session:
|
||||
adapters = {}
|
||||
for adapter_config in config_locals['ADAPTERS']:
|
||||
@ -211,7 +237,9 @@ class TestEndToEnd(unittest2.TestCase):
|
||||
switches[switch_config['ip']] = switch
|
||||
|
||||
machines = {}
|
||||
for switch_ip, machine_configs in config_locals['MACHINES_BY_SWITCH'].items():
|
||||
for switch_ip, machine_configs in (
|
||||
config_locals['MACHINES_BY_SWITCH'].items()
|
||||
):
|
||||
for machine_config in machine_configs:
|
||||
machine = Machine(**machine_config)
|
||||
machines[machine_config['mac']] = machine
|
||||
@ -228,18 +256,23 @@ class TestEndToEnd(unittest2.TestCase):
|
||||
session.add(cluster)
|
||||
|
||||
hosts = {}
|
||||
for cluster_name, host_configs in config_locals['HOSTS_BY_CLUSTER'].items():
|
||||
for cluster_name, host_configs in (
|
||||
config_locals['HOSTS_BY_CLUSTER'].items()
|
||||
):
|
||||
for host_config in host_configs:
|
||||
mac = host_config['mac']
|
||||
del host_config['mac']
|
||||
host = ClusterHost(**host_config)
|
||||
hosts['%s.%s' % (host_config['hostname'], cluster_name)] = host
|
||||
hosts['%s.%s' % (
|
||||
host_config['hostname'], cluster_name)] = host
|
||||
host.machine = machines[mac]
|
||||
host.cluster = clusters[cluster_name]
|
||||
session.add(host)
|
||||
|
||||
def setUp(self):
|
||||
"""test setup"""
|
||||
super(TestEndToEnd, self).setUp()
|
||||
logsetting.init()
|
||||
database.create_db()
|
||||
shutil.rmtree = Mock()
|
||||
os.system = Mock()
|
||||
@ -251,25 +284,22 @@ class TestEndToEnd(unittest2.TestCase):
|
||||
self.os_installer_checker_['cobbler'] = self._check_cobbler
|
||||
self.package_installer_checker_ = {}
|
||||
self.package_installer_checker_['chef'] = self._check_chef
|
||||
self.backup_logfile = flags.OPTIONS.logfile
|
||||
if not flags.OPTIONS.logfile:
|
||||
flags.OPTIONS.logfile = '/tmp/test_trigger_install.log'
|
||||
|
||||
logsetting.init()
|
||||
|
||||
def tearDown(self):
|
||||
flags.OPTIONS.logfile = self.backup_logfile
|
||||
logsetting.init()
|
||||
"""test teardown"""
|
||||
database.drop_db()
|
||||
super(TestEndToEnd, self).tearDown()
|
||||
|
||||
def test_1(self):
|
||||
"""test one cluster one host."""
|
||||
self._test('test1')
|
||||
|
||||
def test_2(self):
|
||||
"""test one cluster multi hosts."""
|
||||
self._test('test2')
|
||||
|
||||
def test_3(self):
|
||||
"""test multi clusters multi hosts"""
|
||||
self._test('test3')
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,3 +1,4 @@
|
||||
"""test os installer module"""
|
||||
import os
|
||||
import unittest2
|
||||
|
||||
@ -15,37 +16,47 @@ from compass.utils import logsetting
|
||||
|
||||
|
||||
class DummyInstaller(os_installer.Installer):
|
||||
"""dummy installer"""
|
||||
NAME = 'dummy'
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
super(DummyInstaller, self).__init__()
|
||||
|
||||
|
||||
class Dummy2Installer(os_installer.Installer):
|
||||
"""another dummy installer"""
|
||||
NAME = 'dummy'
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
super(Dummy2Installer, self).__init__()
|
||||
|
||||
|
||||
class TestInstallerFunctions(unittest2.TestCase):
|
||||
"""test installer functions"""
|
||||
|
||||
def setUp(self):
|
||||
super(TestInstallerFunctions, self).setUp()
|
||||
logsetting.init()
|
||||
self.installers_backup = os_installer.INSTALLERS
|
||||
os_installer.INSTALLERS = {}
|
||||
|
||||
def tearDown(self):
|
||||
os_installer.INSTALLERS = self.installers_backup
|
||||
super(TestInstallerFunctions, self).tearDown()
|
||||
|
||||
def test_found_installer(self):
|
||||
"""test found installer"""
|
||||
os_installer.register(DummyInstaller)
|
||||
installer = os_installer.get_installer_by_name(DummyInstaller.NAME)
self.assertIsInstance(installer, DummyInstaller)

def test_notfound_unregistered_installer(self):
"""test not found unregistered installer"""
self.assertRaises(KeyError, os_installer.get_installer_by_name,
DummyInstaller.NAME)

def test_multi_registered_installer(self):
"""test register multi installers with the same name"""
os_installer.register(DummyInstaller)
self.assertRaises(KeyError, os_installer.register, Dummy2Installer)

@ -1,3 +1,4 @@
"""test package_installer module"""
import os
import unittest2

@ -15,6 +16,7 @@ from compass.utils import logsetting


class DummyInstaller(package_installer.Installer):
"""dummy installer"""
NAME = 'dummy'

def __init__(self):
@ -22,6 +24,7 @@ class DummyInstaller(package_installer.Installer):


class Dummy2Installer(package_installer.Installer):
"""another dummy installer"""
NAME = 'dummy'

def __init__(self):
@ -29,24 +32,32 @@ class Dummy2Installer(package_installer.Installer):


class TestInstallerFunctions(unittest2.TestCase):
"""test installer functions"""

def setUp(self):
super(TestInstallerFunctions, self).setUp()
logsetting.init()
self.installers_backup = package_installer.INSTALLERS
package_installer.INSTALLERS = {}

def tearDown(self):
package_installer.INSTALLERS = self.installers_backup
super(TestInstallerFunctions, self).tearDown()

def test_found_installer(self):
"""test found installer"""
package_installer.register(DummyInstaller)
installer = package_installer.get_installer_by_name(
DummyInstaller.NAME)
self.assertIsInstance(installer, DummyInstaller)

def test_notfound_unregistered_installer(self):
"""test not found unregistered installer"""
self.assertRaises(KeyError, package_installer.get_installer_by_name,
DummyInstaller.NAME)

def test_multi_registered_installer(self):
"""test register multi installers with the same name"""
package_installer.register(DummyInstaller)
self.assertRaises(KeyError, package_installer.register,
Dummy2Installer)

@ -1,3 +1,4 @@
"""test config provider module"""
import os
import unittest2

@ -15,37 +16,47 @@ from compass.utils import logsetting


class DummyProvider(config_provider.ConfigProvider):
"""Dummy provider"""
NAME = 'dummy'

def __init__(self):
pass
super(DummyProvider, self).__init__()


class Dummy2Provider(config_provider.ConfigProvider):
"""another dummy provider"""
NAME = 'dummy'

def __init__(self):
pass
super(Dummy2Provider, self).__init__()


class TestProviderRegisterFunctions(unittest2.TestCase):
"""test provider register"""

def setUp(self):
super(TestProviderRegisterFunctions, self).setUp()
logsetting.init()
config_provider.PROVIDERS = {}

def tearDown(self):
config_provider.PROVIDERS = {}
super(TestProviderRegisterFunctions, self).tearDown()

def test_found_provider(self):
"""test found provider"""
config_provider.register_provider(DummyProvider)
provider = config_provider.get_provider_by_name(
DummyProvider.NAME)
self.assertIsInstance(provider, DummyProvider)

def test_notfound_unregistered_provider(self):
"""test notfound unregistered provider"""
self.assertRaises(KeyError, config_provider.get_provider_by_name,
DummyProvider.NAME)

def test_multi_registered_provider(self):
"""tst register multi provider with the same name."""
config_provider.register_provider(DummyProvider)
self.assertRaises(KeyError, config_provider.register_provider,
Dummy2Provider)

@ -1,52 +1,68 @@
"""test config_filter module"""
import unittest2

from compass.config_management.utils import config_filter
from compass.utils import flags
from compass.utils import logsetting


class TestConfigFilter(unittest2.TestCase):
"""test config filter class"""

def setUp(self):
super(TestConfigFilter, self).setUp()
logsetting.init()

def tearDown(self):
super(TestConfigFilter, self).tearDown()

def test_allows(self):
"""test allows rules"""
config = {'1': '1',
'2': {'22': '22',
'33': {'333': '333',
'44': '444'}},
'3': {'33': '44'}}
allows = ['*', '3', '5']
filter = config_filter.ConfigFilter(allows)
filtered_config = filter.filter(config)
configfilter = config_filter.ConfigFilter(allows)
filtered_config = configfilter.filter(config)
self.assertEqual(filtered_config, config)
allows = ['/1', '2/22', '5']
expected_config = {'1': '1', '2': {'22': '22'}}
filter = config_filter.ConfigFilter(allows)
filtered_config = filter.filter(config)
configfilter = config_filter.ConfigFilter(allows)
filtered_config = configfilter.filter(config)
self.assertEqual(filtered_config, expected_config)
allows = ['*/33']
expected_config = {'2': {'33': {'333': '333',
'44': '444'}},
'3': {'33': '44'}}
filter = config_filter.ConfigFilter(allows)
filtered_config = filter.filter(config)
configfilter = config_filter.ConfigFilter(allows)
filtered_config = configfilter.filter(config)
self.assertEqual(filtered_config, expected_config)

def test_denies(self):
"""test denies rules"""
config = {'1': '1', '2': {'22': '22',
'33': {'333': '333',
'44': '444'}},
'3': {'33': '44'}}
denies = ['/1', '2/22', '2/33/333', '5']
expected_config = {'2': {'33': {'44': '444'}}, '3': {'33': '44'}}
filter = config_filter.ConfigFilter(denies=denies)
filtered_config = filter.filter(config)
configfilter = config_filter.ConfigFilter(denies=denies)
filtered_config = configfilter.filter(config)
self.assertEqual(filtered_config, expected_config)
denies = ['*']
filter = config_filter.ConfigFilter(denies=denies)
filtered_config = filter.filter(config)
configfilter = config_filter.ConfigFilter(denies=denies)
filtered_config = configfilter.filter(config)
self.assertIsNone(filtered_config)
denies = ['*/33']
expected_config = {'1': '1', '2': {'22': '22'}}
filter = config_filter.ConfigFilter(denies=denies)
filtered_config = filter.filter(config)
configfilter = config_filter.ConfigFilter(denies=denies)
filtered_config = configfilter.filter(config)
self.assertEqual(filtered_config, expected_config)


if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()

@ -1,13 +1,25 @@
"""test config merger module"""
import functools
import unittest2

from compass.config_management.utils import config_merger
from compass.config_management.utils import config_merger_callbacks
from compass.config_management.utils import config_reference
from compass.utils import flags
from compass.utils import logsetting


class TestConfigMerger(unittest2.TestCase):
"""test config merger class"""

def setUp(self):
super(TestConfigMerger, self).setUp()
logsetting.init()

def tearDown(self):
super(TestConfigMerger, self).tearDown()

def test_merge(self):
"""test merge"""
upper_config = {
'networking': {
'interfaces': {
@ -15,13 +27,16 @@ class TestConfigMerger(unittest2.TestCase):
'ip_start': '192.168.1.1',
'ip_end': '192.168.1.100',
'netmask': '255.255.255.0',
'dns_pattern': '%(hostname)s.%(clustername)s.%(search_path)s',
'dns_pattern': (
'%(hostname)s.%(clustername)s.%(search_path)s'),
},
'floating': {
'ip_start': '172.16.0.1',
'ip_end': '172.16.0.100',
'netmask': '0.0.0.0',
'dns_pattern': 'public-%(hostname)s.%(clustername)s.%(search_path)s',
'dns_pattern': (
'public-%(hostname)s.%(clustername)s'
'.%(search_path)s'),
},
},
'global': {
@ -101,7 +116,7 @@ class TestConfigMerger(unittest2.TestCase):
'roles': ['os-single-controller', 'os-network']
}
}
mappings=[
mappings = [
config_merger.ConfigMapping(
path_list=['/networking/interfaces/*'],
from_upper_keys={'ip_start': 'ip_start', 'ip_end': 'ip_end'},
@ -136,12 +151,14 @@ class TestConfigMerger(unittest2.TestCase):
path_list=['/networking/interfaces/*'],
from_upper_keys={'pattern': 'dns_pattern',
'clustername': '/clustername',
'search_path': '/networking/global/search_path'},
'search_path': (
'/networking/global/search_path')},
from_lower_keys={'hostname': '/hostname'},
to_key='dns_alias',
value=functools.partial(config_merger_callbacks.assign_from_pattern,
upper_keys=['search_path', 'clustername'],
lower_keys=['hostname'])
value=functools.partial(
config_merger_callbacks.assign_from_pattern,
upper_keys=['search_path', 'clustername'],
lower_keys=['hostname'])
),
]
merger = config_merger.ConfigMerger(mappings)
@ -150,4 +167,6 @@ class TestConfigMerger(unittest2.TestCase):


if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()

@ -1,11 +1,24 @@
"""test config merger callbacks module"""
import unittest2

from compass.config_management.utils import config_merger_callbacks
from compass.config_management.utils import config_reference
from compass.utils import flags
from compass.utils import logsetting


class TestAssignRoles(unittest2.TestCase):
"""test assign roles"""

def setUp(self):
super(TestAssignRoles, self).setUp()
logsetting.init()

def tearDown(self):
super(TestAssignRoles, self).tearDown()

def test_assign_roles(self):
"""test assign roles"""
lower_configs = {
1: {'roles': ['control']},
2: {'roles': ['api', 'compute']},
@ -79,4 +92,6 @@ class TestAssignRoles(unittest2.TestCase):


if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()

@ -1,3 +1,4 @@
"""test config reference module"""
import unittest2
from copy import deepcopy

@ -6,7 +7,10 @@ from compass.config_management.utils import config_reference


class TestConfigReference(unittest2.TestCase):
"""test config reference class"""

def test_init(self):
"""test init function"""
config = {'1': {'2': 3, '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
config2 = {'5': {'6': 6}}
@ -20,6 +24,7 @@ class TestConfigReference(unittest2.TestCase):
self.assertEqual(id(ref.config['5']), id(ref3.config))

def test_ref(self):
"""test ref function"""
config = {'1': {'2': 3, '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
self.assertRaises(KeyError, ref.ref, '')
@ -47,6 +52,7 @@ class TestConfigReference(unittest2.TestCase):
self.assertEqual(ref.ref('9'), subref2)

def test_refs(self):
"""test refs function"""
config = {'1': {'2': 3, '10': {}}, '4': [5, 6, 7], '8': 8, '88': 88}
ref = config_reference.ConfigReference(config)
refkeys = ref.ref_keys('1')
@ -60,6 +66,7 @@ class TestConfigReference(unittest2.TestCase):
self.assertRaises(KeyError, ref.ref_keys, '')

def test_contains(self):
"""test contains function"""
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
self.assertIn('/1/2', ref)
@ -70,6 +77,7 @@ class TestConfigReference(unittest2.TestCase):
self.assertNotIn('/1/2/3/..', ref)

def test_setitem(self):
"""test setitem function"""
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
ref['/1/2'] = '6'
@ -83,6 +91,7 @@ class TestConfigReference(unittest2.TestCase):
self.assertEqual(ref['3/6/8'], [1, 3, 5])

def test_del(self):
"""test del function"""
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
del ref['/8']
@ -94,6 +103,7 @@ class TestConfigReference(unittest2.TestCase):
self.assertRaises(KeyError, ref.__delitem__, '9')

def test_get(self):
"""test get function"""
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
self.assertEqual(ref.get('1/2'), config['1']['2'])
@ -102,6 +112,7 @@ class TestConfigReference(unittest2.TestCase):
self.assertNotIn('3', config['1'])

def test_setdefault(self):
"""test setdefault function"""
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
self.assertEqual(ref.setdefault('1/2').config, config['1']['2'])
@ -110,6 +121,7 @@ class TestConfigReference(unittest2.TestCase):
self.assertEqual(4, config['1']['4'])

def test_update(self):
"""test update function"""
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
expected_config = deepcopy(config)

@ -124,6 +136,7 @@ class TestConfigReference(unittest2.TestCase):
self.assertEqual(ref.config, 10)

def test_iter(self):
"""test iter function"""
config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}
ref = config_reference.ConfigReference(config)
keys = ref.keys()

@ -1,11 +1,25 @@
"""test config translator module"""
import functools
import unittest2

from compass.config_management.utils import config_translator
from compass.config_management.utils import config_translator_callbacks
from compass.utils import flags
from compass.utils import logsetting


class TestConfigTranslatorFunctions(unittest2.TestCase):
"""test config translator class"""

def setUp(self):
super(TestConfigTranslatorFunctions, self).setUp()
logsetting.init()

def tearDown(self):
super(TestConfigTranslatorFunctions, self).tearDown()

def test_translate_1(self):
"""config translate test"""
config = {
'networking': {
'interfaces': {
@ -37,7 +51,7 @@ class TestConfigTranslatorFunctions(unittest2.TestCase):
'search_path': 'ods.com',
'gateway': '10.0.0.1',
'proxy': 'http://1.2.3.4:3128',
'ntp_server': '1.2.3.4',
'ntp_server': '1.2.3.4',
'ignore_proxy': '127.0.0.1',
},
},
@ -118,7 +132,8 @@ class TestConfigTranslatorFunctions(unittest2.TestCase):
config_translator.KeyTranslator(
translated_keys=[functools.partial(
config_translator_callbacks.get_key_from_pattern,
to_pattern='/modify_interface/macaddress-%(nic)s')],
to_pattern='/modify_interface/macaddress-%(nic)s'
)],
from_keys={'nic': '../nic'},
override=functools.partial(
config_translator_callbacks.override_path_has,
@ -171,7 +186,8 @@ class TestConfigTranslatorFunctions(unittest2.TestCase):
), config_translator.KeyTranslator(
translated_keys=[functools.partial(
config_translator_callbacks.get_key_from_pattern,
to_pattern='/modify_interface/management-%(nic)s')],
to_pattern='/modify_interface/management-%(nic)s'
)],
from_keys={'nic': '../nic'},
translated_value=functools.partial(
config_translator_callbacks.override_path_has,
@ -193,6 +209,7 @@ class TestConfigTranslatorFunctions(unittest2.TestCase):
self.assertEqual(translated_config, expected_config)

def test_translate_2(self):
"""config translate test"""
translator = config_translator.ConfigTranslator(
mapping={
'/networking/interfaces/management/ip': [
@ -215,7 +232,8 @@ class TestConfigTranslatorFunctions(unittest2.TestCase):
'/endpoints/network/service/host',
'/endpoints/volume/service/host',
],
translated_value=config_translator_callbacks.get_value_if,
translated_value=(
config_translator_callbacks.get_value_if),
from_values={'condition': '/has_dashboard_roles'},
override=config_translator_callbacks.override_if_any,
override_conditions={
@ -324,6 +342,7 @@ class TestConfigTranslatorFunctions(unittest2.TestCase):
self.assertEqual(translated_config2, expected_config2)


if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()

@ -1,34 +1,53 @@
"""test hdsdiscovery base module"""
import os
import unittest2
from mock import patch


os.environ['COMPASS_IGNORE_SETTING'] = 'true'


from compass.utils import setting_wrapper as setting
reload(setting)


from compass.hdsdiscovery.base import BaseSnmpVendor
from compass.hdsdiscovery.base import BaseSnmpMacPlugin
from compass.utils import flags
from compass.utils import logsetting


class MockSnmpVendor(BaseSnmpVendor):
"""snmp vendor mock class"""

def __init__(self):
BaseSnmpVendor.__init__(self, ["MockVendor", "FakeVendor"])


class TestBaseSnmpMacPlugin(unittest2.TestCase):
"""teset base snmp plugin class"""

def setUp(self):
super(TestBaseSnmpMacPlugin, self).setUp()
logsetting.init()
self.test_plugin = BaseSnmpMacPlugin('12.0.0.1',
{'version': '2c',
'community': 'public'})

def tearDown(self):
del self.test_plugin
super(TestBaseSnmpMacPlugin, self).tearDown()

@patch('compass.hdsdiscovery.utils.snmpget_by_cl')
def test_get_port(self, mock_snmpget):
"""test snmp get port"""
mock_snmpget.return_value = 'IF-MIB::ifName.4 = STRING: ge-1/1/4'
result = self.test_plugin.get_port('4')
self.assertEqual('4', result)

@patch('compass.hdsdiscovery.utils.snmpget_by_cl')
def test_get_vlan_id(self, mock_snmpget):
"""test snmp get vlan"""
# Port is None
self.assertIsNone(self.test_plugin.get_vlan_id(None))

@ -38,6 +57,7 @@ class TestBaseSnmpMacPlugin(unittest2.TestCase):
self.assertEqual('100', result)

def test_get_mac_address(self):
"""tet snmp get mac address"""
# Correct input for mac numbers
mac_numbers = '0.224.129.230.57.173'.split('.')
mac = self.test_plugin.get_mac_address(mac_numbers)
@ -50,14 +70,17 @@ class TestBaseSnmpMacPlugin(unittest2.TestCase):


class BaseTest(unittest2.TestCase):
"""base test class"""

def setUp(self):
pass
super(BaseTest, self).setUp()
logsetting.init()

def tearDown(self):
pass
super(BaseTest, self).tearDown()

def test_base_snmp_vendor(self):
"""test base snmp vendor"""
fake = MockSnmpVendor()

credential = {"version": "2c",
@ -88,5 +111,8 @@ class BaseTest(unittest2.TestCase):
"community": "public"},
"fakevendor1.1"))


if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()

@ -1,14 +1,29 @@
"""test hdsdiscovery module"""
import os
import unittest2
from mock import patch


os.environ['COMPASS_IGNORE_SETTING'] = 'true'


from compass.utils import setting_wrapper as setting
reload(setting)


from compass.hdsdiscovery.hdmanager import HDManager
from compass.hdsdiscovery.vendors.huawei.huawei import Huawei
from compass.hdsdiscovery.vendors.huawei.plugins.mac import Mac
from compass.utils import flags
from compass.utils import logsetting


class HuaweiTest(unittest2.TestCase):
"""test huawei switch snmp get"""

def setUp(self):
super(HuaweiTest, self).setUp()
logsetting.init()
self.huawei = Huawei()
self.correct_host = '12.23.1.1'
self.correct_credentials = {'version': '2c', 'community': 'public'}
@ -16,8 +31,10 @@ class HuaweiTest(unittest2.TestCase):

def tearDown(self):
del self.huawei
super(HuaweiTest, self).tearDown()

def test_is_this_vendor(self):
"""test device vendor is haiwei"""
#Credential's keyword is incorrect
self.assertFalse(
self.huawei.is_this_vendor(self.correct_host,
@ -40,15 +57,21 @@ class HuaweiTest(unittest2.TestCase):


class HuaweiMacTest(unittest2.TestCase):
"""test get mac from huawei device"""

def setUp(self):
super(HuaweiMacTest, self).setUp()
logsetting.init()
host = '12.23.1.1'
credential = {'version': '2c', 'community': 'public'}
self.mac_plugin = Mac(host, credential)

def tearDown(self):
del self.mac_plugin
super(HuaweiMacTest, self).tearDown()

def test_ProcessData_Operation(self):
def test_process_data(self):
"""get progress data function"""
|
||||
# GET operation haven't been implemeneted.
self.assertIsNone(self.mac_plugin.process_data('GET'))

@ -57,12 +80,17 @@ from compass.hdsdiscovery.vendors.ovswitch.plugins.mac import Mac as OVSMac


class OVSMacTest(unittest2.TestCase):
"""ovs switch test"""

def setUp(self):
super(OVSMacTest, self).setUp()
logsetting.init()
self.host = '10.145.88.160'
self.credential = {'username': 'root', 'password': 'huawei'}

@patch('compass.hdsdiscovery.utils.ssh_remote_execute')
def test_scan(self, ovs_mock):
"""test scan ovs switch"""
ovs_mock.return_value = []
mac_instance = OVSMac(self.host, self.credential)
self.assertIsNone(mac_instance.scan())
@ -75,17 +103,22 @@ class OVSMacTest(unittest2.TestCase):


class HDManagerTest(unittest2.TestCase):
"""test HDManager"""

def setUp(self):
super(HDManagerTest, self).setUp()
logsetting.init()
self.manager = HDManager()
self.correct_host = '12.23.1.1'
self.correct_credential = {'version': '2c', 'community': 'public'}

def tearDown(self):
del self.manager
super(HDManagerTest, self).tearDown()

@patch('compass.hdsdiscovery.hdmanager.HDManager.get_sys_info')
def test_get_vendor(self, sys_info_mock):
"""test get_vendor"""
# Incorrect ip
self.assertIsNone(self.manager.get_vendor('1234.1.1.1',
self.correct_credential)[0])
@ -128,7 +161,7 @@ class HDManagerTest(unittest2.TestCase):

@patch('compass.hdsdiscovery.hdmanager.HDManager.get_sys_info')
def test_is_valid_vendor(self, sys_info_mock):

"""test is_valid_vendor"""
#non-existing vendor
self.assertFalse(self.manager.is_valid_vendor(self.correct_host,
self.correct_credential,
@ -150,7 +183,8 @@ class HDManagerTest(unittest2.TestCase):
self.correct_credential,
'pica8'))

def test_Learn(self):
def test_learn(self):
"""test learn"""
#non-existing plugin
self.assertIsNone(self.manager.learn(self.correct_host,
self.correct_credential,
@ -166,9 +200,18 @@ from compass.hdsdiscovery import utils


class UtilsTest(unittest2.TestCase):
def test_LoadModule(self):
"""hdsdiscovery util test class"""

def setUp(self):
super(UtilsTest, self).setUp()
logsetting.init()

def test_load_module(self):
"""test load_module"""
self.assertIsNone(utils.load_module('xxx', 'fake/path/to/module'))


if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()

@ -1,3 +1,4 @@
"""test util module"""
import os
import unittest2

@ -15,19 +16,31 @@ from compass.utils import util


class TestDictMerge(unittest2.TestCase):
"""Test dict merge"""

def setUp(self):
super(TestDictMerge, self).setUp()
logsetting.init()

def tearDown(self):
super(TestDictMerge, self).tearDown()

def test_simple_merge(self):
"""simple test of merge"""
lhs = {1: 1}
rhs = {2: 2}
util.merge_dict(lhs, rhs)
self.assertEqual(lhs, {1: 1, 2: 2})

def test_recursive_merge(self):
"""test merge recursively"""
lhs = {1: {2: 3}}
rhs = {1: {3: 4}}
util.merge_dict(lhs, rhs)
self.assertEqual(lhs, {1: {2: 3, 3: 4}})

def test_merge_override(self):
"""test merge override"""
lhs = {1: 1}
rhs = {1: 2}
util.merge_dict(lhs, rhs)
@ -39,6 +52,7 @@ class TestDictMerge(unittest2.TestCase):
self.assertEqual(lhs, {1: {2: 4, 3: 5, 4: 6}})

def test_merge_not_override(self):
"""test merge not override"""
lhs = {1: 1}
rhs = {1: 2}
util.merge_dict(lhs, rhs, False)
@ -50,6 +64,7 @@ class TestDictMerge(unittest2.TestCase):
self.assertEqual(lhs, {1: {2: 3, 3: 5, 4: 6}})

def test_change_after_merge(self):
"""test change after merge"""
lhs = {1: {2: 3}}
rhs = {1: {3: [4, 5, 6]}}
util.merge_dict(lhs, rhs)
@ -60,6 +75,7 @@ class TestDictMerge(unittest2.TestCase):
self.assertEqual(rhs, {1: {3: [4, 5, 6, 7]}})

def test_lhs_rhs_notdict(self):
"""test merge not dict"""
lhs = [1, 2, 3]
rhs = {1: 2}
self.assertRaises(TypeError, util.merge_dict, (lhs, rhs))
@ -69,31 +85,38 @@ class TestDictMerge(unittest2.TestCase):


class TestOrderKeys(unittest2.TestCase):
"""test order keys"""

def test_simple_order_keys(self):
"""test simple order keys"""
keys = [1, 2, 3, 4, 5]
orders = [3, 4, 5]
ordered_keys = util.order_keys(keys, orders)
self.assertEqual(ordered_keys, [3, 4, 5, 1, 2])

def test_order_keys_with_dot(self):
"""test order keys with dot in it."""
keys = [1, 2, 3, 4, 5]
orders = [3, 4, '.', 5]
ordered_keys = util.order_keys(keys, orders)
self.assertEqual(ordered_keys, [3, 4, 1, 2, 5])

def test_order_keys_with_multidot(self):
"""test order keys with multi dots in it"""
keys = [1, 2, 3, 4, 5]
orders = [3, '.', 4, '.', 5]
ordered_keys = util.order_keys(keys, orders)
self.assertEqual(ordered_keys, [3, 1, 2, 4, 5])

def test_others_in_orders(self):
"""test other key in order"""
keys = [1, 2, 3, 4, 5]
orders = [3, '.', 5, 6]
ordered_keys = util.order_keys(keys, orders)
self.assertEqual(ordered_keys, [3, 1, 2, 4, 5])

def test_keys_orders_notlist(self):
"""test keys not in order"""
keys = {1: 1}
orders = [3, 4, 5]
self.assertRaises(TypeError, util.order_keys, keys, orders)
@ -104,14 +127,19 @@ class TestOrderKeys(unittest2.TestCase):


class TestIsInstanceOf(unittest2.TestCase):
"""test isinstanceof"""
def test_isinstance(self):
"""test isinstance"""
self.assertTrue(util.is_instance({}, [dict, list]))
self.assertFalse(util.is_instance({}, [str, list]))
self.assertFalse(util.is_instance({}, []))


class TestGetListWithPossibility(unittest2.TestCase):
"""test get list with possibility"""

def test_simple_case(self):
"""test simple case"""
lists = [['role1'], ['role2'], ['role3']]
self.assertEqual(util.flat_lists_with_possibility(lists),
['role1', 'role2', 'role3'])

@ -13,8 +13,9 @@ CELERY_IMPORTS = ('compass.tasks.tasks',)


if setting.CELERYCONFIG_FILE:
CELERY_CONFIG = os.path.join(setting.CELERYCONFIG_DIR,
setting.CELERYCONFIG_FILE)
CELERY_CONFIG = os.path.join(
setting.CELERYCONFIG_DIR,
setting.CELERYCONFIG_FILE)

try:
logging.info('load celery config from %s', CELERY_CONFIG)

62
compass/utils/daemonize.py
Normal file
@ -0,0 +1,62 @@
"""Module to provider util functions in all compass code

.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import daemon
import logging
import sys
import signal
import time

from compass.utils import flags


flags.add_bool('daemonize',
help='run as daemon',
default=False)


BUSY = False
KILLED = False


def handle_term(signum, frame):
"""Handle sig term."""
global KILLED
logging.info('Caught signal %s in %s', signum, frame)
KILLED = True
if not BUSY:
sys.exit(0)


def _daemon(callback, run_interval):
"""helper function to run callback in daemon"""
global BUSY
signal.signal(signal.SIGTERM, handle_term)
signal.signal(signal.SIGHUP, handle_term)

while True:
BUSY = True
callback()
BUSY = False
if KILLED:
logging.info('exit loop')
break

if run_interval > 0:
logging.info('will rerun after %s seconds', run_interval)
time.sleep(run_interval)
else:
logging.info('finish loop')
break


def daemonize(callback, run_interval, **kwargs):
"""daemonize callback and run every run_interval seconds."""
if flags.OPTIONS.daemonize:
with daemon.DaemonContext(**kwargs):
logging.info('run as daemon')
_daemon(callback, run_interval)
else:
_daemon(callback, run_interval)
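
For context, a minimal usage sketch of the new daemonize() helper (not part of this change; the poll callback and the 60-second interval are made up for illustration):

import logging

from compass.utils import daemonize
from compass.utils import flags
from compass.utils import logsetting


def poll():
    """hypothetical periodic work"""
    logging.info('do one round of work')


if __name__ == '__main__':
    flags.init()
    logsetting.init()
    # run poll() every 60 seconds; forks into the background
    # when --daemonize is passed on the command line
    daemonize.daemonize(poll, 60)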
@ -8,10 +8,14 @@ from optparse import OptionParser


class Flags(object):
"""Class to store flags."""

PARSER = OptionParser()
PARSED_OPTIONS = None

def parse_args(self):
@classmethod
def parse_args(cls):
"""parse args."""
(options, argv) = Flags.PARSER.parse_args()
sys.argv = [sys.argv[0]] + argv
Flags.PARSED_OPTIONS = options
@ -45,7 +49,6 @@ OPTIONS = Flags()
def init():
"""Init flag parsing.
"""
global OPTIONS
OPTIONS.parse_args()
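
For reference, this is how the bin/ scripts in this commit consume the flags API; a hedged sketch (the flag name and default are examples only):

from compass.utils import flags

flags.add('roles_dir',
          help='chef roles directory',
          default='/var/chef/roles')

flags.init()
print flags.OPTIONS.roles_dir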

@ -61,7 +61,10 @@ def init():
if not logfile:
handler = logging.StreamHandler(sys.stderr)
else:
handler = logging.FileHandler(logfile)
handler = logging.handlers.TimedRotatingFileHandler(
logfile,
when=flags.OPTIONS.log_interval_unit,
interval=flags.OPTIONS.log_interval)

if loglevel in LOGLEVEL_MAPPING:
logger.setLevel(LOGLEVEL_MAPPING[loglevel])
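
The FileHandler-to-TimedRotatingFileHandler switch above makes the log file rotate on a schedule; a standalone sketch of the same handler setup (path and values are examples, not the module's defaults):

import logging
import logging.handlers

logger = logging.getLogger()
# rotate /tmp/compass.log every hour (when='h', interval=1)
handler = logging.handlers.TimedRotatingFileHandler(
    '/tmp/compass.log', when='h', interval=1)
logger.addHandler(handler)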

@ -22,7 +22,7 @@ CHEF_INSTALLER_URL = ''
CHEF_GLOBAL_DATABAG_NAME = 'env_default'
INSTALLATION_LOGDIR = ''
DEFAULT_LOGLEVEL = 'info'
DEFAULT_LOGDIR = ''
DEFAULT_LOGDIR = '/tmp'
DEFAULT_LOGINTERVAL = 1
DEFAULT_LOGINTERVAL_UNIT = 'h'
DEFAULT_LOGFORMAT = (
@ -37,8 +37,10 @@ SWITCHES = [
]


if ('COMPASS_IGNORE_SETTING' in os.environ and
os.environ['COMPASS_IGNORE_SETTING']):
if (
'COMPASS_IGNORE_SETTING' in os.environ and
os.environ['COMPASS_IGNORE_SETTING']
):
pass
else:
if 'COMPASS_SETTING' in os.environ:

@ -2,6 +2,8 @@

.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import re

from copy import deepcopy


@ -29,8 +31,10 @@ def merge_dict(lhs, rhs, override=True):
rhs)

for key, value in rhs.items():
if (isinstance(value, dict) and key in lhs and
isinstance(lhs[key], dict)):
if (
isinstance(value, dict) and key in lhs and
isinstance(lhs[key], dict)
):
merge_dict(lhs[key], value, override)
else:
if override or key not in lhs:
@ -113,7 +117,7 @@ def flat_lists_with_possibility(lists):
length = len(items)
if length > 0:
total_elements += length
possibilities.append(1.0/length)
possibilities.append(1.0 / length)
else:
possibilities.append(0.0)

@ -132,7 +136,206 @@ def flat_lists_with_possibility(lists):


def pretty_print(*contents):
"""pretty print contents."""
if len(contents) == 0:
print ""
else:
print "\n".join(content for content in contents)


def get_clusters_from_str(clusters_str):
"""get clusters from string."""
clusters = {}
for clusterid_and_hostnames in clusters_str.split(';'):
if not clusterid_and_hostnames:
continue

if ':' in clusterid_and_hostnames:
clusterid_str, hostnames_str = clusterid_and_hostnames.split(
':', 1)
else:
clusterid_str = clusterid_and_hostnames
hostnames_str = ''

clusterid = int(clusterid_str)
hostnames = [
hostname for hostname in hostnames_str.split(',')
if hostname
]
clusters[clusterid] = hostnames

return clusters
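
A quick illustration of the string format get_clusters_from_str() accepts (cluster ids and hostnames are made up):

from compass.utils import util

# '1:host1,host2;2' -> cluster 1 limited to two hosts,
# cluster 2 with no hostname filter
clusters = util.get_clusters_from_str('1:host1,host2;2')
assert clusters == {1: ['host1', 'host2'], 2: []}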


def _get_switch_ips(switch_config):
"""Helper function to get switch ips."""
ips = []
blocks = switch_config['switch_ips'].split('.')
ip_blocks_list = []
for block in blocks:
ip_blocks_list.append([])
sub_blocks = block.split(',')
for sub_block in sub_blocks:
if not sub_block:
continue

if '-' in sub_block:
start_block, end_block = sub_block.split('-', 1)
start_block = int(start_block)
end_block = int(end_block)
if start_block > end_block:
continue

ip_block = start_block
while ip_block <= end_block:
ip_blocks_list[-1].append(str(ip_block))
ip_block += 1

else:
ip_blocks_list[-1].append(sub_block)

ip_prefixes = [[]]
for ip_blocks in ip_blocks_list:
prefixes = []
for ip_block in ip_blocks:
for prefix in ip_prefixes:
prefixes.append(prefix + [ip_block])

ip_prefixes = prefixes

for prefix in ip_prefixes:
if not prefix:
continue

ips.append('.'.join(prefix))

return ips


def _get_switch_filter_ports(switch_config):
"""Helper function to get switch filter ports."""
port_pat = re.compile(r'(\D*)(\d+(?:-\d+)?)')
filter_ports = []
for port_range in switch_config['filter_ports'].split(','):
if not port_range:
continue

mat = port_pat.match(port_range)
if not mat:
filter_ports.append(port_range)
else:
port_prefix = mat.group(1)
port_range = mat.group(2)
if '-' in port_range:
start_port, end_port = port_range.split('-', 1)
start_port = int(start_port)
end_port = int(end_port)
if start_port > end_port:
continue

port = start_port
while port <= end_port:
filter_ports.append('%s%s' % (port_prefix, port))
port += 1

else:
filter_ports.append('%s%s' % (port_prefix, port_range))

return filter_ports


def get_switch_filters(switch_configs):
"""get switch filters."""
switch_filters = []
for switch_config in switch_configs:
ips = _get_switch_ips(switch_config)
filter_ports = _get_switch_filter_ports(switch_config)

for ip_addr in ips:
for filter_port in filter_ports:
switch_filters.append(
{'ip': ip_addr, 'filter_port': filter_port})

return switch_filters
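
A hedged example of the expansion get_switch_filters() performs (addresses and port names are made up):

from compass.utils import util

switch_configs = [{'switch_ips': '192.168.1.1-2', 'filter_ports': 'ae1-2'}]
filters = util.get_switch_filters(switch_configs)
# -> [{'ip': '192.168.1.1', 'filter_port': 'ae1'},
#     {'ip': '192.168.1.1', 'filter_port': 'ae2'},
#     {'ip': '192.168.1.2', 'filter_port': 'ae1'},
#     {'ip': '192.168.1.2', 'filter_port': 'ae2'}]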


def get_switch_machines_from_file(filename):
"""get switch machines from file."""
switches = []
switch_machines = {}
with open(filename) as switch_file:
for line in switch_file:
line = line.strip()
if not line:
# ignore empty line
continue

if line.startswith('#'):
# ignore comments
continue

columns = [column for column in line.split(',')]
if not columns:
# ignore empty line
continue

if columns[0] == 'switch':
(switch_ip, switch_vendor, switch_version,
switch_community, switch_state) = columns[1:]
switches.append({
'ip': switch_ip,
'vendor_info': switch_vendor,
'credential': {
'version': switch_version,
'community': switch_community,
},
'state': switch_state,
})
elif columns[0] == 'machine':
switch_ip, switch_port, vlan, mac = columns[1:]
switch_machines.setdefault(switch_ip, []).append({
'mac': mac,
'port': switch_port,
'vlan': int(vlan)
})

return (switches, switch_machines)
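
The file format get_switch_machines_from_file() parses, sketched with hypothetical values (the path, switch state and MAC are invented for illustration):

from compass.utils import util

# contents of a hypothetical /tmp/switches.csv:
#   switch,192.168.1.10,huawei,2c,public,under_monitoring
#   machine,192.168.1.10,ae1,88,00:11:22:33:44:55
switches, machines = util.get_switch_machines_from_file('/tmp/switches.csv')
# switches[0]['credential'] == {'version': '2c', 'community': 'public'}
# machines['192.168.1.10'][0]['vlan'] == 88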


def get_properties_from_str(properties_str):
"""get matching properties from string."""
properties = {}
if not properties_str:
return properties

for property_str in properties_str.split(','):
if not property_str:
# ignore empty str
continue

property_name, property_value = property_str.split('=', 1)
properties[property_name] = property_value

return properties
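
And the comma-separated key=value format get_properties_from_str() accepts (the keys here are examples):

from compass.utils import util

properties = util.get_properties_from_str('switch_ip=1.2.3.4,vlan=88')
assert properties == {'switch_ip': '1.2.3.4', 'vlan': '88'}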


def get_properties_name_from_str(properties_name_str):
"""get properties name to print from string."""
properties_name = []
for property_name in properties_name_str.split(','):
if not property_name:
# ignore empty str
continue

properties_name.append(property_name)

return properties_name


def print_properties(properties):
"""print properties."""
print '-----------------------------------------------'
for property_name, property_value in properties.items():
print '%s=%s' % (property_name, property_value)

print '-----------------------------------------------'

59
ez_setup.py
@ -1,4 +1,4 @@
#!python
#!/usr/bin/python
"""Bootstrap setuptools installation

If you want to use setuptools in your package's setup.py, just include this
@ -23,19 +23,25 @@ import subprocess

from distutils import log


try:
from site import USER_SITE
except ImportError:
USER_SITE = None


DEFAULT_VERSION = "0.9.6"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"


def _python_cmd(*args):
"""run cmd in python"""
args = (sys.executable,) + args
return subprocess.call(args) == 0


def _install(tarball, install_args=()):
"""install tarball"""
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
@ -58,12 +64,14 @@ def _install(tarball, install_args=()):
log.warn('See the error message above.')
# exitcode will be 2
return 2

finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)


def _build_egg(egg, tarball, to_dir):
"""build egg"""
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
@ -86,6 +94,7 @@ def _build_egg(egg, tarball, to_dir):
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)

# returning the result
log.warn(egg)
if not os.path.exists(egg):
@ -93,12 +102,14 @@ def _build_egg(egg, tarball, to_dir):


def _do_download(version, download_base, to_dir, download_delay):
"""download package"""
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
tarball = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, tarball, to_dir)

sys.path.insert(0, egg)
import setuptools
setuptools.bootstrap_install_from = egg
@ -106,6 +117,7 @@ def _do_download(version, download_base, to_dir, download_delay):

def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15):
"""use setuptools to do the setup"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
was_imported = 'pkg_resources' in sys.modules or \
@ -114,26 +126,31 @@ def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
import pkg_resources
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)

try:
pkg_resources.require("setuptools>=" + version)
return
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir,
download_delay)

except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
error = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
"The required version of setuptools (>=%s) is not available,\n"
"and can't be installed while this script is running. Please\n"
"install a more recent version first, using\n"
"'easy_install -U setuptools'."
"\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.stderr.writelines([
"The required version of setuptools (>=%s) is not available,",
"and can't be installed while this script is running. Please",
"install a more recent version first, using",
"'easy_install -U setuptools'.",
"",
"(Currently using %r)" % (version, error.args[0]),
"",
])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return _do_download(version, download_base, to_dir,
download_delay)
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir,
download_delay)


def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
@ -152,6 +169,7 @@ def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen

tgz_name = "setuptools-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
@ -170,6 +188,7 @@ def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
src.close()
if dst:
dst.close()

return os.path.realpath(saveto)


@ -194,12 +213,15 @@ def _extractall(self, path=".", members=None):
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448 # decimal for oct 0700

self.extract(tarinfo, path)

# Reverse sort directories.
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
"""sort dir"""
return cmp(dir1.name, dir2.name)

directories.sort(sorter)
directories.reverse()
else:
@ -213,11 +235,11 @@ def _extractall(self, path=".", members=None):
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
error = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
self._dbg(1, "tarfile: %s" % error)


def _build_install_args(options):
@ -229,13 +251,14 @@ def _build_install_args(options):
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)

install_args.append('--user')

return install_args


def _parse_args():
"""
Parse the command line for options
"""
"""Parse the command line for options"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
@ -244,15 +267,17 @@ def _parse_args():
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
options, args = parser.parse_args()
options, _ = parser.parse_args()
# positional arguments are ignored
return options


def main(version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
options = _parse_args()
tarball = download_setuptools(download_base=options.download_base)
return _install(tarball, _build_install_args(options))


if __name__ == '__main__':
sys.exit(main())

@ -36,23 +36,17 @@ fi
# configure chef client and knife
rpm -q chef
if [[ "$?" != "0" ]]; then
sudo wget -c --progress=bar:force -O /tmp/chef_install.sh http://www.opscode.com/chef/install.sh
if [[ "$?" != "0" ]]; then
echo "failed to download chef install script"
exit 1
download http://www.opscode.com/chef/install.sh chef_install.sh
sudo chmod 755 /tmp/chef_install.sh
sudo /tmp/chef_install.sh
if [[ "$?" != "0" ]]; then
echo "chef install failed"
exit 1
else
echo "chef is installed"
fi
else
echo "chef install script is downloaded"
fi
sudo chmod 755 /tmp/chef_install.sh
sudo /tmp/chef_install.sh
if [[ "$?" != "0" ]]; then
echo "chef install failed"
exit 1
else
echo "chef is installed"
fi
else
echo "chef has already installed"
echo "chef has already installed"
|
||||
fi
|
||||
|
||||
sudo mkdir -p ~/.chef
|
||||
|
@ -198,18 +198,12 @@ ppa_repo_packages="ntp-4.2.6p5-1.el6.${IMAGE_TYPE,,}.$IMAGE_ARCH.rpm
|
||||
ntpdate-4.2.6p5-1.el6.${IMAGE_TYPE,,}.${IMAGE_ARCH}.rpm"
|
||||
for f in $ppa_repo_packages
|
||||
do
|
||||
if [[ ! -e /tmp/$f ]]; then
|
||||
sudo wget -c --progress=bar:force -O /tmp/$f ftp://rpmfind.net/linux/${IMAGE_TYPE,,}/${IMAGE_VERSION_MAJOR}/os/${IMAGE_ARCH}/Packages/$f
|
||||
else
|
||||
echo "$f already exist"
|
||||
fi
|
||||
download ftp://rpmfind.net/linux/${IMAGE_TYPE,,}/${IMAGE_VERSION_MAJOR}/os/${IMAGE_ARCH}/Packages/$f $f
|
||||
sudo cp /tmp/$f /var/lib/cobbler/repo_mirror/ppa_repo/
|
||||
done
|
||||
if [[ ! -e /tmp/chef-11.8.0-1.el6.${IMAGE_ARCH}.rpm ]]; then
|
||||
sudo wget -c --progress=bar:force -O /tmp/chef-11.8.0-1.el6.${IMAGE_ARCH}.rpm http://opscode-omnibus-packages.s3.amazonaws.com/el/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-11.8.0-1.el6.${IMAGE_ARCH}.rpm
|
||||
else
|
||||
echo "chef-11.8.0-1.el6.${IMAGE_ARCH}.rpm already exists"
|
||||
fi
|
||||
|
||||
# download chef client for ppa repo
|
||||
download http://opscode-omnibus-packages.s3.amazonaws.com/el/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-11.8.0-1.el6.${IMAGE_ARCH}.rpm
|
||||
sudo cp /tmp/chef-11.8.0-1.el6.${IMAGE_ARCH}.rpm /var/lib/cobbler/repo_mirror/ppa_repo/
|
||||
cd ..
|
||||
sudo createrepo ppa_repo
|
||||
@ -224,17 +218,7 @@ sudo cobbler reposync
|
||||
|
||||
# import cobbler distro
|
||||
sudo mkdir -p /var/lib/cobbler/iso
|
||||
if [[ ! -e /tmp/${IMAGE_NAME}-${IMAGE_ARCH}.iso ]]; then
|
||||
sudo wget -c --progress=bar:force -O /tmp/${IMAGE_NAME}-${IMAGE_ARCH}.iso "$IMAGE_SOURCE"
|
||||
if [[ "$?" != "0" ]]; then
|
||||
echo "failed to download images $IMAGE_SOURCE"
|
||||
exit 1
|
||||
else
|
||||
echo "$IMAGE_SOURCE is downloaded"
|
||||
fi
|
||||
else
|
||||
echo "${IMAGE_NAME}-${IMAGE_ARCH}.iso already exists"
|
||||
fi
|
||||
download "$IMAGE_SOURCE" ${IMAGE_NAME}-${IMAGE_ARCH}.iso
|
||||
sudo cp /tmp/${IMAGE_NAME}-${IMAGE_ARCH}.iso /var/lib/cobbler/iso/
|
||||
sudo mkdir -p /mnt/${IMAGE_NAME}-${IMAGE_ARCH}
|
||||
if [ $(mount | grep -c "/mnt/${IMAGE_NAME}-${IMAGE_ARCH} ") -eq 0 ]; then
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
echo 'Installing Required packages for Compass...'
|
||||
|
||||
sudo yum install -y rsyslog logrotate ntp iproute openssh-clients python git wget python-setuptools python-netaddr python-flask python-flask-sqlalchemy python-amqplib amqp python-paramiko python-mock mod_wsgi httpd squid dhcp bind rsync yum-utils xinetd tftp-server gcc net-snmp-utils net-snmp net-snmp-python python-daemon unzip openssl openssl098e
|
||||
sudo yum install -y rsyslog logrotate ntp iproute openssh-clients python git wget python-setuptools python-netaddr python-flask python-flask-sqlalchemy python-amqplib amqp python-paramiko python-mock mod_wsgi httpd squid dhcp bind rsync yum-utils xinetd tftp-server gcc net-snmp-utils net-snmp net-snmp-python python-daemon unzip openssl openssl098e ca-certificates
|
||||
if [[ "$?" != "0" ]]; then
|
||||
echo "failed to install yum dependency"
|
||||
exit 1
|
||||
|
@ -1,5 +1,6 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# prepare the installation
|
||||
|
||||
copygit2dir()
|
||||
{
|
||||
repo=$1
|
||||
@ -42,87 +43,6 @@ copylocal2dir()
    sudo cp -rf $repo/* $destdir
}

cd $SCRIPT_DIR
if [ "$source" != "local" ]; then
    copygit2dir $WEB_SOURCE $WEB_HOME
    copygit2dir $ADAPTER_SOURCE $ADAPTER_HOME
else
    copylocal2dir $WEB_SOURCE $WEB_HOME
    copylocal2dir $ADAPTER_SOURCE $ADAPTER_HOME
fi

# download chef-server package
if [[ -f /tmp/chef-server-11.0.8-1.el6.${IMAGE_ARCH}.rpm ]]; then
    echo "chef-server-11.0.8-1.el6.${IMAGE_ARCH}.rpm already exists"
else
    wget -c --progress=bar:force -O /tmp/chef-server-11.0.8-1.el6.${IMAGE_ARCH}.rpm $CHEF_SRV
    if [[ "$?" != "0" ]]; then
        echo "failed to download chef-server-11.0.8-1.el6.${IMAGE_ARCH}.rpm"
        exit 1
    else
        echo "successfully download chef-server-11.0.8-1.el6.${IMAGE_ARCH}.rpm"
    fi
fi

# download centos image
if [[ -f /tmp/${IMAGE_NAME}-${IMAGE_ARCH}.iso ]]; then
    echo "/tmp/${IMAGE_NAME}-${IMAGE_ARCH}.iso already exists"
else
    sudo wget -c --progress=bar:force -O /tmp/${IMAGE_NAME}-${IMAGE_ARCH}.iso "$IMAGE_SOURCE"
    if [[ "$?" != "0" ]]; then
        echo "failed to download ${IMAGE_NAME}-${IMAGE_ARCH}.iso"
        exit 1
    else
        echo "successfully download ${IMAGE_NAME}-${IMAGE_ARCH}.iso"
    fi
fi

# download ppa_repo packages
ppa_repo_packages="ntp-4.2.6p5-1.el6.${IMAGE_TYPE,,}.$IMAGE_ARCH.rpm
openssh-clients-5.3p1-94.el6.${IMAGE_ARCH}.rpm
iproute-2.6.32-31.el6.${IMAGE_ARCH}.rpm
wget-1.12-1.8.el6.${IMAGE_ARCH}.rpm
ntpdate-4.2.6p5-1.el6.${IMAGE_TYPE,,}.${IMAGE_ARCH}.rpm"
for f in $ppa_repo_packages
do
    if [ -f /tmp/$f ]; then
        echo "$f already exists"
    else
        sudo wget -c --progress=bar:force -O /tmp/$f ftp://rpmfind.net/linux/${IMAGE_TYPE,,}/${IMAGE_VERSION_MAJOR}/os/${IMAGE_ARCH}/Packages/$f
        if [[ "$?" != "0" ]]; then
            echo "fail to download $f"
        else
            echo "successfully download $f"
        fi
    fi
done

if [[ ! -e /tmp/chef-11.8.0-1.el6.${IMAGE_ARCH}.rpm ]]; then
    sudo wget -c --progress=bar:force -O /tmp/chef-11.8.0-1.el6.${IMAGE_ARCH}.rpm http://opscode-omnibus-packages.s3.amazonaws.com/el/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-11.8.0-1.el6.${IMAGE_ARCH}.rpm
else
    echo "chef-11.8.0-1.el6.${IMAGE_ARCH}.rpm already exists"
fi

# install js mvc package
if [[ -f /tmp/$JS_MVC.zip ]]; then
    echo "$JS_MVC.zip already exists"
else
    wget -c --progress=bar:force -O /tmp/$JS_MVC.zip http://github.com/downloads/bitovi/javascriptmvc/$JS_MVC.zip
    if [[ "$?" != "0" ]]; then
        echo "failed to download $JS_MVC"
        exit 1
    else
        echo "successfully download $JS_MVC"
    fi
fi

if [ -d /tmp/$JS_MVC ]; then
    echo "/tmp/$JS_MVC is already unzipped"
else
    sudo unzip -o /tmp/$JS_MVC.zip -d /tmp/
fi
sudo cp -rf /tmp/$JS_MVC/. $WEB_HOME/public/

# Create backup dir
sudo mkdir -p /root/backup
@ -187,6 +107,63 @@ else
    echo "squid conf is updated"
fi

cd $SCRIPT_DIR
if [ "$source" != "local" ]; then
    copygit2dir $WEB_SOURCE $WEB_HOME
    copygit2dir $ADAPTER_SOURCE $ADAPTER_HOME
else
    copylocal2dir $WEB_SOURCE $WEB_HOME
    copylocal2dir $ADAPTER_SOURCE $ADAPTER_HOME
fi

download()
{
    url=$1
    package=${2:-$(basename $url)}
    if [[ -f /tmp/${package} ]]; then
        echo "$package already exists"
    else
        wget -c --progress=bar:force -O /tmp/${package}.tmp $url
        if [[ "$?" != "0" ]]; then
            echo "failed to download $package"
            exit 1
        else
            echo "successfully downloaded $package"
            cp -rf /tmp/${package}.tmp /tmp/${package}
        fi
    fi
}

# download chef-server package
download $CHEF_SRV

# download centos image
download $IMAGE_SOURCE ${IMAGE_NAME}-${IMAGE_ARCH}.iso

# download ppa_repo packages
ppa_repo_packages="ntp-4.2.6p5-1.el6.${IMAGE_TYPE,,}.$IMAGE_ARCH.rpm
openssh-clients-5.3p1-94.el6.${IMAGE_ARCH}.rpm
iproute-2.6.32-31.el6.${IMAGE_ARCH}.rpm
wget-1.12-1.8.el6.${IMAGE_ARCH}.rpm
ntpdate-4.2.6p5-1.el6.${IMAGE_TYPE,,}.${IMAGE_ARCH}.rpm"
for f in $ppa_repo_packages
do
    download ftp://rpmfind.net/linux/${IMAGE_TYPE,,}/${IMAGE_VERSION_MAJOR}/os/${IMAGE_ARCH}/Packages/$f $f
done

# download chef client for ppa repo
download http://opscode-omnibus-packages.s3.amazonaws.com/el/${IMAGE_VERSION_MAJOR}/${IMAGE_ARCH}/chef-11.8.0-1.el6.${IMAGE_ARCH}.rpm

# download js mvc
download http://github.com/downloads/bitovi/javascriptmvc/$JS_MVC.zip

if [ -d /tmp/$JS_MVC ]; then
    echo "/tmp/$JS_MVC is already unzipped"
else
    sudo unzip -o /tmp/$JS_MVC.zip -d /tmp/
fi
sudo cp -rf /tmp/$JS_MVC/. $WEB_HOME/public/

# Install net-snmp
sudo cp -rn /etc/snmp/snmp.conf /root/backup/
sudo mkdir -p /usr/local/share/snmp/
@ -194,3 +171,5 @@ sudo cp -rf $COMPASSDIR/mibs /usr/local/share/snmp/
sudo rm -f /etc/snmp/snmp.conf
sudo cp -rf $COMPASSDIR/misc/snmp/snmp.conf /etc/snmp/snmp.conf
sudo chmod 644 /etc/snmp/snmp.conf
sudo mkdir -p /var/lib/net-snmp/mib_indexes
sudo chmod 755 /var/lib/net-snmp/mib_indexes
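Taken together, the two hunks above collapse five near-identical wget-and-check blocks into a single download() helper that caches packages under /tmp and only publishes a file once wget succeeds, so interrupted runs can resume safely. A minimal standalone sketch of the same pattern (names mirror the diff; the mv step and the sample URL are illustrative variations, not the committed code):

    #!/bin/bash
    # Sketch of the cache-then-publish download pattern used above.
    download() {
        url=$1
        package=${2:-$(basename $url)}   # cache name defaults to the URL basename
        if [[ -f /tmp/${package} ]]; then
            echo "$package already exists"
            return 0
        fi
        # Fetch to a .tmp name so a partial download never poisons the cache.
        if wget -c --progress=bar:force -O /tmp/${package}.tmp "$url"; then
            mv /tmp/${package}.tmp /tmp/${package}
            echo "successfully downloaded $package"
        else
            echo "failed to download $package" >&2
            exit 1
        fi
    }

    # Sample call (URL is a placeholder):
    download http://example.com/packages/sample.rpm sample.rpm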
15
setup.py
@ -1,3 +1,4 @@
"""setup script."""
try:
    from setuptools import setup, find_packages
    from setuptools.command.test import test as TestCommand
@ -7,11 +8,14 @@ except ImportError:
    from setuptools import setup
    from setuptools.command.test import test as TestCommand


import sys
import os


class Tox(TestCommand):
    """Tox to do the setup"""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
@ -22,11 +26,14 @@ class Tox(TestCommand):
        errno = tox.cmdline(self.test_args)
        sys.exit(errno)

install_requires_dir = os.path.join(
INSTALL_REQUIRES_DIR = os.path.join(
    os.path.dirname(__file__), 'requirements.txt')

with open(install_requires_dir, 'r') as f:
    requirements = [line.strip() for line in f if line != '\n']
with open(INSTALL_REQUIRES_DIR, 'r') as requires_file:
    REQUIREMENTS = [line.strip() for line in requires_file if line != '\n']


setup(
    name='compass',
@ -41,7 +48,7 @@ setup(
    download_url='',

    # dependency
    install_requires=requirements,
    install_requires=REQUIREMENTS,
    packages=find_packages(exclude=['compass.tests']),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
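The setup.py hunks rename the module-level names to the constant style pep8 expects (INSTALL_REQUIRES_DIR, REQUIREMENTS) and keep the Tox-backed test command. A hedged usage sketch, assuming a checkout with requirements.txt and a tox.ini at the repo root:

    # Illustrative invocations, not part of the commit.
    pip install -r requirements.txt   # install the pinned dependencies
    python setup.py test              # runs Tox via the custom Tox command
    python setup.py install           # install the compass package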