merge all changes from private git 12.234.32.41 to public git compass-core. Fix all broken tests caused by the code change.

Change-Id: I6b629d5cdb4ddf3a1a3eedb08fae37501e51ed09
This commit is contained in:
Xiaodong Wang 2014-01-24 22:01:36 -08:00 committed by xiaodongwang
parent 97698042df
commit 16ebf00865
25 changed files with 1957 additions and 142 deletions

View File

@ -2,6 +2,7 @@
import logging
import os
import os.path
import re
import shutil
import sys
@ -9,8 +10,11 @@ from flask.ext.script import Manager
from compass.api import app
from compass.config_management.utils import config_manager
from compass.config_management.utils import config_reference
from compass.db import database
from compass.db.model import Adapter, Role, Switch, Machine, HostState, ClusterState, Cluster, ClusterHost, LogProgressingHistory
from compass.db.model import Adapter, Role, Switch, SwitchConfig
from compass.db.model import Machine, HostState, ClusterState
from compass.db.model import Cluster, ClusterHost, LogProgressingHistory
from compass.utils import flags
from compass.utils import logsetting
from compass.utils import setting_wrapper as setting
@ -24,6 +28,21 @@ flags.add('clusters',
'clusters to clean, the format is as '
'clusterid:hostname1,hostname2,...;...'),
default='')
flags.add('fake_switches_file',
help=(
'files for switches and machines '
'connected to each switch. each line in the file '
'is <switch ip>,<switch port>,<vlan>,<mac>'),
default='')
flags.add('fake_switches_vendor',
help='switch vendor used to set fake switch and machines.',
default='huawei')
flags.add('search_config_properties',
help='semicomma separated properties to search in config',
default='')
flags.add('print_config_properties',
help='semicomma separated config properties to print',
default='')
manager = Manager(app, usage="Perform database operations")
@ -33,6 +52,7 @@ TABLE_MAPPING = {
'role': Role,
'adapter': Adapter,
'switch': Switch,
'switch_config': SwitchConfig,
'machine': Machine,
'hoststate': HostState,
'clusterstate': ClusterState,
@ -52,11 +72,11 @@ def list_config():
@manager.command
def createdb():
"Creates database from sqlalchemy models"
if setting.DATABASE_TYPE == 'sqlite':
if setting.DATABASE_TYPE == 'file':
if os.path.exists(setting.DATABASE_FILE):
os.remove(setting.DATABASE_FILE)
database.create_db()
if setting.DATABASE_TYPE == 'sqlite':
if setting.DATABASE_TYPE == 'file':
os.chmod(setting.DATABASE_FILE, 0777)
@manager.command
@ -107,6 +127,121 @@ def sync_from_installers():
session.add(Role(**role))
@manager.command
def sync_switch_configs():
    """Set switch configs in SwitchConfig table from setting.

    .. note::
        the switch config is stored in SWITCHES list in setting config.
        for each entry in the SWITCHES, its type is dict and must contain
        fields 'switch_ips' and 'filter_ports'.
        The format of switch_ips is
        <ip_blocks>.<ip_blocks>.<ip_blocks>.<ip_blocks>.
        ip_blocks consists of ip_block separated by comma.
        ip_block can be an integer and a range of integer like xx-xx.
        The example of switch_ips is like: xxx.xxx.xxx-yyy,xxx-yyy.xxx,yyy
        The format of filter_ports consists of list of
        <port_prefix><port_range> separated by comma. port_range can be an
        integer or a range of integer like xx-xx.
        The example of filter_ports is like: ae1-5,20-40.
    """
    if not hasattr(setting, 'SWITCHES') or not setting.SWITCHES:
        logging.info('no switch configs to set')
        return

    switch_configs = []
    # Optional non-digit prefix followed by a port number or numeric
    # range, e.g. 'ae1-5' -> groups ('ae', '1-5').
    port_pat = re.compile(r'(\D*)(\d+(?:-\d+)?)')
    for switch in setting.SWITCHES:
        ips = []
        blocks = switch['switch_ips'].split('.')
        # One candidate list per dotted octet position.
        ip_blocks_list = []
        for block in blocks:
            ip_blocks_list.append([])
            sub_blocks = block.split(',')
            for sub_block in sub_blocks:
                if not sub_block:
                    continue
                if '-' in sub_block:
                    # Expand a numeric range like '10-12' into
                    # '10', '11', '12'; an inverted range is skipped.
                    start_block, end_block = sub_block.split('-', 1)
                    start_block = int(start_block)
                    end_block = int(end_block)
                    if start_block > end_block:
                        continue
                    ip_block = start_block
                    while ip_block <= end_block:
                        ip_blocks_list[-1].append(str(ip_block))
                        ip_block += 1
                else:
                    ip_blocks_list[-1].append(sub_block)

        # Cartesian product of the per-octet candidate lists yields
        # every concrete IP address described by the pattern.
        ip_prefixes = [[]]
        for ip_blocks in ip_blocks_list:
            prefixes = []
            for ip_block in ip_blocks:
                for prefix in ip_prefixes:
                    prefixes.append(prefix + [ip_block])
            ip_prefixes = prefixes

        for prefix in ip_prefixes:
            if not prefix:
                continue
            ips.append('.'.join(prefix))

        logging.debug('found switch ips: %s', ips)
        filter_ports = []
        for port_range in switch['filter_ports'].split(','):
            if not port_range:
                continue
            mat = port_pat.match(port_range)
            if not mat:
                # No numeric part: keep the port spec verbatim.
                filter_ports.append(port_range)
            else:
                port_prefix = mat.group(1)
                port_range = mat.group(2)
                if '-' in port_range:
                    # Expand '<prefix>N-M' into <prefix>N ... <prefix>M;
                    # an inverted range is skipped.
                    start_port, end_port = port_range.split('-', 1)
                    start_port = int(start_port)
                    end_port = int(end_port)
                    if start_port > end_port:
                        continue
                    port = start_port
                    while port <= end_port:
                        filter_ports.append('%s%s' % (port_prefix, port))
                        port += 1
                else:
                    filter_ports.append('%s%s' % (port_prefix, port_range))

        # One SwitchConfig row per (ip, filter_port) combination.
        for ip in ips:
            for filter_port in filter_ports:
                switch_configs.append(
                    {'ip': ip, 'filter_port': filter_port})

    # Replace the whole table content, skipping duplicate combinations.
    switch_config_tuples = set([])
    with database.session() as session:
        session.query(SwitchConfig).delete(synchronize_session='fetch')
        for switch_config in switch_configs:
            switch_config_tuple = tuple(switch_config.values())
            if switch_config_tuple in switch_config_tuples:
                logging.debug('ignore adding switch config: %s',
                              switch_config)
                continue
            else:
                logging.debug('add switch config: %s', switch_config)
                switch_config_tuples.add(switch_config_tuple)
                session.add(SwitchConfig(**switch_config))
def _get_clusters():
clusters = {}
logging.debug('get clusters from flag: %s', flags.OPTIONS.clusters)
@ -212,8 +347,11 @@ def _clean_clusters(clusters):
@manager.command
def clean_clusters():
    """Delete clusters and hosts.

    .. note::
        The clusters and hosts are defined in --clusters.
        the clusters flag is as clusterid:hostname1,hostname2,...;...
    """
    # NOTE: the merged diff had left two docstring opener lines in
    # place (a syntax error); only the post-change docstring is kept.
    clusters = _get_clusters()
    _clean_clusters(clusters)
@ -289,7 +427,10 @@ def _clean_installation_progress(clusters):
@manager.command
def clean_installation_progress():
    """Reset installation progress for clusters and hosts.

    .. note::
        The clusters and hosts to reset are taken from --clusters,
        formatted as clusterid:hostname1,hostname2,...;...
    """
    _clean_installation_progress(_get_clusters())
@ -367,54 +508,217 @@ def _reinstall_hosts(clusters):
@manager.command
def reinstall_hosts():
    """Reinstall hosts in clusters.

    .. note::
        The hosts are defined in --clusters.
        The clusters flag is as clusterid:hostname1,hostname2,...;...
    """
    clusters = _get_clusters()
    _reinstall_hosts(clusters)
    # Restart rsyslog so per-host installation log files are reopened.
    # NOTE(review): assumes this tool runs with root privileges on the
    # compass server -- confirm.
    os.system('service rsyslog restart')
@manager.command
def set_fake_switch_machine():
"""Set fake switches and machines for test."""
with database.session() as session:
credential = { 'version' : 'v2c',
'community' : 'public',
}
switches = [ {'ip': '192.168.100.250'},
{'ip': '192.168.100.251'},
{'ip': '192.168.100.252'},
]
session.query(Switch).delete()
session.query(Machine).delete()
ip_switch ={}
for item in switches:
logging.info('add switch %s', item)
switch = Switch(ip=item['ip'], vendor_info='huawei',
state='under_monitoring')
switch.credential = credential
session.add(switch)
ip_switch[item['ip']] = switch
session.flush()
"""Set fake switches and machines.
.. note::
--fake_switches_vendor is the vendor name for all fake switches.
the default value is 'huawei'
--fake_switches_file is the filename which stores all fake switches
and fake machines.
each line in fake_switches_files presents one machine.
the format of each line <switch_ip>,<switch_port>,<vlan>,<mac>.
"""
missing_flags = False
if not flags.OPTIONS.fake_switches_vendor:
print 'the flag --fake_switches_vendor should be specified'
missing_flags = True
if not flags.OPTIONS.fake_switches_file:
print 'the flag --fake_switches_file should be specified.'
print 'each line in fake_switches_files presents one machine'
print 'the format of each line is <%s>,<%s>,<%s>,<%s>' % (
'switch ip as xxx.xxx.xxx.xxx',
'switch port as xxx12',
'vlan as 1',
'mac as xx:xx:xx:xx:xx:xx')
missing_flags = True
if missing_flags:
return
switch_ips = []
switch_machines = {}
vendor = flags.OPTIONS.fake_switches_vendor
credential = {
'version' : 'v2c',
'community' : 'public',
}
try:
with open(flags.OPTIONS.fake_switches_file) as f:
for line in f:
line = line.strip()
switch_ip, switch_port, vlan, mac = line.split(',', 3)
if switch_ip not in switch_ips:
switch_ips.append(switch_ip)
switch_machines.setdefault(switch_ip, []).append({
'mac': mac,
'port': switch_port,
'vlan': int(vlan)
})
except Exception as error:
logging.error('failed to parse file %s',
flags.OPTIONS.fake_switches_file)
logging.exception(error)
return
with database.session() as session:
session.query(Switch).delete(synchronize_session='fetch')
session.query(Machine).delete(synchronize_session='fetch')
for switch_ip in switch_ips:
logging.info('add switch %s', switch_ip)
switch = Switch(ip=switch_ip, vendor_info=vendor,
credential=credential,
state='under_monitoring')
logging.debug('add switch %s', switch_ip)
session.add(switch)
machines = switch_machines[switch_ip]
for item in machines:
logging.debug('add machine %s', item)
machine = Machine(**item)
machine.switch = switch
machines = [
{'mac': '00:0c:29:32:76:85', 'port':50, 'vlan':1, 'switch_ip':'192.168.100.250'},
{'mac': '00:0c:29:fa:cb:72', 'port':51, 'vlan':1, 'switch_ip':'192.168.100.250'},
{'mac': '28:6e:d4:64:c7:4a', 'port':1, 'vlan':1, 'switch_ip':'192.168.100.251'},
{'mac': '28:6e:d4:64:c7:4c', 'port':2, 'vlan':1, 'switch_ip':'192.168.100.251'},
{'mac': '28:6e:d4:46:c4:25', 'port': 40, 'vlan': 1, 'switch_ip': '192.168.100.252'},
{'mac': '26:6e:d4:4d:c6:be', 'port': 41, 'vlan': 1, 'switch_ip': '192.168.100.252'},
{'mac': '28:6e:d4:62:da:38', 'port': 42, 'vlan': 1, 'switch_ip': '192.168.100.252'},
{'mac': '28:6e:d4:62:db:76', 'port': 43, 'vlan': 1, 'switch_ip': '192.168.100.252'},
]
for item in machines:
logging.info('add machine %s', item)
machine = Machine(mac=item['mac'], port=item['port'],
vlan=item['vlan'],
switch_id=ip_switch[item['switch_ip']].id)
session.add(machine)
def _get_config_properties():
    """Parse the --search_config_properties flag into a dict.

    The flag is a semicolon separated list of <name>=<value> pairs.
    Entries without '=' are ignored with a debug log.  Returns an
    empty dict when the flag is not given.
    """
    raw_flag = flags.OPTIONS.search_config_properties
    if not raw_flag:
        logging.info('the flag --search_config_properties is not specified.')
        return {}

    parsed = {}
    for entry in raw_flag.split(';'):
        if not entry:
            continue
        if '=' not in entry:
            logging.debug('ignore config property %s '
                          'since there is no = in it.', entry)
            continue
        name, _, value = entry.partition('=')
        parsed[name] = value

    logging.debug('get search config properties: %s', parsed)
    return parsed
def _get_print_properties():
    """Parse the --print_config_properties flag into a list.

    The flag is a semicolon separated list of property names; empty
    entries are dropped.  Returns an empty list when the flag is not
    given.
    """
    if not flags.OPTIONS.print_config_properties:
        logging.info('the flag --print_config_properties is not specified.')
        return []

    print_config_properties = flags.OPTIONS.print_config_properties
    # Comprehension replaces the manual append loop.
    config_properties = [
        config_property
        for config_property in print_config_properties.split(';')
        if config_property
    ]
    logging.debug('get print config properties: %s', config_properties)
    return config_properties
def _match_config_properties(config, config_properties):
    """Check that a config contains every searched property value.

    :param config: host or cluster config dict.
    :param config_properties: mapping of property path to wanted value.

    :returns: True when every property is present and matches.  For a
        list-valued config entry a match of any element is enough.
        Values are compared by their string forms.
    """
    ref = config_reference.ConfigReference(config)
    for property_name, property_value in config_properties.items():
        config_value = ref.get(property_name)
        if config_value is None:
            return False

        if isinstance(config_value, list):
            # any() short-circuits instead of scanning the whole list.
            if not any(str(config_value_item) == str(property_value)
                       for config_value_item in config_value):
                return False
        elif str(config_value) != str(property_value):
            return False

    return True
def _print_config_properties(config, config_properties):
ref = config_reference.ConfigReference(config)
print_properties = []
for property_name in config_properties:
config_value = ref.get(property_name)
if config_value is None:
logging.error('did not found %s in %s',
property_name, config)
continue
print_properties.append('%s=%s' % (property_name, config_value))
print ';'.join(print_properties)
@manager.command
def search_hosts():
    """Search hosts by properties.

    .. note::
        --search_config_properties defines what properties are used to
        search, formatted as <property_name>=<property_value>;...
        When no search properties are given, all hosts match.
        --print_config_properties defines what properties to print,
        formatted as <property_name>;...
    """
    search_properties = _get_config_properties()
    print_properties = _get_print_properties()
    with database.session() as session:
        for host in session.query(ClusterHost).all():
            if _match_config_properties(host.config, search_properties):
                _print_config_properties(host.config, print_properties)
@manager.command
def search_clusters():
    """Search clusters by properties.

    .. note::
        --search_config_properties defines what properties are used to search.
        the format of search_config_properties is as
        <property_name>=<property_value>;... If no search properties are set,
        it will return properties of all clusters.
        --print_config_properties defines what properties to print.
        the format of print_config_properties is as
        <property_name>;...
    """
    config_properties = _get_config_properties()
    print_properties = _get_print_properties()
    with database.session() as session:
        clusters = session.query(Cluster).all()
        for cluster in clusters:
            if _match_config_properties(cluster.config, config_properties):
                _print_config_properties(cluster.config, print_properties)
if __name__ == "__main__":
flags.init()
logsetting.init()

View File

@ -10,6 +10,7 @@ if [ $initial_run -eq 0 ]; then
/opt/compass/bin/manage_db.py clean_clusters
fi
/opt/compass/bin/manage_db.py createdb
/opt/compass/bin/manage_db.py sync_switch_configs
/opt/compass/bin/manage_db.py sync_from_installers
service compassd restart
service httpd restart

View File

@ -31,14 +31,19 @@ def main(argv):
trigger_clusterids = [cluster.id for cluster in clusters]
else:
trigger_clusterids = clusterids
logging.info('trigger installer for clusters: %s',
trigger_clusterids)
for clusterid in trigger_clusterids:
hosts = session.query(
ClusterHost).filter_by(
cluster_id=clsuterid).all()
hostids = [host.id for host in hosts]
if flags.OPTIONS.async:
celery.send_task('compass.tasks.trigger_install',
(clusterid,))
(clusterid, hostids))
else:
trigger_install.trigger_install(clusterid)
trigger_install.trigger_install(clusterid, hostids)
if __name__ == '__main__':

View File

@ -3,21 +3,30 @@
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
import os
import os.path
import shutil
from compass.db import database
from compass.db.model import Cluster, ClusterState, HostState
from compass.db.model import LogProgressingHistory
from compass.config_management.utils.config_manager import ConfigManager
from compass.utils import setting_wrapper as setting
def trigger_install(clusterid):
def trigger_install(clusterid, hostids=[]):
"""Deploy a given cluster.
:param clusterid: the id of the cluster to deploy.
:type clusterid: int
:param hostids: the ids of the hosts to deploy.
:type hostids: list of int
.. note::
The function should be called in database session.
"""
logging.debug('trigger install cluster %s hosts %s',
clusterid, hostids)
session = database.current_session()
cluster = session.query(Cluster).filter_by(id=clusterid).first()
if not cluster:
@ -29,30 +38,55 @@ def trigger_install(clusterid):
logging.error('no proper adapter found for cluster %s', cluster.id)
return
if cluster.mutable:
logging.error('ignore installing cluster %s since it is mutable',
cluster)
return
if not cluster.state:
cluster.state = ClusterState()
if cluster.state.state and cluster.state.state != 'UNINITIALIZED':
logging.error('ignore installing cluster %s since the state is %s',
cluster.id, cluster.state)
return
cluster.state.state = 'INSTALLING'
hostids = [host.id for host in cluster.hosts]
cluster.state.progress = 0.0
cluster.state.message = ''
cluster.state.severity = 'INFO'
all_hostids = [host.id for host in cluster.hosts]
update_hostids = []
for host in cluster.hosts:
if not host.state:
host.state = HostState()
elif host.state.state and host.state.state != 'UNINITIALIZED':
logging.info('ignore installing host %s sinc the state is %s',
host.id, host.state)
if host.id not in hostids:
logging.info('ignore installing %s since it is not in %s',
host, hostids)
continue
if host.mutable:
logging.error('ignore installing %s since it is mutable',
host)
continue
log_dir = os.path.join(
setting.INSTALLATION_LOGDIR,
'%s.%s' % (host.hostname, clusterid))
logging.info('clean log dir %s', log_dir)
shutil.rmtree(log_dir, True)
session.query(LogProgressingHistory).filter(
LogProgressingHistory.pathname.startswith(
'%s/' % log_dir)).delete(
synchronize_session='fetch')
if not host.state:
host.state = HostState()
host.state.state = 'INSTALLING'
host.state.progress = 0.0
host.state.message = ''
host.state.severity = 'INFO'
update_hostids.append(host.id)
os.system('service rsyslog restart')
manager = ConfigManager()
manager.update_cluster_and_host_configs(
clusterid, hostids, update_hostids,
clusterid, all_hostids, update_hostids,
adapter.os, adapter.target_system)
manager.sync()

View File

@ -23,12 +23,11 @@ class Installer(installer.Installer):
INSTALLERS = {}
def get_installer_by_name(name, package_installer):
def get_installer_by_name(name, **kwargs):
"""Get os installer by name.
:param name: os installer name.
:type name: str
:param package_installer: package installer instance.
:returns: :instance of subclass of :class:`Installer`
:raises: KeyError
@ -38,7 +37,7 @@ def get_installer_by_name(name, package_installer):
name, INSTALLERS)
raise KeyError('os installer name %s is not in os INSTALLERS')
os_installer = INSTALLERS[name](package_installer)
os_installer = INSTALLERS[name](**kwargs)
logging.debug('got os installer %s', os_installer)
return os_installer
@ -56,10 +55,10 @@ def register(os_installer):
raise KeyError(
'os installer %s is already registered' % os_installer)
logging.debug('register os installer %s', os_installer)
logging.info('register os installer %s', os_installer)
INSTALLERS[os_installer.NAME] = os_installer
def get_installer(package_installer):
def get_installer(**kwargs):
"""Get default os installer from compass setting."""
return get_installer_by_name(setting.OS_INSTALLER, package_installer)
return get_installer_by_name(setting.OS_INSTALLER, **kwargs)

View File

@ -46,7 +46,7 @@ class Installer(installer.Installer):
INSTALLERS = {}
def get_installer_by_name(name):
def get_installer_by_name(name, **kwargs):
"""Get package installer by name.
:param name: package installer name.
@ -60,7 +60,7 @@ def get_installer_by_name(name):
name, INSTALLERS)
raise KeyError('installer name %s is not in package INSTALLERS' % name)
package_installer = INSTALLERS[name]()
package_installer = INSTALLERS[name](**kwargs)
logging.debug('got package installer %s', package_installer)
return package_installer
@ -78,10 +78,10 @@ def register(package_installer):
raise KeyError(
'package installer %s already registered' % package_installer)
logging.debug('register package installer: %s', package_installer)
logging.info('register package installer: %s', package_installer)
INSTALLERS[package_installer.NAME] = package_installer
def get_installer():
def get_installer(**kwargs):
"""get default package installer from comapss setting."""
return get_installer_by_name(setting.PACKAGE_INSTALLER)
return get_installer_by_name(setting.PACKAGE_INSTALLER, **kwargs)

View File

@ -104,7 +104,7 @@ class Installer(package_installer.Installer):
"""chef package installer."""
NAME = 'chef'
def __init__(self):
def __init__(self, **kwargs):
import chef
self.installer_url_ = setting.CHEF_INSTALLER_URL
self.global_databag_name_ = setting.CHEF_GLOBAL_DATABAG_NAME
@ -178,8 +178,8 @@ class Installer(package_installer.Installer):
def _get_databag(self, target_system):
"""get databag."""
from chef import DataBag
return DataBag(target_system, api=self.api_)
import chef
return chef.DataBag(target_system, api=self.api_)
def _get_databag_item(self, bag, bag_item_name):
"""get databag item."""

View File

@ -111,7 +111,7 @@ class Installer(os_installer.Installer):
"""cobbler installer"""
NAME = 'cobbler'
def __init__(self, package_installer):
def __init__(self, **kwargs):
# the connection is created when cobbler installer is initialized.
self.remote_ = xmlrpclib.Server(
setting.COBBLER_INSTALLER_URL,
@ -120,7 +120,7 @@ class Installer(os_installer.Installer):
*setting.COBBLER_INSTALLER_TOKEN)
# cobbler tries to get package related config from package installer.
self.package_installer_ = package_installer
self.package_installer_ = kwargs['package_installer']
logging.debug('%s instance created', self)
def __repr__(self):

View File

@ -41,7 +41,10 @@ CLUSTER_HOST_MERGER = ConfigMerger(
),
ConfigMapping(
path_list=[
'/networking/global',
'/networking/global/nameservers',
'/networking/global/gateway',
'/networking/global/proxy',
'/networking/global/ntp_server',
'/networking/interfaces/*/netmask',
'/networking/interfaces/*/nic',
'/networking/interfaces/*/promisc',
@ -56,18 +59,30 @@ CLUSTER_HOST_MERGER = ConfigMerger(
'search_path': '/networking/global/search_path'},
from_lower_keys={'hostname': '/hostname'},
to_key='dns_alias',
value=functools.partial(config_merger_callbacks.assign_from_pattern,
upper_keys=['search_path', 'clusterid'],
lower_keys=['hostname'])
value=functools.partial(
config_merger_callbacks.assign_from_pattern,
upper_keys=['search_path', 'clusterid'],
lower_keys=['hostname'])
),
ConfigMapping(
path_list=['/networking/global'],
from_upper_keys={'default': 'default_no_proxy',
'clusterid': 'clusterid'},
'clusterid': '/clusterid',
'noproxy_pattern': 'noproxy_pattern'},
from_lower_keys={'hostnames': '/hostname',
'ips': '/networking/interfaces/management/ip'},
to_key='ignore_proxy',
value=config_merger_callbacks.assign_noproxy
),
ConfigMapping(
path_list=['/networking/global'],
from_upper_keys={'pattern': 'search_path_pattern',
'search_path': 'search_path',
'clusterid': '/clusterid'},
to_key='search_path',
value=functools.partial(
config_merger_callbacks.assign_from_pattern,
upper_keys=['search_path', 'clusterid'])
)])
@ -84,7 +99,7 @@ class ConfigManager(object):
self.package_installer_ = package_installer.get_installer()
logging.debug('got package installer: %s', self.package_installer_)
self.os_installer_ = os_installer.get_installer(
self.package_installer_)
package_installer=self.package_installer_)
logging.debug('got os installer: %s', self.os_installer_)
def get_adapters(self):

View File

@ -270,6 +270,9 @@ def assign_roles_by_host_numbers(upper_ref, from_key, lower_refs, to_key,
policy_kwargs = deepcopy(default)
if host_numbers in policy_by_host_numbers:
util.merge_dict(policy_kwargs, policy_by_host_numbers[host_numbers])
else:
logging.debug('didnot find policy %s by host numbers %s',
policy_by_host_numbers, host_numbers)
return assign_roles(upper_ref, from_key, lower_refs,
to_key, **policy_kwargs)
@ -346,16 +349,25 @@ def assign_from_pattern(_upper_ref, _from_key, lower_refs, to_key,
def assign_noproxy(_upper_ref, _from_key, lower_refs,
to_key, default=[], clusterid=1,
to_key, default=[], clusterid=None,
noproxy_pattern='',
hostnames={}, ips={}, **_kwargs):
"""Assign no proxy to hosts."""
no_proxy_list = deepcopy(default)
for _, hostname in hostnames.items():
no_proxy_list.append('%s.%s' % (hostname, clusterid))
for _, ip_addr in ips.items():
no_proxy_list.append(ip_addr)
for lower_key, _ in lower_refs.items():
mapping = {
'clusterid': clusterid,
'hostname': hostnames.get(lower_key, ''),
'ip': ips.get(lower_key, '')
}
try:
no_proxy_list.append(noproxy_pattern % mapping)
except Exception as error:
logging.error('failed to assign %s[%s] = %s %% %s',
lower_key, to_key, noproxy_pattern, mapping)
raise error
no_proxy = ','.join(no_proxy_list)
host_no_proxy = {}
for lower_key, _ in lower_refs.items():

View File

@ -16,9 +16,10 @@ BASE = declarative_base()
class SwitchConfig(BASE):
"""Swtich Config table.
:param id: The unique identifier of the switch config.
:param ip: The IP address of the switch.
:param filter_port: The port of the switch which need to be filtered.
:param id: The unique identifier of the switch config.
:param ip: The IP address of the switch.
:param filter_port: The port of the switch which need to be filtered.
"""
__tablename__ = 'switch_config'
id = Column(Integer, primary_key=True)
@ -61,11 +62,11 @@ class Switch(BASE):
vendor_info = Column(String(256), nullable=True)
state = Column(Enum('initialized', 'unreachable', 'notsupported',
'repolling', 'error', 'under_monitoring',
name='switch_state'))
name='switch_state'),
default='initialized')
err_msg = Column(Text)
def __init__(self, **kwargs):
self.state = 'initialized'
super(Switch, self).__init__(**kwargs)
def __repr__(self):
@ -96,7 +97,7 @@ class Switch(BASE):
logging.error('failed to load credential data %s: %s',
self.id, self.credential_data)
logging.exception(error)
return {}
raise error
else:
return {}
@ -119,8 +120,11 @@ class Switch(BASE):
logging.error('failed to dump credential data %s: %s',
self.id, value)
logging.exception(error)
raise error
else:
self.credential_data = json.dumps({})
logging.debug('switch now is %s', self)
@ -140,16 +144,16 @@ class Machine(BASE):
__tablename__ = 'machine'
id = Column(Integer, primary_key=True)
mac = Column(String(24))
port = Column(String(16))
vlan = Column(Integer)
mac = Column(String(24), default='')
port = Column(String(16), default='')
vlan = Column(Integer, default=0)
update_timestamp = Column(DateTime, default=datetime.now,
onupdate=datetime.now)
switch_id = Column(Integer, ForeignKey('switch.id',
onupdate='CASCADE',
ondelete='SET NULL'))
__table_args__ = (UniqueConstraint('mac', 'vlan', 'switch_id',
name='unique_1'), )
name='unique_machine'),)
switch = relationship('Switch', backref=backref('machines',
lazy='dynamic'))
@ -242,7 +246,7 @@ class ClusterState(BASE):
@property
def clustername(self):
'clustername getter'
"""clustername getter"""
return self.cluster.name
def __repr__(self):
@ -299,7 +303,7 @@ class Cluster(BASE):
logging.error('failed to load security config %s: %s',
self.id, self.partition_config)
logging.exception(error)
return {}
raise error
else:
return {}
@ -314,6 +318,7 @@ class Cluster(BASE):
logging.error('failed to dump partition config %s: %s',
self.id, value)
logging.exception(error)
raise error
else:
self.partition_config = None
@ -327,7 +332,7 @@ class Cluster(BASE):
logging.error('failed to load security config %s: %s',
self.id, self.security_config)
logging.exception(error)
return {}
raise error
else:
return {}
@ -342,6 +347,7 @@ class Cluster(BASE):
logging.error('failed to dump security config %s: %s',
self.id, value)
logging.exception(error)
raise error
else:
self.security_config = None
@ -355,7 +361,7 @@ class Cluster(BASE):
logging.error('failed to load networking config %s: %s',
self.id, self.networking_config)
logging.exception(error)
return {}
raise error
else:
return {}
@ -370,6 +376,7 @@ class Cluster(BASE):
logging.error('failed to dump networking config %s: %s',
self.id, value)
logging.exception(error)
raise error
else:
self.networking_config = None
@ -384,6 +391,8 @@ class Cluster(BASE):
logging.error('failed to load raw config %s: %s',
self.id, self.raw_config)
logging.exception(error)
raise error
util.merge_dict(config, {'security': self.security})
util.merge_dict(config, {'networking': self.networking})
util.merge_dict(config, {'partition': self.partition})
@ -401,15 +410,18 @@ class Cluster(BASE):
self.partition = None
self.raw_config = None
return
self.security = value.get('security')
self.networking = value.get('networking')
self.partition = value.get('partition')
try:
self.raw_config = json.dumps(value)
except Exception as error:
logging.error('failed to dump raw config %s: %s',
self.id, value)
logging.exception(error)
raise error
class ClusterHost(BASE):
@ -432,7 +444,7 @@ class ClusterHost(BASE):
machine_id = Column(Integer, ForeignKey('machine.id',
onupdate='CASCADE',
ondelete='CASCADE'),
nullable=True)
nullable=True, unique=True)
cluster_id = Column(Integer, ForeignKey('cluster.id',
onupdate='CASCADE',
@ -463,28 +475,38 @@ class ClusterHost(BASE):
def config(self):
"""config getter."""
config = {}
if self.config_data:
try:
try:
if self.config_data:
config.update(json.loads(self.config_data))
config.update({'hostid': self.id, 'hostname': self.hostname})
if self.cluster:
config.update({'clusterid': self.cluster.id,
'clustername': self.cluster.name})
if self.machine:
util.merge_dict(
config, {
'networking': {
'interfaces': {
'management': {
'mac': self.machine.mac
}
config.update({'hostid': self.id, 'hostname': self.hostname})
if self.cluster:
config.update({'clusterid': self.cluster.id,
'clustername': self.cluster.name})
if self.machine:
util.merge_dict(
config, {
'networking': {
'interfaces': {
'management': {
'mac': self.machine.mac
}
}
})
except Exception as error:
logging.error('failed to load config %s: %s',
self.hostname, self.config_data)
logging.exception(error)
},
'switch_port': self.machine.port,
'vlan': self.machine.vlan,
})
if self.machine.switch:
util.merge_dict(
config, {'switch_ip': self.machine.switch.ip})
except Exception as error:
logging.error('failed to load config %s: %s',
self.hostname, self.config_data)
logging.exception(error)
raise error
return config
@config.setter
@ -505,6 +527,7 @@ class ClusterHost(BASE):
logging.error('failed to dump config %s: %s',
self.hostname, value)
logging.exception(error)
raise error
class LogProgressingHistory(BASE):
@ -561,6 +584,8 @@ class Adapter(BASE):
name = Column(String, unique=True)
os = Column(String)
target_system = Column(String)
__table_args__ = (
UniqueConstraint('os', 'target_system', name='unique_adapter'),)
def __init__(self, **kwargs):
super(Adapter, self).__init__(**kwargs)

View File

@ -40,14 +40,16 @@ def pollswitch(ip_addr, req_obj='mac', oper="SCAN"):
@celery.task(name="compass.tasks.trigger_install")
def triggerinstall(clusterid):
def triggerinstall(clusterid, hostids=[]):
"""Deploy the given cluster.
:param clusterid: the id of the cluster to deploy.
:type clusterid: int
:param hostids: the ids of the hosts to deploy.
:type hostids: list of int
"""
with database.session():
trigger_install.trigger_install(clusterid)
trigger_install.trigger_install(clusterid, hostids)
@celery.task(name="compass.tasks.progress_update")

View File

@ -705,6 +705,8 @@ class ClusterHostAPITest(ApiTestCase):
expected_config['clustername'] = 'cluster_01'
expected_config['networking']['interfaces']['management']['mac'] \
= "00:27:88:0c:01"
expected_config['switch_port'] = ''
expected_config['vlan'] = 0
self.assertDictEqual(config, expected_config)
def test_clusterHost_put_config(self):
@ -1107,7 +1109,10 @@ class TestAPIWorkFlow(ApiTestCase):
excepted["clustername"] = "cluster_01"
excepted["hostid"] = host_info["id"]
excepted["networking"]["interfaces"]["management"]["mac"] = mac
excepted['switch_port'] = machine.port
excepted['vlan'] = machine.vlan
switch = machine.switch
excepted['switch_ip'] = switch.ip
host = session.query(ClusterHost)\
.filter_by(id=host_info["id"]).first()
self.maxDiff = None

View File

@ -19,10 +19,11 @@ class Dummy2Installer(os_installer.Installer):
class TestInstallerFunctions(unittest2.TestCase):
def setUp(self):
self.installers_backup = os_installer.INSTALLERS
os_installer.INSTALLERS = {}
def tearDown(self):
os_installer.INSTALLERS = {}
os_installer.INSTALLERS = self.installers_backup
def test_found_installer(self):
os_installer.register(DummyInstaller)

View File

@ -19,10 +19,11 @@ class Dummy2Installer(package_installer.Installer):
class TestInstallerFunctions(unittest2.TestCase):
def setUp(self):
self.installers_backup = package_installer.INSTALLERS
package_installer.INSTALLERS = {}
def tearDown(self):
package_installer.INSTALLERS = {}
package_installer.INSTALLERS = self.installers_backup
def test_found_installer(self):
package_installer.register(DummyInstaller)

View File

@ -0,0 +1,62 @@
# Default global networking config: patterns here are %-interpolated with
# per-host values (hostname, clusterid, search_path, ip) by the config
# merger before being written to the installers.
networking = {
    'global': {
        'default_no_proxy': ['127.0.0.1', 'localhost'],
        'search_path_pattern': '%(clusterid)s.%(search_path)s %(search_path)s',
        'noproxy_pattern': '%(hostname)s.%(clusterid)s,%(ip)s'
    },
    'interfaces': {
        'management': {
            'dns_pattern': '%(hostname)s.%(clusterid)s.%(search_path)s',
            'netmask': '255.255.255.0',
            'nic': 'eth0',
            'promisc': 0,
        },
        'tenant': {
            'netmask': '255.255.255.0',
            'nic': 'eth0',
            'dns_pattern': 'virtual-%(hostname)s.%(clusterid)s.%(search_path)s',
            'promisc': 0,
        },
        'public': {
            'netmask': '255.255.255.0',
            'nic': 'eth1',
            'dns_pattern': 'floating-%(hostname)s.%(clusterid)s.%(search_path)s',
            # public interface runs in promiscuous mode.
            'promisc': 1,
        },
        'storage': {
            'netmask': '255.255.255.0',
            'nic': 'eth0',
            'dns_pattern': 'storage-%(hostname)s.%(clusterid)s.%(search_path)s',
            'promisc': 0,
        },
    },
}
# Default credentials used when a cluster config does not override them.
security = {
    'server_credentials': {
        'username': 'root',
        'password': 'huawei',
    },
    'console_credentials': {
        'username': 'admin',
        'password': 'huawei',
    },
    'service_credentials': {
        'username': 'admin',
        'password': 'huawei',
    },
}
# Default (empty) role assignment policy; real policies come from the
# chef databag's role_assign_policy section.
role_assign_policy = {
    # keyed by host count; overrides 'default' when the count matches.
    'policy_by_host_numbers': {
    },
    'default': {
        'roles': [],
        'maxs': {},
        'mins': {},
        'default_max': -1,  # -1 means unlimited.
        'default_min': 0,
        'exclusives': [],
        'bundles': [],
    },
}

View File

@ -0,0 +1,19 @@
# Test settings loaded via compass.utils.setting_wrapper (COMPASS_SETTING).
PROVIDER_NAME = 'mix'
GLOBAL_CONFIG_PROVIDER = 'file'
CLUSTER_CONFIG_PROVIDER = 'db'
HOST_CONFIG_PROVIDER = 'db'
GLOBAL_CONFIG_FILENAME = 'global_config'
CONFIG_FILE_FORMAT = 'python'
# in-memory sqlite so tests never touch a real database.
SQLALCHEMY_DATABASE_URI = 'sqlite://'
OS_INSTALLER = 'cobbler'
COBBLER_INSTALLER_URL = 'http://localhost/cobbler_api'
COBBLER_INSTALLER_TOKEN = ['cobbler', 'cobbler']
PACKAGE_INSTALLER = 'chef'
CHEF_INSTALLER_URL = 'https://localhost/'
CHEF_GLOBAL_DATABAG_NAME = 'env_default'
INSTALLATION_LOGDIR = '/var/log/cobbler/anamon'
DEFAULT_LOGLEVEL = 'info'
DEFAULT_LOGDIR = ''
DEFAULT_LOGINTERVAL = 1
DEFAULT_LOGINTERVAL_UNIT = 'h'
DEFAULT_LOGFORMAT = '%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s'

View File

@ -0,0 +1,248 @@
# End-to-end fixture (scenario: one cluster, one host with all roles).
ADAPTERS = [
    {'name': 'CentOS_openstack', 'os': 'CentOS', 'target_system': 'openstack'},
]
# Roles seeded into the db before the deploy.
ROLES = [
    {'name': 'os-single-controller', 'target_system': 'openstack'},
    {'name': 'os-network', 'target_system': 'openstack'},
    {'name': 'os-compute', 'target_system': 'openstack'},
]
# Switches seeded into the db.
SWITCHES = [
    {'ip': '1.2.3.4', 'vendor': 'huawei', 'credential': {'version': 'v2c', 'community': 'public'}},
]
# Machines attached to each switch, keyed by switch ip.
MACHINES_BY_SWITCH = {
    '1.2.3.4': [
        {'mac': '00:00:01:02:03:04', 'port': 1, 'vlan': 1},
    ],
}
# Cluster definitions seeded into the db.
CLUSTERS = [
    {
        'name': 'cluster1',
        'adapter': 'CentOS_openstack',
        'mutable': False,
        'security': {
            'server_credentials': {
                'username': 'root', 'password': 'huawei'
            },
            'service_credentials': {
                'username': 'service', 'password': 'huawei'
            },
            'console_credentials': {
                'username': 'admin', 'password': 'huawei'
            }
        },
        'networking': {
            'interfaces': {
                'management': {
                    'nic': 'eth0',
                    'promisc': 0,
                    'netmask': '255.255.255.0',
                    'ip_end': '192.168.20.200',
                    'gateway': '',
                    'ip_start': '192.168.20.100'
                },
                'storage': {
                    'nic': 'eth0',
                    'promisc': 0,
                    'netmask': '255.255.254.0',
                    'ip_end': '10.145.88.200',
                    'gateway': '10.145.88.1',
                    'ip_start': '10.145.88.100'
                },
                'public': {
                    'nic': 'eth2',
                    'promisc': 1,
                    'netmask': '255.255.254.0',
                    'ip_end': '10.145.88.255',
                    'gateway': '10.145.88.1',
                    'ip_start': '10.145.88.100'
                },
                'tenant': {
                    'nic': 'eth0',
                    'promisc': 0,
                    'netmask': '255.255.254.0',
                    'ip_end': '10.145.88.120',
                    'gateway': '10.145.88.1',
                    'ip_start': '10.145.88.100'
                }
            },
            'global': {
                'nameservers': '192.168.20.254',
                'proxy': 'http://192.168.20.254:3128',
                'ntp_server': '192.168.20.254',
                'search_path': 'ods.com',
                'gateway': '10.145.88.1'
            },
        },
        # double %% because the value is passed through %-formatting later.
        'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
    },
]
# Hosts per cluster; 'mac' selects the pre-seeded machine.
HOSTS_BY_CLUSTER = {
    'cluster1': [
        {
            'hostname': 'host1',
            'mac': '00:00:01:02:03:04',
            'mutable': False,
            'config': {
                'networking': {
                    'interfaces': {
                        'management': {
                            'ip': '192.168.20.100',
                        },
                    },
                },
                'roles': ["os-single-controller", "os-network", "os-compute"],
            },
        },
    ],
}
# Initial in-memory state for the mocked cobbler server.
cobbler_MOCK = {
    'host_configs': []
}
# Initial in-memory state for the mocked chef server (env_default databag).
chef_MOCK = {
    'configs': {
        'env_default': {
            'all_roles': {
                'os-single-controller': 'openstack controller node',
                'os-network': 'openstack network node',
                'os-compute-worker': 'openstack nova node'
            },
            'dashboard_roles': ['os-single-controller'],
            'role_assign_policy': {
                'default': {
                    'bundles': [],
                    'exclusives': ['os-single-controller', 'os-network'],
                    'roles': ['os-single-controller', 'os-compute-worker', 'os-network'],
                    'default_min': 1,
                    'default_max': 1,
                    'maxs': {'os-compute-worker': -1}
                },
                'policy_by_host_numbers': {
                    '1': {
                        'bundles': [['os-single-controller', 'os-compute-worker', 'os-network']],
                        'exclusives': []
                    },
                    '2': {
                        'bundles': [['os-compute-worker', 'os-network']],
                        'exclusives': ['os-single-controller']
                    },
                },
            },
        },
    },
}
# Expected cobbler system records after the deploy.
cobbler_EXPECTED = {
    'expected_host_configs': [{
        'profile': 'CentOS',
        'name_servers_search': '1.ods.com ods.com',
        'name': 'host1.1',
        'hostname': 'host1',
        'modify_interface': {
            'dnsname-eth2': 'floating-host1.1.ods.com',
            'dnsname-eth0': u'host1.1.ods.com',
            'ipaddress-eth2': '10.145.88.100',
            'static-eth2': True,
            'static-eth0': True,
            'netmask-eth0': '255.255.255.0',
            'ipaddress-eth0': u'192.168.20.100',
            'netmask-eth2': '255.255.254.0',
            'macaddress-eth0': '00:00:01:02:03:04',
            'management-eth2': False,
            'management-eth0': True
        },
        'name_servers': '192.168.20.254',
        'gateway': '10.145.88.1',
        'ksmeta': {
            'username': u'root',
            'promisc_nics': 'eth2',
            'chef_url': 'https://localhost/',
            'tool': 'chef',
            'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
            'proxy': 'http://192.168.20.254:3128',
            'run_list': '"role[os-single-controller]","role[os-network]","role[os-compute]"',
            'ignore_proxy': '127.0.0.1,localhost,host1.1,192.168.20.100',
            'ntp_server': '192.168.20.254',
            'chef_client_name': 'host1_openstack_1',
            'cluster_databag': 'openstack_1',
            'chef_node_name': u'host1_openstack_1'
        },
    }],
}
# Expected chef databag contents after the deploy.
chef_EXPECTED = {
    'expected_configs': {
        'openstack_1': {
            'credential': {
                'identity': {
                    'users': {
                        'compute': {'username': 'service', 'password': 'huawei'},
                        'network': {'username': 'service', 'password': 'huawei'},
                        'admin': {'username': 'admin', 'password': 'huawei'},
                        'image': {'username': 'service', 'password': 'huawei'},
                        'metering': {'username': 'service', 'password': 'huawei'},
                        'volume': {'username': 'service', 'password': 'huawei'},
                        'object-store': {'username': 'service', 'password': 'huawei'}
                    }
                },
                'mysql': {
                    'compute': {'username': 'service', 'password': 'huawei'},
                    'network': {'username': 'service', 'password': 'huawei'},
                    'image': {'username': 'service', 'password': 'huawei'},
                    'metering': {'username': 'service', 'password': 'huawei'},
                    'volume': {'username': 'service', 'password': 'huawei'},
                    'dashboard': {'username': 'service', 'password': 'huawei'},
                    'super': {'username': 'service', 'password': 'huawei'},
                    'identity': {'username': 'service', 'password': 'huawei'}
                }
            },
            'networking': {
                'control': {'interface': 'eth0'},
                'storage': {'interface': 'eth0'},
                'public': {'interface': 'eth2'},
                'tenant': {'interface': 'eth0'}
            },
            'ntp': {'ntpserver': '192.168.20.254'},
            'db': {
                'mysql': {
                    'bind_address': '192.168.20.100'
                }
            },
            'dashboard_roles': ['os-single-controller'],
            'mq': {
                'rabbitmq': {'bind_address': '192.168.20.100'}
            },
            'endpoints': {
                'compute': {
                    'novnc': {'host': '192.168.20.100'},
                    'xvpvnc': {'host': '192.168.20.100'},
                    'service': {'host': '192.168.20.100'},
                    'metadata': {'host': '192.168.20.100'}
                },
                'network': {
                    'service': {'host': '192.168.20.100'}
                },
                'image': {
                    'registry': {'host': '192.168.20.100'},
                    'service': {'host': '192.168.20.100'}
                },
                'metering': {
                    'service': {'host': '192.168.20.100'}
                },
                'volume': {
                    'service': {'host': '192.168.20.100'}
                },
                'ec2': {
                    'admin': {'host': '192.168.20.100'},
                    'service': {'host': '192.168.20.100'}
                },
                'identity': {
                    'admin': {'host': u'192.168.20.100'},
                    'service': {'host': u'192.168.20.100'}
                },
            },
        },
    },
}

View File

@ -0,0 +1,298 @@
# End-to-end fixture (scenario: one cluster, two hosts with split roles).
ADAPTERS = [
    {'name': 'CentOS_openstack', 'os': 'CentOS', 'target_system': 'openstack'},
]
# Roles seeded into the db before the deploy.
ROLES = [
    {'name': 'os-single-controller', 'target_system': 'openstack'},
    {'name': 'os-network', 'target_system': 'openstack'},
    {'name': 'os-compute', 'target_system': 'openstack'},
]
# Switches seeded into the db.
SWITCHES = [
    {'ip': '1.2.3.4', 'vendor': 'huawei', 'credential': {'version': 'v2c', 'community': 'public'}},
]
# Machines attached to each switch, keyed by switch ip.
MACHINES_BY_SWITCH = {
    '1.2.3.4': [
        {'mac': '00:00:01:02:03:04', 'port': 1, 'vlan': 1},
        {'mac': '00:00:01:02:03:05', 'port': 2, 'vlan': 2},
    ],
}
# Cluster definitions seeded into the db.
CLUSTERS = [
    {
        'name': 'cluster1',
        'adapter': 'CentOS_openstack',
        'mutable': False,
        'security': {
            'server_credentials': {
                'username': 'root', 'password': 'huawei'
            },
            'service_credentials': {
                'username': 'service', 'password': 'huawei'
            },
            'console_credentials': {
                'username': 'admin', 'password': 'huawei'
            }
        },
        'networking': {
            'interfaces': {
                'management': {
                    'nic': 'eth0',
                    'promisc': 0,
                    'netmask': '255.255.255.0',
                    'ip_end': '192.168.20.200',
                    'gateway': '',
                    'ip_start': '192.168.20.100'
                },
                'storage': {
                    'nic': 'eth0',
                    'promisc': 0,
                    'netmask': '255.255.254.0',
                    'ip_end': '10.145.88.200',
                    'gateway': '10.145.88.1',
                    'ip_start': '10.145.88.100'
                },
                'public': {
                    'nic': 'eth2',
                    'promisc': 1,
                    'netmask': '255.255.254.0',
                    'ip_end': '10.145.88.255',
                    'gateway': '10.145.88.1',
                    'ip_start': '10.145.88.100'
                },
                'tenant': {
                    'nic': 'eth0',
                    'promisc': 0,
                    'netmask': '255.255.254.0',
                    'ip_end': '10.145.88.120',
                    'gateway': '10.145.88.1',
                    'ip_start': '10.145.88.100'
                }
            },
            'global': {
                'nameservers': '192.168.20.254',
                'proxy': 'http://192.168.20.254:3128',
                'ntp_server': '192.168.20.254',
                'search_path': 'ods.com',
                'gateway': '10.145.88.1'
            },
        },
        # double %% because the value is passed through %-formatting later.
        'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
    },
]
# Hosts per cluster; 'mac' selects the pre-seeded machine.
HOSTS_BY_CLUSTER = {
    'cluster1': [
        {
            'hostname': 'host1',
            'mac': '00:00:01:02:03:04',
            'mutable': False,
            'config': {
                'networking': {
                    'interfaces': {
                        'management': {
                            'ip': '192.168.20.100',
                        },
                    },
                },
                'roles': ["os-single-controller", "os-network"],
            },
        }, {
            'hostname': 'host2',
            'mac': '00:00:01:02:03:05',
            'mutable': False,
            'config': {
                'networking': {
                    'interfaces': {
                        'management': {
                            'ip': '192.168.20.101',
                        },
                    },
                },
                'roles': ["os-compute"],
            },
        },
    ],
}
# Initial in-memory state for the mocked cobbler server.
cobbler_MOCK = {
    'host_configs': []
}
# Initial in-memory state for the mocked chef server (env_default databag).
chef_MOCK = {
    'configs': {
        'env_default': {
            'all_roles': {
                'os-single-controller': 'openstack controller node',
                'os-network': 'openstack network node',
                'os-compute-worker': 'openstack nova node'
            },
            'dashboard_roles': ['os-single-controller'],
            'role_assign_policy': {
                'default': {
                    'bundles': [],
                    'exclusives': ['os-single-controller', 'os-network'],
                    'roles': ['os-single-controller', 'os-compute-worker', 'os-network'],
                    'default_min': 1,
                    'default_max': 1,
                    'maxs': {'os-compute-worker': -1}
                },
                'policy_by_host_numbers': {
                    '1': {
                        'bundles': [['os-single-controller', 'os-compute-worker', 'os-network']],
                        'exclusives': []
                    },
                    '2': {
                        'bundles': [['os-compute-worker', 'os-network']],
                        'exclusives': ['os-single-controller']
                    },
                },
            },
        },
    },
}
# Expected cobbler system records after the deploy (one per host).
cobbler_EXPECTED = {
    'expected_host_configs': [{
        'profile': 'CentOS',
        'name_servers_search': '1.ods.com ods.com',
        'name': 'host1.1',
        'hostname': 'host1',
        'modify_interface': {
            'dnsname-eth2': 'floating-host1.1.ods.com',
            'dnsname-eth0': u'host1.1.ods.com',
            'ipaddress-eth2': '10.145.88.100',
            'static-eth2': True,
            'static-eth0': True,
            'netmask-eth0': '255.255.255.0',
            'ipaddress-eth0': u'192.168.20.100',
            'netmask-eth2': '255.255.254.0',
            'macaddress-eth0': '00:00:01:02:03:04',
            'management-eth2': False,
            'management-eth0': True
        },
        'name_servers': '192.168.20.254',
        'gateway': '10.145.88.1',
        'ksmeta': {
            'username': 'root',
            'promisc_nics': 'eth2',
            'chef_url': 'https://localhost/',
            'tool': 'chef',
            'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
            'proxy': 'http://192.168.20.254:3128',
            'run_list': '"role[os-single-controller]","role[os-network]"',
            'ignore_proxy': '127.0.0.1,localhost,host1.1,192.168.20.100,host2.1,192.168.20.101',
            'ntp_server': '192.168.20.254',
            'chef_client_name': 'host1_openstack_1',
            'cluster_databag': 'openstack_1',
            'chef_node_name': 'host1_openstack_1'
        },
    }, {
        'profile': 'CentOS',
        'name_servers_search': '1.ods.com ods.com',
        'name': 'host2.1',
        'hostname': 'host2',
        'modify_interface': {
            'dnsname-eth2': 'floating-host2.1.ods.com',
            'dnsname-eth0': 'host2.1.ods.com',
            'ipaddress-eth2': '10.145.88.101',
            'static-eth2': True,
            'static-eth0': True,
            'netmask-eth0': '255.255.255.0',
            'ipaddress-eth0': u'192.168.20.101',
            'netmask-eth2': '255.255.254.0',
            'macaddress-eth0': '00:00:01:02:03:05',
            'management-eth2': False,
            'management-eth0': True
        },
        'name_servers': '192.168.20.254',
        'gateway': '10.145.88.1',
        'ksmeta': {
            'username': u'root',
            'promisc_nics': 'eth2',
            'chef_url': 'https://localhost/',
            'tool': 'chef',
            'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
            'proxy': 'http://192.168.20.254:3128',
            'run_list': '"role[os-compute]"',
            'ignore_proxy': '127.0.0.1,localhost,host1.1,192.168.20.100,host2.1,192.168.20.101',
            'ntp_server': '192.168.20.254',
            'chef_client_name': 'host2_openstack_1',
            'cluster_databag': 'openstack_1',
            'chef_node_name': 'host2_openstack_1'
        },
    }],
}
# Expected chef databag contents after the deploy.
chef_EXPECTED = {
    'expected_configs': {
        'openstack_1': {
            'credential': {
                'identity': {
                    'users': {
                        'compute': {'username': 'service', 'password': 'huawei'},
                        'network': {'username': 'service', 'password': 'huawei'},
                        'admin': {'username': 'admin', 'password': 'huawei'},
                        'image': {'username': 'service', 'password': 'huawei'},
                        'metering': {'username': 'service', 'password': 'huawei'},
                        'volume': {'username': 'service', 'password': 'huawei'},
                        'object-store': {'username': 'service', 'password': 'huawei'}
                    }
                },
                'mysql': {
                    'compute': {'username': 'service', 'password': 'huawei'},
                    'network': {'username': 'service', 'password': 'huawei'},
                    'image': {'username': 'service', 'password': 'huawei'},
                    'metering': {'username': 'service', 'password': 'huawei'},
                    'volume': {'username': 'service', 'password': 'huawei'},
                    'dashboard': {'username': 'service', 'password': 'huawei'},
                    'super': {'username': 'service', 'password': 'huawei'},
                    'identity': {'username': 'service', 'password': 'huawei'}
                }
            },
            'networking': {
                'control': {'interface': 'eth0'},
                'storage': {'interface': 'eth0'},
                'public': {'interface': 'eth2'},
                'tenant': {'interface': 'eth0'}
            },
            'ntp': {'ntpserver': '192.168.20.254'},
            'db': {
                'mysql': {
                    'bind_address': '192.168.20.100'
                }
            },
            'dashboard_roles': ['os-single-controller'],
            'mq': {
                'rabbitmq': {'bind_address': '192.168.20.100'}
            },
            'endpoints': {
                'compute': {
                    'novnc': {'host': '192.168.20.100'},
                    'xvpvnc': {'host': '192.168.20.100'},
                    'service': {'host': '192.168.20.100'},
                    'metadata': {'host': '192.168.20.100'}
                },
                'network': {
                    'service': {'host': '192.168.20.100'}
                },
                'image': {
                    'registry': {'host': '192.168.20.100'},
                    'service': {'host': '192.168.20.100'}
                },
                'metering': {
                    'service': {'host': '192.168.20.100'}
                },
                'volume': {
                    'service': {'host': '192.168.20.100'}
                },
                'ec2': {
                    'admin': {'host': '192.168.20.100'},
                    'service': {'host': '192.168.20.100'}
                },
                'identity': {
                    'admin': {'host': u'192.168.20.100'},
                    'service': {'host': u'192.168.20.100'}
                },
            },
        },
    },
}

View File

@ -0,0 +1,528 @@
# End-to-end fixture (scenario: two clusters, two hosts each).
ADAPTERS = [
    {'name': 'CentOS_openstack', 'os': 'CentOS', 'target_system': 'openstack'},
]
# Roles seeded into the db before the deploy.
ROLES = [
    {'name': 'os-single-controller', 'target_system': 'openstack'},
    {'name': 'os-network', 'target_system': 'openstack'},
    {'name': 'os-compute', 'target_system': 'openstack'},
]
# Switches seeded into the db.
SWITCHES = [
    {'ip': '1.2.3.4', 'vendor': 'huawei', 'credential': {'version': 'v2c', 'community': 'public'}},
]
# Machines attached to each switch, keyed by switch ip.
MACHINES_BY_SWITCH = {
    '1.2.3.4': [
        {'mac': '00:00:01:02:03:04', 'port': 1, 'vlan': 1},
        {'mac': '00:00:01:02:03:05', 'port': 1, 'vlan': 1},
        {'mac': '00:00:01:02:03:06', 'port': 1, 'vlan': 1},
        {'mac': '00:00:01:02:03:07', 'port': 1, 'vlan': 1},
    ],
}
# Two cluster definitions; cluster2 uses the *.110 address ranges.
CLUSTERS = [
    {
        'name': 'cluster1',
        'adapter': 'CentOS_openstack',
        'mutable': False,
        'security': {
            'server_credentials': {
                'username': 'root', 'password': 'huawei'
            },
            'service_credentials': {
                'username': 'service', 'password': 'huawei'
            },
            'console_credentials': {
                'username': 'admin', 'password': 'huawei'
            }
        },
        'networking': {
            'interfaces': {
                'management': {
                    'nic': 'eth0',
                    'promisc': 0,
                    'netmask': '255.255.255.0',
                    'ip_end': '192.168.20.200',
                    'gateway': '',
                    'ip_start': '192.168.20.100'
                },
                'storage': {
                    'nic': 'eth0',
                    'promisc': 0,
                    'netmask': '255.255.254.0',
                    'ip_end': '10.145.88.200',
                    'gateway': '10.145.88.1',
                    'ip_start': '10.145.88.100'
                },
                'public': {
                    'nic': 'eth2',
                    'promisc': 1,
                    'netmask': '255.255.254.0',
                    'ip_end': '10.145.88.255',
                    'gateway': '10.145.88.1',
                    'ip_start': '10.145.88.100'
                },
                'tenant': {
                    'nic': 'eth0',
                    'promisc': 0,
                    'netmask': '255.255.254.0',
                    'ip_end': '10.145.88.120',
                    'gateway': '10.145.88.1',
                    'ip_start': '10.145.88.100'
                }
            },
            'global': {
                'nameservers': '192.168.20.254',
                'proxy': 'http://192.168.20.254:3128',
                'ntp_server': '192.168.20.254',
                'search_path': 'ods.com',
                'gateway': '10.145.88.1'
            },
        },
        # double %% because the value is passed through %-formatting later.
        'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
    }, {
        'name': 'cluster2',
        'adapter': 'CentOS_openstack',
        'mutable': False,
        'security': {
            'server_credentials': {
                'username': 'root', 'password': 'huawei'
            },
            'service_credentials': {
                'username': 'service', 'password': 'huawei'
            },
            'console_credentials': {
                'username': 'admin', 'password': 'huawei'
            }
        },
        'networking': {
            'interfaces': {
                'management': {
                    'nic': 'eth0',
                    'promisc': 0,
                    'netmask': '255.255.255.0',
                    'ip_end': '192.168.20.200',
                    'gateway': '',
                    'ip_start': '192.168.20.110'
                },
                'storage': {
                    'nic': 'eth0',
                    'promisc': 0,
                    'netmask': '255.255.254.0',
                    'ip_end': '10.145.88.200',
                    'gateway': '10.145.88.1',
                    'ip_start': '10.145.88.110'
                },
                'public': {
                    'nic': 'eth2',
                    'promisc': 1,
                    'netmask': '255.255.254.0',
                    'ip_end': '10.145.88.255',
                    'gateway': '10.145.88.1',
                    'ip_start': '10.145.88.110'
                },
                'tenant': {
                    'nic': 'eth0',
                    'promisc': 0,
                    'netmask': '255.255.254.0',
                    'ip_end': '10.145.88.120',
                    'gateway': '10.145.88.1',
                    'ip_start': '10.145.88.110'
                }
            },
            'global': {
                'nameservers': '192.168.20.254',
                'proxy': 'http://192.168.20.254:3128',
                'ntp_server': '192.168.20.254',
                'search_path': 'ods.com',
                'gateway': '10.145.88.1'
            },
        },
        'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
    },
]
# Hosts per cluster; 'mac' selects the pre-seeded machine.
HOSTS_BY_CLUSTER = {
    'cluster1': [
        {
            'hostname': 'host1',
            'mac': '00:00:01:02:03:04',
            'mutable': False,
            'config': {
                'networking': {
                    'interfaces': {
                        'management': {
                            'ip': '192.168.20.100',
                        },
                    },
                },
                'roles': ["os-single-controller"],
            },
        }, {
            'hostname': 'host2',
            'mac': '00:00:01:02:03:05',
            'mutable': False,
            'config': {
                'networking': {
                    'interfaces': {
                        'management': {
                            'ip': '192.168.20.101',
                        },
                    },
                },
                'roles': ["os-network", "os-compute"],
            },
        },
    ],
    'cluster2': [
        {
            'hostname': 'host1',
            'mac': '00:00:01:02:03:06',
            'mutable': False,
            'config': {
                'networking': {
                    'interfaces': {
                        'management': {
                            'ip': '192.168.20.110',
                        },
                    },
                },
                'roles': ["os-single-controller"],
            },
        }, {
            'hostname': 'host2',
            'mac': '00:00:01:02:03:07',
            'mutable': False,
            'config': {
                'networking': {
                    'interfaces': {
                        'management': {
                            'ip': '192.168.20.111',
                        },
                    },
                },
                'roles': ["os-network", "os-compute"],
            },
        },
    ],
}
# Initial in-memory state for the mocked cobbler server.
cobbler_MOCK = {
    'host_configs': []
}
# Initial in-memory state for the mocked chef server (env_default databag).
chef_MOCK = {
    'configs': {
        'env_default': {
            'all_roles': {
                'os-single-controller': 'openstack controller node',
                'os-network': 'openstack network node',
                'os-compute-worker': 'openstack nova node'
            },
            'dashboard_roles': ['os-single-controller'],
            'role_assign_policy': {
                'default': {
                    'bundles': [],
                    'exclusives': ['os-single-controller', 'os-network'],
                    'roles': ['os-single-controller', 'os-compute-worker', 'os-network'],
                    'default_min': 1,
                    'default_max': 1,
                    'maxs': {'os-compute-worker': -1}
                },
                'policy_by_host_numbers': {
                    '1': {
                        'bundles': [['os-single-controller', 'os-compute-worker', 'os-network']],
                        'exclusives': []
                    },
                    '2': {
                        'bundles': [['os-compute-worker', 'os-network']],
                        'exclusives': ['os-single-controller']
                    },
                },
            },
        },
    },
}
# Expected cobbler system records after the deploy (one per host, both clusters).
cobbler_EXPECTED = {
    'expected_host_configs': [{
        'profile': 'CentOS',
        'name_servers_search': '1.ods.com ods.com',
        'name': 'host1.1',
        'hostname': 'host1',
        'modify_interface': {
            'dnsname-eth2': 'floating-host1.1.ods.com',
            'dnsname-eth0': u'host1.1.ods.com',
            'ipaddress-eth2': '10.145.88.100',
            'static-eth2': True,
            'static-eth0': True,
            'netmask-eth0': '255.255.255.0',
            'ipaddress-eth0': u'192.168.20.100',
            'netmask-eth2': '255.255.254.0',
            'macaddress-eth0': '00:00:01:02:03:04',
            'management-eth2': False,
            'management-eth0': True
        },
        'name_servers': '192.168.20.254',
        'gateway': '10.145.88.1',
        'ksmeta': {
            'username': u'root',
            'promisc_nics': 'eth2',
            'chef_url': 'https://localhost/',
            'tool': 'chef',
            'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
            'proxy': 'http://192.168.20.254:3128',
            'run_list': '"role[os-single-controller]"',
            'ignore_proxy': '127.0.0.1,localhost,host1.1,192.168.20.100,host2.1,192.168.20.101',
            'ntp_server': '192.168.20.254',
            'chef_client_name': 'host1_openstack_1',
            'cluster_databag': 'openstack_1',
            'chef_node_name': u'host1_openstack_1'
        },
    }, {
        'profile': 'CentOS',
        'name_servers_search': '1.ods.com ods.com',
        'name': 'host2.1',
        'hostname': 'host2',
        'modify_interface': {
            'dnsname-eth2': 'floating-host2.1.ods.com',
            'dnsname-eth0': u'host2.1.ods.com',
            'ipaddress-eth2': '10.145.88.101',
            'static-eth2': True,
            'static-eth0': True,
            'netmask-eth0': '255.255.255.0',
            'ipaddress-eth0': u'192.168.20.101',
            'netmask-eth2': '255.255.254.0',
            'macaddress-eth0': '00:00:01:02:03:05',
            'management-eth2': False,
            'management-eth0': True
        },
        'name_servers': '192.168.20.254',
        'gateway': '10.145.88.1',
        'ksmeta': {
            'username': u'root',
            'promisc_nics': 'eth2',
            'chef_url': 'https://localhost/',
            'tool': 'chef',
            'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
            'proxy': 'http://192.168.20.254:3128',
            'run_list': '"role[os-network]","role[os-compute]"',
            'ignore_proxy': '127.0.0.1,localhost,host1.1,192.168.20.100,host2.1,192.168.20.101',
            'ntp_server': '192.168.20.254',
            'chef_client_name': 'host2_openstack_1',
            'cluster_databag': 'openstack_1',
            'chef_node_name': u'host2_openstack_1'
        },
    }, {
        'profile': 'CentOS',
        'name_servers_search': '2.ods.com ods.com',
        'name': 'host1.2',
        'hostname': 'host1',
        'modify_interface': {
            'dnsname-eth2': 'floating-host1.2.ods.com',
            'dnsname-eth0': u'host1.2.ods.com',
            'ipaddress-eth2': '10.145.88.110',
            'static-eth2': True,
            'static-eth0': True,
            'netmask-eth0': '255.255.255.0',
            'ipaddress-eth0': u'192.168.20.110',
            'netmask-eth2': '255.255.254.0',
            'macaddress-eth0': '00:00:01:02:03:06',
            'management-eth2': False,
            'management-eth0': True
        },
        'name_servers': '192.168.20.254',
        'gateway': '10.145.88.1',
        'ksmeta': {
            'username': u'root',
            'promisc_nics': 'eth2',
            'chef_url': 'https://localhost/',
            'tool': 'chef',
            'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
            'proxy': 'http://192.168.20.254:3128',
            'run_list': '"role[os-single-controller]"',
            'ignore_proxy': '127.0.0.1,localhost,host1.2,192.168.20.110,host2.2,192.168.20.111',
            'ntp_server': '192.168.20.254',
            'chef_client_name': 'host1_openstack_2',
            'cluster_databag': 'openstack_2',
            'chef_node_name': u'host1_openstack_2'
        },
    }, {
        'profile': 'CentOS',
        'name_servers_search': '2.ods.com ods.com',
        'name': 'host2.2',
        'hostname': 'host2',
        'modify_interface': {
            'dnsname-eth2': 'floating-host2.2.ods.com',
            'dnsname-eth0': u'host2.2.ods.com',
            'ipaddress-eth2': '10.145.88.111',
            'static-eth2': True,
            'static-eth0': True,
            'netmask-eth0': '255.255.255.0',
            'ipaddress-eth0': u'192.168.20.111',
            'netmask-eth2': '255.255.254.0',
            'macaddress-eth0': '00:00:01:02:03:07',
            'management-eth2': False,
            'management-eth0': True
        },
        'name_servers': '192.168.20.254',
        'gateway': '10.145.88.1',
        'ksmeta': {
            'username': u'root',
            'promisc_nics': 'eth2',
            'chef_url': 'https://localhost/',
            'tool': 'chef',
            'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
            'proxy': 'http://192.168.20.254:3128',
            'run_list': '"role[os-network]","role[os-compute]"',
            'ignore_proxy': '127.0.0.1,localhost,host1.2,192.168.20.110,host2.2,192.168.20.111',
            'ntp_server': '192.168.20.254',
            'chef_client_name': 'host2_openstack_2',
            'cluster_databag': 'openstack_2',
            'chef_node_name': u'host2_openstack_2'
        },
    }],
}
# Expected chef databag contents after the deploy (one databag per cluster).
chef_EXPECTED = {
    'expected_configs': {
        'openstack_1': {
            'credential': {
                'identity': {
                    'users': {
                        'compute': {'username': 'service', 'password': 'huawei'},
                        'network': {'username': 'service', 'password': 'huawei'},
                        'admin': {'username': 'admin', 'password': 'huawei'},
                        'image': {'username': 'service', 'password': 'huawei'},
                        'metering': {'username': 'service', 'password': 'huawei'},
                        'volume': {'username': 'service', 'password': 'huawei'},
                        'object-store': {'username': 'service', 'password': 'huawei'}
                    }
                },
                'mysql': {
                    'compute': {'username': 'service', 'password': 'huawei'},
                    'network': {'username': 'service', 'password': 'huawei'},
                    'image': {'username': 'service', 'password': 'huawei'},
                    'metering': {'username': 'service', 'password': 'huawei'},
                    'volume': {'username': 'service', 'password': 'huawei'},
                    'dashboard': {'username': 'service', 'password': 'huawei'},
                    'super': {'username': 'service', 'password': 'huawei'},
                    'identity': {'username': 'service', 'password': 'huawei'}
                }
            },
            'networking': {
                'control': {'interface': 'eth0'},
                'storage': {'interface': 'eth0'},
                'public': {'interface': 'eth2'},
                'tenant': {'interface': 'eth0'}
            },
            'ntp': {'ntpserver': '192.168.20.254'},
            'db': {
                'mysql': {
                    'bind_address': '192.168.20.100'
                }
            },
            'dashboard_roles': ['os-single-controller'],
            'mq': {
                'rabbitmq': {'bind_address': '192.168.20.100'}
            },
            'endpoints': {
                'compute': {
                    'novnc': {'host': '192.168.20.100'},
                    'xvpvnc': {'host': '192.168.20.100'},
                    'service': {'host': '192.168.20.100'},
                    'metadata': {'host': '192.168.20.100'}
                },
                'network': {
                    'service': {'host': '192.168.20.100'}
                },
                'image': {
                    'registry': {'host': '192.168.20.100'},
                    'service': {'host': '192.168.20.100'}
                },
                'metering': {
                    'service': {'host': '192.168.20.100'}
                },
                'volume': {
                    'service': {'host': '192.168.20.100'}
                },
                'ec2': {
                    'admin': {'host': '192.168.20.100'},
                    'service': {'host': '192.168.20.100'}
                },
                'identity': {
                    'admin': {'host': u'192.168.20.100'},
                    'service': {'host': u'192.168.20.100'}
                },
            },
        },
        'openstack_2': {
            'credential': {
                'identity': {
                    'users': {
                        'compute': {'username': 'service', 'password': 'huawei'},
                        'network': {'username': 'service', 'password': 'huawei'},
                        'admin': {'username': 'admin', 'password': 'huawei'},
                        'image': {'username': 'service', 'password': 'huawei'},
                        'metering': {'username': 'service', 'password': 'huawei'},
                        'volume': {'username': 'service', 'password': 'huawei'},
                        'object-store': {'username': 'service', 'password': 'huawei'}
                    }
                },
                'mysql': {
                    'compute': {'username': 'service', 'password': 'huawei'},
                    'network': {'username': 'service', 'password': 'huawei'},
                    'image': {'username': 'service', 'password': 'huawei'},
                    'metering': {'username': 'service', 'password': 'huawei'},
                    'volume': {'username': 'service', 'password': 'huawei'},
                    'dashboard': {'username': 'service', 'password': 'huawei'},
                    'super': {'username': 'service', 'password': 'huawei'},
                    'identity': {'username': 'service', 'password': 'huawei'}
                }
            },
            'networking': {
                'control': {'interface': 'eth0'},
                'storage': {'interface': 'eth0'},
                'public': {'interface': 'eth2'},
                'tenant': {'interface': 'eth0'}
            },
            'ntp': {'ntpserver': '192.168.20.254'},
            'db': {
                'mysql': {
                    'bind_address': '192.168.20.110'
                }
            },
            'dashboard_roles': ['os-single-controller'],
            'mq': {
                'rabbitmq': {'bind_address': '192.168.20.110'}
            },
            'endpoints': {
                'compute': {
                    'novnc': {'host': '192.168.20.110'},
                    'xvpvnc': {'host': '192.168.20.110'},
                    'service': {'host': '192.168.20.110'},
                    'metadata': {'host': '192.168.20.110'}
                },
                'network': {
                    'service': {'host': '192.168.20.110'}
                },
                'image': {
                    'registry': {'host': '192.168.20.110'},
                    'service': {'host': '192.168.20.110'}
                },
                'metering': {
                    'service': {'host': '192.168.20.110'}
                },
                'volume': {
                    'service': {'host': '192.168.20.110'}
                },
                'ec2': {
                    'admin': {'host': '192.168.20.110'},
                    'service': {'host': '192.168.20.110'}
                },
                'identity': {
                    'admin': {'host': u'192.168.20.110'},
                    'service': {'host': u'192.168.20.110'}
                },
            },
        },
    },
}

View File

@ -0,0 +1,265 @@
import chef
import logging
import os
import os.path
import shutil
import unittest2
import xmlrpclib
from mock import Mock
# Point compass at the test settings BEFORE importing modules that read
# them, then reload the wrapper so it picks up the override.
os.environ['COMPASS_SETTING'] = '%s/data/setting' % os.path.dirname(os.path.abspath(__file__))
from compass.utils import setting_wrapper as setting
reload(setting)
setting.CONFIG_DIR = '%s/data' % os.path.dirname(os.path.abspath(__file__))
import compass.config_management.installers
import compass.config_management.providers
from compass.actions import trigger_install
from compass.db import database
from compass.db.model import Switch, Machine, Cluster, ClusterHost, Adapter, Role
from compass.utils import flags
from compass.utils import logsetting
class TestEndToEnd(unittest2.TestCase):
    def _contains(self, origin_config, expected_config):
        """Return True if origin_config recursively contains expected_config.

        dicts are compared key by key (extra keys in origin_config are
        ignored); a callable expected value is applied as a predicate on
        the origin value; everything else is compared by equality.
        Mismatches are logged to aid debugging.
        """
        if isinstance(expected_config, dict):
            for key, value in expected_config.items():
                if not isinstance(origin_config, dict):
                    logging.error('%s type is not dict',
                                  origin_config)
                    return False
                if key not in origin_config:
                    logging.error('%s is not in config:\n%s',
                                  key, origin_config.keys())
                    return False
                if not self._contains(origin_config[key], value):
                    logging.error('%s is not match:\n%s\nvs\n%s',
                                  key, origin_config[key], value)
                    return False
            return True
        elif callable(expected_config):
            return expected_config(origin_config)
        else:
            return expected_config == origin_config
def _mock_cobbler(self, host_configs):
mock_server = Mock()
xmlrpclib.Server = mock_server
mock_server.return_value.login.return_value = ''
mock_server.return_value.sync = Mock()
mock_server.return_value.find_profile = Mock(
side_effect=lambda x: [x['name']])
def _get_system_handle(sys_name, token):
for i, config in enumerate(host_configs):
if config['name'] == sys_name:
return i
raise Exception('Not Found %s' % sys_name)
mock_server.return_value.get_system_handle = Mock(
side_effect=_get_system_handle)
def _new_system(token):
host_configs.append({'name': ''})
return len(host_configs) - 1
mock_server.return_value.new_system = Mock(
side_effect=_new_system)
def _remove_system(sys_name, token):
for i, config in host_configs:
if config['name'] == sys_name:
del host_configs[i]
return
raise Exception('Not Found %s' % sys_name)
mock_server.return_value.remove_system = Mock(
side_effect=_remove_system)
mock_server.return_value.save_system = Mock()
def _modify_system(sys_id, key, value, token):
host_configs[sys_id][key] = value
mock_server.return_value.modify_system = Mock(
side_effect=_modify_system)
    def _check_cobbler(self, host_configs, expected_host_configs):
        """Assert each recorded cobbler system contains its expected subset."""
        self.assertEqual(len(host_configs), len(expected_host_configs))
        for i in range(len(host_configs)):
            self.assertTrue(
                self._contains(host_configs[i], expected_host_configs[i]))
    def _mock_chef(self, configs):
        """Replace pychef objects with mocks backed by the configs dict."""
        chef.autoconfigure = Mock()
        chef.DataBag = Mock()
        import collections

        class _mockDict(collections.Mapping):
            # Fake chef.DataBagItem: reads/writes configs[bag_item_name].
            # note: uses `in_self` to avoid shadowing the enclosing method's
            # `self`.
            def __init__(in_self, bag, bag_item_name, api):
                in_self.bag_item_name_ = bag_item_name
                in_self.config_ = configs.get(bag_item_name, {})

            def __len__(in_self):
                return len(in_self.config_)

            def __iter__(in_self):
                return iter(in_self.config_)

            def __getitem__(in_self, name):
                return in_self.config_[name]

            def __setitem__(in_self, name, value):
                in_self.config_[name] = value

            def delete(in_self):
                del configs[in_self.bag_item_name_]

            def save(in_self):
                # persist back so _check_chef sees the written databag.
                configs[in_self.bag_item_name_] = in_self.config_

        chef.DataBagItem = Mock(side_effect=_mockDict)
        chef.Client = Mock()
        chef.Client.return_value.delete = Mock()
        chef.Node = Mock()
        chef.Node.return_value.delete = Mock()
    def _check_chef(self, configs, expected_configs):
        """Assert the written chef databags contain the expected subset."""
        self.assertTrue(self._contains(configs, expected_configs))
    def _mock_os_installer(self, config_locals):
        """Install the mock for the configured os installer from the fixture.

        Reads the <installer>_MOCK dict (e.g. cobbler_MOCK) from the
        exec'ed fixture module and passes it to the matching _mock_* method.
        Assumes self.os_installer_mock_ is populated in setUp (not shown
        in this view).
        """
        self.os_installer_mock_[setting.OS_INSTALLER](
            **config_locals['%s_MOCK' % setting.OS_INSTALLER])
    def _mock_package_installer(self, config_locals):
        """Install the mock for the configured package installer.

        Reads the <installer>_MOCK dict (e.g. chef_MOCK) from the exec'ed
        fixture module. Assumes self.package_installer_mock_ is populated
        in setUp (not shown in this view).
        """
        self.package_installer_mock_[setting.PACKAGE_INSTALLER](
            **config_locals['%s_MOCK' % setting.PACKAGE_INSTALLER])
    def _check_os_installer(self, config_locals):
        """Check the os installer mock state against the fixture expectations.

        Merges the <installer>_MOCK state (mutated during the deploy) with
        the <installer>_EXPECTED values and hands both to the checker.
        """
        mock_kwargs = config_locals['%s_MOCK' % setting.OS_INSTALLER]
        expected_kwargs = config_locals['%s_EXPECTED' % setting.OS_INSTALLER]
        kwargs = {}
        kwargs.update(mock_kwargs)
        kwargs.update(expected_kwargs)
        self.os_installer_checker_[setting.OS_INSTALLER](**kwargs)
    def _check_package_installer(self, config_locals):
        """Check the package installer mock state against the expectations.

        Mirrors _check_os_installer for the package installer.
        """
        mock_kwargs = config_locals['%s_MOCK' % setting.PACKAGE_INSTALLER]
        expected_kwargs = config_locals['%s_EXPECTED' % setting.PACKAGE_INSTALLER]
        kwargs = {}
        kwargs.update(mock_kwargs)
        kwargs.update(expected_kwargs)
        self.package_installer_checker_[setting.PACKAGE_INSTALLER](**kwargs)
def _test(self, config_filename):
full_path = '%s/data/%s' % (
os.path.dirname(os.path.abspath(__file__)),
config_filename)
config_globals = {}
config_locals = {}
execfile(full_path, config_globals, config_locals)
self._prepare_database(config_locals)
self._mock_os_installer(config_locals)
self._mock_package_installer(config_locals)
with database.session() as session:
clusters = session.query(Cluster).all()
for cluster in clusters:
clusterid = cluster.id
hostids = [host.id for host in cluster.hosts]
trigger_install.trigger_install(clusterid, hostids)
self._check_os_installer(config_locals)
self._check_package_installer(config_locals)
def _prepare_database(self, config_locals):
with database.session() as session:
adapters = {}
for adapter_config in config_locals['ADAPTERS']:
adapter = Adapter(**adapter_config)
session.add(adapter)
adapters[adapter_config['name']] = adapter
roles = {}
for role_config in config_locals['ROLES']:
role = Role(**role_config)
session.add(role)
roles[role_config['name']] = role
switches = {}
for switch_config in config_locals['SWITCHES']:
switch = Switch(**switch_config)
session.add(switch)
switches[switch_config['ip']] = switch
machines = {}
for switch_ip, machine_configs in config_locals['MACHINES_BY_SWITCH'].items():
for machine_config in machine_configs:
machine = Machine(**machine_config)
machines[machine_config['mac']] = machine
machine.switch = switches[switch_ip]
session.add(machine)
clusters = {}
for cluster_config in config_locals['CLUSTERS']:
adapter_name = cluster_config['adapter']
del cluster_config['adapter']
cluster = Cluster(**cluster_config)
clusters[cluster_config['name']] = cluster
cluster.adapter = adapters[adapter_name]
session.add(cluster)
hosts = {}
for cluster_name, host_configs in config_locals['HOSTS_BY_CLUSTER'].items():
for host_config in host_configs:
mac = host_config['mac']
del host_config['mac']
host = ClusterHost(**host_config)
hosts['%s.%s' % (host_config['hostname'], cluster_name)] = host
host.machine = machines[mac]
host.cluster = clusters[cluster_name]
session.add(host)
def setUp(self):
database.create_db()
shutil.rmtree = Mock()
os.system = Mock()
self.os_installer_mock_ = {}
self.os_installer_mock_['cobbler'] = self._mock_cobbler
self.package_installer_mock_ = {}
self.package_installer_mock_['chef'] = self._mock_chef
self.os_installer_checker_ = {}
self.os_installer_checker_['cobbler'] = self._check_cobbler
self.package_installer_checker_ = {}
self.package_installer_checker_['chef'] = self._check_chef
    def tearDown(self):
        """Drop the database created in setUp."""
        database.drop_db()
    def test_1(self):
        """Run the end-to-end scenario defined in data/test1."""
        self._test('test1')

    def test_2(self):
        """Run the end-to-end scenario defined in data/test2."""
        self._test('test2')

    def test_3(self):
        """Run the end-to-end scenario defined in data/test3."""
        self._test('test3')
if __name__ == '__main__':
    # Initialize command-line flags and logging before running the
    # suite standalone.
    flags.init()
    logsetting.init()
    unittest2.main()

View File

@ -26,7 +26,6 @@ class TestConfigMerger(unittest2.TestCase):
},
'global': {
'search_path': 'ods.com',
'default_no_proxy': ['127.0.0.1', 'localhost'],
},
},
'clustername': 'cluster1',
@ -73,8 +72,6 @@ class TestConfigMerger(unittest2.TestCase):
},
'global': {
'search_path': 'ods.com',
'default_no_proxy': ['127.0.0.1', 'localhost'],
'ignore_proxy': '127.0.0.1,localhost,host1,192.168.1.1,host2,192.168.1.50'
}
},
'hostname': 'host1',
@ -97,8 +94,6 @@ class TestConfigMerger(unittest2.TestCase):
},
'global': {
'search_path': 'ods.com',
'default_no_proxy': ['127.0.0.1', 'localhost'],
'ignore_proxy': '127.0.0.1,localhost,host1,192.168.1.1,host2,192.168.1.50'
}
},
'hostname': 'host2',
@ -148,15 +143,6 @@ class TestConfigMerger(unittest2.TestCase):
upper_keys=['search_path', 'clustername'],
lower_keys=['hostname'])
),
config_merger.ConfigMapping(
path_list=['/networking/global'],
from_upper_keys={'default': 'default_no_proxy',
'clusterid': '/clusterid'},
from_lower_keys={'hostnames': '/hostname',
'ips': '/networking/interfaces/management/ip'},
to_key='ignore_proxy',
value=config_merger_callbacks.assign_noproxy
)
]
merger = config_merger.ConfigMerger(mappings)
merger.merge(upper_config, lower_configs)

View File

@ -9,6 +9,7 @@ else:
SETTING = '/etc/compass/setting'
try:
print 'load setting from %s' % SETTING
execfile(SETTING, globals(), locals())
except Exception as error:
logging.exception(error)

View File

@ -1,6 +1,8 @@
networking = {
'global': {
'default_no_proxy': ['127.0.0.1', 'localhost'],
'search_path_pattern': '%(clusterid)s.%(search_path)s %(search_path)s',
'noproxy_pattern': '%(hostname)s.%(clusterid)s,%(ip)s'
},
'interfaces': {
'management': {

View File

@ -5,14 +5,14 @@ HOST_CONFIG_PROVIDER = 'db'
CONFIG_DIR = '/etc/compass'
GLOBAL_CONFIG_FILENAME = 'global_config'
CONFIG_FILE_FORMAT = 'python'
DATABASE_TYPE = 'sqlite'
DATABASE_TYPE = 'file'
DATABASE_FILE = '/opt/compass/db/app.db'
SQLALCHEMY_DATABASE_URI = DATABASE_TYPE + ':///' + DATABASE_FILE
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_FILE
OS_INSTALLER = 'cobbler'
COBBLER_INSTALLER_URL = 'http://192.168.1.201/cobbler_api'
COBBLER_INSTALLER_URL = 'http://localhost/cobbler_api'
COBBLER_INSTALLER_TOKEN = ['cobbler', 'cobbler']
PACKAGE_INSTALLER = 'chef'
CHEF_INSTALLER_URL = 'https://192.168.1.201'
CHEF_INSTALLER_URL = 'https://localhost'
CHEF_GLOBAL_DATABAG_NAME = 'env_default'
INSTALLATION_LOGDIR = '/var/log/cobbler/anamon'
DEFAULT_LOGLEVEL = 'debug'
@ -26,3 +26,5 @@ CELERYCONFIG_DIR = '/etc/compass'
CELERYCONFIG_FILE = 'celeryconfig'
PROGRESS_UPDATE_INTERVAL=30
POLLSWITCH_INTERVAL=60
SWITCHES = [
]