add flavor support and re-organize var_dicts

Re-organize each first-level dict value in var_dict into a new searchList
entry for the Cheetah template.

Change-Id: I584594509abc82c11302ca6db577769ab3df8db4
grace.yu 2014-08-27 23:11:54 -07:00 committed by Weidong Shao
parent 7ea34feb85
commit 2468fc5410
25 changed files with 674 additions and 423 deletions
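The re-organization described above means each first-level dict value of vars_dict is put on the Cheetah searchList as its own entry, so templates can reference keys directly (e.g. $name instead of $host.name). A minimal sketch of that flattening, using a hypothetical vars_dict purely for illustration:

from copy import deepcopy
from Cheetah.Template import Template

def build_search_list(vars_dict):
    # Each first-level dict value becomes its own searchList entry;
    # remaining scalar values stay grouped in one trailing dict.
    search_list = []
    leftovers = deepcopy(vars_dict)
    for key, value in vars_dict.items():
        if isinstance(value, dict):
            search_list.append(leftovers.pop(key))
    search_list.append(leftovers)
    return search_list

# Hypothetical data; the real vars_dicts are built from os/package metadata.
vars_dict = {'baseinfo': {'name': 'server01'}, 'os_config': {'timezone': 'UTC'}}
tmpl = Template('$name uses timezone $timezone',
                searchList=build_search_list(vars_dict))
print(tmpl.respond())  # server01 uses timezone UTC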

View File

@ -29,21 +29,28 @@ import logging
class DeployManager(object):
"""Deploy manager module."""
def __init__(self, adapter_info, cluster_info, hosts_info):
"""Init deploy manager."""
self.os_installer = None
self.pk_installer = None
# Get OS installer
os_installer_name = adapter_info[const.OS_INSTALLER][const.NAME]
pk_installer_name = adapter_info[const.PK_INSTALLER][const.NAME]
os_hosts_info = self._get_hosts_for_os_installation(hosts_info)
self.os_installer = DeployManager._get_installer(OSInstaller,
os_installer_name,
adapter_info,
cluster_info,
os_hosts_info)
self.pk_installer = DeployManager._get_installer(PKInstaller,
pk_installer_name,
adapter_info,
cluster_info,
hosts_info)
# Get package installer
pk_info = adapter_info.setdefault(const.PK_INSTALLER, {})
if pk_info:
pk_installer_name = pk_info[const.NAME]
self.pk_installer = DeployManager._get_installer(PKInstaller,
pk_installer_name,
adapter_info,
cluster_info,
hosts_info)
@staticmethod
def _get_installer(installer_type, name, adapter_info, cluster_info,
@ -56,14 +63,15 @@ class DeployManager(object):
def clean_progress(self):
"""Clean previous installation log and progress."""
# Clean DB
# db_api.cluster.clean_progress(self.cluster_id)
# db_api.cluster.clean_progress(self.cluster_id, self.host_id_list)
self.clean_os_installtion_progress()
self.clean_package_installation_progress()
def clean_os_installtion_progress(self):
# OS installer cleans previous installing progress.
if self.os_installer:
self.os_installer.clean_progress()
def clean_package_installation_progress(self):
# Package installer cleans previous installing progress.
if self.pk_installer:
self.pk_installer.clean_progress()
@ -71,48 +79,70 @@ class DeployManager(object):
def prepare_for_deploy(self):
self.clean_progress()
def deploy(self):
"""Deploy the cluster."""
deploy_config = {}
pk_instl_confs = {}
def deploy_os(self):
"""Deploy OS to hosts which need to in the cluster. Return OS deployed
config.
"""
if not self.os_installer:
return {}
pk_installer_config = {}
if self.pk_installer:
# generate target system config which will be installed by OS
# installer right after OS installation is completed.
pk_instl_confs = self.pk_installer.generate_installer_config()
logging.debug('[DeployManager][deploy] pk_instl_confs is %s',
pk_instl_confs)
pk_installer_config = self.pk_installer.generate_installer_config()
logging.debug('[DeployManager]package installer config is %s',
pk_installer_config)
if self.os_installer:
logging.info('[DeployManager][deploy]get OS installer %s',
self.os_installer)
# Send package installer config info to OS installer.
if pk_instl_confs:
self.os_installer.set_package_installer_config(pk_instl_confs)
# Send package installer config info to OS installer.
self.os_installer.set_package_installer_config(pk_installer_config)
# start to deploy OS
os_deploy_config = self.os_installer.deploy()
deploy_config = os_deploy_config
# start to deploy OS
return self.os_installer.deploy()
if self.pk_installer:
logging.info('DeployManager][deploy]get package installer %s',
self.pk_installer)
def deploy_target_system(self):
"""Deploy target system to all hosts in the cluster. Return package
deployed config.
"""
if not self.pk_installer:
return {}
pk_deploy_config = self.pk_installer.deploy()
util.merge_dict(deploy_config, pk_deploy_config)
return self.pk_installer.deploy()
return deploy_config
def deploy(self):
"""Deploy the cluster."""
deployed_config = self.deploy_os()
package_deployed_config = self.deploy_target_system()
util.merge_dict(deployed_config, package_deployed_config)
return deployed_config
def redeploy_os(self):
"""Redeploy OS for this cluster without changing configurations."""
if not self.os_installer:
logging.info("Redeploy_os: No OS installer found!")
return
self.os_installer.redeploy()
logging.info("Start to redeploy OS for cluster.")
def redeploy_target_system(self):
"""Redeploy target system for the cluster without changing config."""
if not self.pk_installer:
logging.info("Redeploy_target_system: No package installer found!")
return
self.pk_installer.redeploy()
logging.info("Start to redeploy target system.")
def redeploy(self):
"""Redeploy the cluster without changing configurations."""
if self.os_installer:
self.os_installer.redeploy()
self.redeploy_os()
self.redeploy_target_system()
if self.pk_installer:
self.pk_installer.redeploy()
def remove_hosts(self):
"""Remove hosts from both OS and package installlers server side."""
if self.os_installer:
def remove_hosts(self, package_only=False):
"""Remove hosts from both OS and/or package installlers server side."""
if self.os_installer and not package_only:
self.os_installer.delete_hosts()
if self.pk_installer:
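Note on the reorganized deploy(): it relies on util.merge_dict to fold the package installer's result into the OS installer's result. A rough stand-in for that merge (compass' own util.merge_dict may differ in details), shown only to illustrate the combined return value:

def merge_dict(lhs, rhs):
    # Recursively fold rhs into lhs in place; rhs wins on scalar conflicts.
    for key, value in rhs.items():
        if isinstance(value, dict) and isinstance(lhs.get(key), dict):
            merge_dict(lhs[key], value)
        else:
            lhs[key] = value

# Hypothetical installer results for illustration only.
os_result = {'cluster': {'id': 1, 'deployed_os_config': {'language': 'EN'}}}
pk_result = {'cluster': {'id': 1, 'deployed_package_config': {'roles_mapping': {}}}}
merge_dict(os_result, pk_result)
# os_result now carries both deployed_os_config and deployed_package_config.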

View File

@ -95,7 +95,8 @@ class BaseInstaller(object):
return (True, metadata[key])
temp = deepcopy(metadata)
del temp['_self']
if '_self' in temp:
del temp['_self']
meta_key = temp.keys()[0]
if meta_key.startswith("$"):
return (False, metadata[meta_key])
@ -113,8 +114,8 @@ class BaseInstaller(object):
else:
new_output = output
self._get_tmpl_vars_helper(sub_meta, config_value,
new_output)
self._get_tmpl_vars_helper(sub_meta, config_value, new_output)
elif mapping_to:
output[mapping_to] = config_value
@ -125,14 +126,22 @@ class BaseInstaller(object):
logging.debug("vars_dict is %s", vars_dict)
return {}
tmpl = Template(file=tmpl_dir, searchList=[vars_dict])
searchList = []
copy_vars_dict = deepcopy(vars_dict)
for key, value in vars_dict.iteritems():
if isinstance(value, dict):
temp = copy_vars_dict[key]
del copy_vars_dict[key]
searchList.append(temp)
searchList.append(copy_vars_dict)
tmpl = Template(file=tmpl_dir, searchList=searchList)
config = json.loads(tmpl.respond(), encoding='utf-8')
config = json.loads(json.dumps(config), encoding='utf-8')
return config
@classmethod
def get_installer(cls, name, path, adapter_info, cluster_info, hosts_info):
installer = None
try:
mod_file, path, descr = imp.find_module(name, [path])
if mod_file:
@ -145,7 +154,7 @@ class BaseInstaller(object):
logging.error('No such module found: %s', name)
logging.exception(exc)
return installer
return None
class OSInstaller(BaseInstaller):
@ -162,6 +171,10 @@ class OSInstaller(BaseInstaller):
@classmethod
def get_installer(cls, name, adapter_info, cluster_info, hosts_info):
if name is None:
logging.info("Installer name is None! No OS installer loaded!")
return None
path = os.path.join(cls.INSTALLER_BASE_DIR, name)
installer = super(OSInstaller, cls).get_installer(name, path,
adapter_info,
@ -211,6 +224,10 @@ class PKInstaller(BaseInstaller):
@classmethod
def get_installer(cls, name, adapter_info, cluster_info, hosts_info):
if name is None:
logging.info("Install name is None. No package installer loaded!")
return None
path = os.path.join(cls.INSTALLER_BASE_DIR, name)
installer = super(PKInstaller, cls).get_installer(name, path,
adapter_info,
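For context, get_installer above loads installer modules dynamically with imp. A self-contained sketch of that pattern, with a hypothetical module name and search path:

import imp
import logging

def load_installer_module(name, path):
    # Mirrors the imp.find_module pattern used by BaseInstaller.get_installer;
    # returns the loaded module or None when it cannot be found.
    try:
        mod_file, mod_path, descr = imp.find_module(name, [path])
        if mod_file:
            return imp.load_module(name, mod_file, mod_path, descr)
    except ImportError as exc:
        logging.error('No such module found: %s', name)
        logging.exception(exc)
    return None

# e.g. load_installer_module('cobbler', '/path/to/os_installers')  # hypothetical path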

View File

@ -40,6 +40,7 @@ class CobblerInstaller(OSInstaller):
SYS_TMPL = 'system.tmpl'
SYS_TMPL_NAME = 'system.tmpl'
PROFILE = 'profile'
POWER_TYPE = 'power_type'
POWER_ADDR = 'power_address'
POWER_USER = 'power_user'
@ -119,6 +120,7 @@ class CobblerInstaller(OSInstaller):
host_ids = self.config_manager.get_host_id_list()
if not host_ids:
# No hosts need to install OS
logging.info("Cobbler: No host needs to install OS.")
return {}
os_version = self.config_manager.get_os_version()
@ -138,18 +140,19 @@ class CobblerInstaller(OSInstaller):
self.update_host_config_to_cobbler(host_id, hostname, vars_dict)
# set host deploy config
temp = {}
temp.update(vars_dict[const.HOST])
host_config = {const.DEPLOYED_OS_CONFIG: temp}
host_config = {}
host_config[const.DEPLOYED_OS_CONFIG] = vars_dict[const.OS_CONFIG]
hosts_deploy_config[host_id] = host_config
# sync to cobbler and trigger installation.
self._sync()
cluster_config = global_vars_dict.setdefault(const.OS_CONFIG, {})
return {
const.CLUSTER: {
const.ID: self.config_manager.get_cluster_id(),
const.DEPLOYED_OS_CONFIG: global_vars_dict[const.CLUSTER]
const.DEPLOYED_OS_CONFIG: cluster_config
},
const.HOSTS: hosts_deploy_config
}
@ -157,7 +160,7 @@ class CobblerInstaller(OSInstaller):
def clean_progress(self):
"""clean log files and config for hosts which to deploy."""
clusterhost_list = self.config_manager.get_host_id_list()
log_dir_prefix = compass_setting.INSTALLATION_LOGDIR[self.NAME]
log_dir_prefix = compass_setting.INSTALLATION_LOGDIR[NAME]
for host_id in clusterhost_list:
hostname = self.config_manager.get_hostname(host_id)
@ -166,10 +169,14 @@ class CobblerInstaller(OSInstaller):
def redeploy(self):
"""redeploy hosts."""
host_ids = self.config_manager.get_host_id_list()
if not host_ids:
logging.info("Cobbler: hostlist is None, no host is redeployed")
return
for host_id in host_ids:
hostname = self.config_manager.get_hostname(host_id)
sys_id = self._get_system_id(hostname)
sys_id = self._get_create_system(hostname)
if sys_id:
# enable netboot for this host
self._netboot_enabled(sys_id)
self._sync()
@ -195,19 +202,20 @@ class CobblerInstaller(OSInstaller):
logging.debug("Failed to sync cobbler server! Error: %s" % ex)
raise ex
def _get_system_config(self, host_id, vars_dict):
def _generate_system_config(self, host_id, host_vars_dict):
"""Generate updated system config from the template.
:param vars_dict: dict of variables for the system template to
generate system config dict.
:param host_vars_dict: dict of variables for the system template to
generate system config dict for each host.
"""
os_version = self.config_manager.get_os_version()
system_tmpl_path = os.path.join(os.path.join(self.tmpl_dir,
os_version),
self.SYS_TMPL_NAME)
system_config = self.get_config_from_template(system_tmpl_path,
vars_dict)
tmpl_path = os.path.join(
os.path.join(self.tmpl_dir, os_version), self.SYS_TMPL_NAME
)
system_config = self.get_config_from_template(tmpl_path,
host_vars_dict)
# update package config info to cobbler ksmeta
if self.pk_installer_config and host_id in self.pk_installer_config:
pk_config = self.pk_installer_config[host_id]
@ -226,7 +234,7 @@ class CobblerInstaller(OSInstaller):
profile = result[0]
return profile
def _get_system_id(self, hostname):
def _get_create_system(self, hostname):
"""get system reference id for the host."""
sys_name = hostname
sys_id = None
@ -268,11 +276,11 @@ class CobblerInstaller(OSInstaller):
log_dir = os.path.join(log_dir_prefix, system_name)
shutil.rmtree(log_dir, True)
def update_host_config_to_cobbler(self, host_id, hostname, vars_dict):
def update_host_config_to_cobbler(self, host_id, hostname, host_vars_dict):
"""update host config and upload to cobbler server."""
sys_id = self._get_system_id(hostname)
sys_id = self._get_create_system(hostname)
system_config = self._get_system_config(host_id, vars_dict)
system_config = self._generate_system_config(host_id, host_vars_dict)
logging.debug('%s system config to update: %s', host_id, system_config)
self._update_system_config(sys_id, system_config)
@ -301,36 +309,40 @@ class CobblerInstaller(OSInstaller):
vars_dict = {}
if global_vars_dict:
# Set cluster template vars_dict from cluster os_config.
vars_dict.update(deepcopy(global_vars_dict[const.CLUSTER]))
vars_dict = deepcopy(global_vars_dict)
# Set hostname, MAC address and hostname, networks, dns and so on.
host_baseinfo = self.config_manager.get_host_baseinfo(host_id)
vars_dict[const.BASEINFO] = host_baseinfo
# Set profile
if self.PROFILE in kwargs:
profile = kwargs[self.PROFILE]
else:
os_version = self.config_manager.get_os_version()
profile = self._get_profile_from_server(os_version)
vars_dict[self.PROFILE] = profile
# Set hostname, MAC address and hostname, networks, dns and so on.
host_baseinfo = self.config_manager.get_host_baseinfo(host_id)
util.merge_dict(vars_dict, host_baseinfo)
vars_dict[const.BASEINFO][self.PROFILE] = profile
os_config_metadata = self.config_manager.get_os_config_metadata()
host_os_config = self.config_manager.get_host_os_config(host_id)
metadata = self.config_manager.get_os_config_metadata()
os_config = self.config_manager.get_host_os_config(host_id)
# Get template variables values from host os_config
host_vars_dict = self.get_tmpl_vars_from_metadata(os_config_metadata,
host_os_config)
util.merge_dict(vars_dict, host_vars_dict)
return {const.HOST: vars_dict}
host_vars_dict = self.get_tmpl_vars_from_metadata(metadata, os_config)
util.merge_dict(
vars_dict.setdefault(const.OS_CONFIG, {}), host_vars_dict
)
return vars_dict
def _get_cluster_tmpl_vars_dict(self):
os_config_metadata = self.config_manager.get_os_config_metadata()
cluster_os_config = self.config_manager.get_cluster_os_config()
metadata = self.config_manager.get_os_config_metadata()
os_config = self.config_manager.get_cluster_os_config()
vars_dict = self.get_tmpl_vars_from_metadata(os_config_metadata,
cluster_os_config)
return {const.CLUSTER: vars_dict}
cluster_vars_dict = {}
cluster_vars_dict[const.OS_CONFIG] = \
self.get_tmpl_vars_from_metadata(metadata, os_config)
return cluster_vars_dict
def _check_and_set_system_impi(self, host_id, sys_id):
if not sys_id:
@ -358,7 +370,7 @@ class CobblerInstaller(OSInstaller):
def poweron(self, host_id):
hostname = self.config_manager.get_hostname(host_id)
sys_id = self._get_system_id(hostname)
sys_id = self._get_create_system(hostname)
if not self._check_and_set_system_impi(sys_id):
return
@ -367,7 +379,7 @@ class CobblerInstaller(OSInstaller):
def poweroff(self, host_id):
hostname = self.config_manager.get_hostname(host_id)
sys_id = self._get_system_id(hostname)
sys_id = self._get_create_system(hostname)
if not self._check_and_set_system_impi(sys_id):
return
@ -376,7 +388,7 @@ class CobblerInstaller(OSInstaller):
def reset(self, host_id):
hostname = self.config_manager.get_hostname(host_id)
sys_id = self._get_system_id(hostname)
sys_id = self._get_create_system(hostname)
if not self._check_and_set_system_impi(sys_id):
return

View File

@ -62,9 +62,10 @@ class ChefInstaller(PKInstaller):
@classmethod
def get_tmpl_path(cls, adapter_name):
tmpl_path = os.path.join(os.path.join(compass_setting.TMPL_DIR,
'chef_installer'),
adapter_name)
tmpl_path = os.path.join(
os.path.join(compass_setting.TMPL_DIR, 'chef_installer'),
adapter_name
)
return tmpl_path
def __repr__(self):
@ -91,7 +92,7 @@ class ChefInstaller(PKInstaller):
"""Generate environment name."""
return "-".join((dist_sys_name, cluster_name))
def get_databag(self, databag_name):
def get_create_databag(self, databag_name):
"""Get databag object from chef server. Create the databag if it
does not exist.
"""
@ -104,7 +105,7 @@ class ChefInstaller(PKInstaller):
return databag
def get_node(self, node_name, env_name):
def get_create_node(self, node_name, env_name=None):
"""Get chef node if existing, otherwise create one and set its
environment.
@ -131,8 +132,9 @@ class ChefInstaller(PKInstaller):
def delete_node(self, host_id):
fullname = self.config_manager.get_host_fullname(host_id)
node = self.get_node(fullname)
self._delete_node(node)
node = self.get_create_node(fullname)
if node:
self._delete_node(node)
def _delete_node(self, node):
"""clean node attributes about target system."""
@ -143,7 +145,7 @@ class ChefInstaller(PKInstaller):
client_name = node_name
# Clean log for this node first
log_dir_prefix = compass_setting.INSTALLATION_LOGDIR[self.NAME]
log_dir_prefix = compass_setting.INSTALLATION_LOGDIR[NAME]
self._clean_log(log_dir_prefix, node_name)
# Delete node and its client on chef server
@ -152,11 +154,10 @@ class ChefInstaller(PKInstaller):
client = chef.Client(client_name, api=self.chef_api)
client.delete()
logging.debug('delete node %s', node_name)
log_dir_prefix = compass_setting.INSTALLATION_LOGDIR[self.NAME]
self._clean_log(log_dir_prefix, node_name)
except Exception as error:
logging.debug(
'failed to delete node %s, error: %s', node_name, error)
logging.debug('failed to delete node %s, error: %s',
node_name,
error)
def _add_roles(self, node, roles):
"""Add roles to the node.
@ -179,14 +180,14 @@ class ChefInstaller(PKInstaller):
node.save()
logging.debug('Runlist for node %s is %s', node.name, node.run_list)
def _get_node_attributes(self, roles, vars_dict):
"""Get node attributes from templates according to its roles. The
def _generate_node_attributes(self, roles, host_vars_dict):
"""Generate node attributes from templates according to its roles. The
templates are named by roles without '-'. Return the dictionary
of attributes defined in the templates.
:param list roles: The roles for this node, used to load the
specific template.
:param dict vars_dict: The dictionary used in cheetah searchList to
:param dict host_vars_dict: The dict used in cheetah searchList to
render attributes from templates.
"""
if not roles:
@ -199,12 +200,13 @@ class ChefInstaller(PKInstaller):
tmpl_name = '.'.join((role, 'tmpl'))
node_tmpl = os.path.join(node_tmpl_dir, tmpl_name)
util.merge_dict(
node_attr, self.get_config_from_template(node_tmpl, vars_dict)
node_attr,
self.get_config_from_template(node_tmpl, host_vars_dict)
)
return node_attr
def update_node(self, node, roles, vars_dict):
def update_node(self, node, roles, host_vars_dict):
"""Update node attributes to chef server."""
if node is None:
raise Exception("Node is None!")
@ -217,23 +219,25 @@ class ChefInstaller(PKInstaller):
self._add_roles(node, roles)
# Update node attributes.
node_config = self._get_node_attributes(roles, vars_dict)
node_config = self._generate_node_attributes(roles, host_vars_dict)
available_attrs = ['default', 'normal', 'override']
for attr in node_config:
node.attributes[attr] = node_config[attr]
if attr in available_attrs:
# print node_config[attr]
setattr(node, attr, node_config[attr])
node.save()
def _get_env_attributes(self, vars_dict):
def _generate_env_attributes(self, global_vars_dict):
"""Get environment attributes from env templates."""
env_tmpl_fname = self.config_manager.get_cluster_flavor_template()
tmpl_name = self.config_manager.get_cluster_flavor_template()
env_tmpl_path = os.path.join(
os.path.join(self.tmpl_dir, self.ENV_TMPL_DIR), env_tmpl_fname
os.path.join(self.tmpl_dir, self.ENV_TMPL_DIR), tmpl_name
)
env_attri = self.get_config_from_template(env_tmpl_path, vars_dict)
return env_attri
return self.get_config_from_template(env_tmpl_path, global_vars_dict)
def get_environment(self, env_name):
def get_create_environment(self, env_name):
import chef
env = chef.Environment(env_name, api=self.chef_api)
env.save()
@ -245,7 +249,7 @@ class ChefInstaller(PKInstaller):
setattr(env, attr, env_attrs[attr])
env.save()
def update_environment(self, env_name, vars_dict):
def update_environment(self, env_name, global_vars_dict):
"""Generate environment attributes based on the template file and
upload it to chef server.
@ -253,17 +257,14 @@ class ChefInstaller(PKInstaller):
:param dict global_vars_dict: The dictionary used in the cheetah searchList to
render attributes from templates.
"""
env_config = self._get_env_attributes(vars_dict)
env = self.get_environment(env_name)
env_config = self._generate_env_attributes(global_vars_dict)
env = self.get_create_environment(env_name)
self._update_env(env, env_config)
def _get_databagitem_attributes(self, tmpl_dir, vars_dict):
databagitem_attrs = self.get_config_from_template(tmpl_dir,
vars_dict)
def _generate_databagitem_attributes(self, tmpl_dir, vars_dict):
return self.get_config_from_template(tmpl_dir, vars_dict)
return databagitem_attrs
def update_databags(self, vars_dict):
def update_databags(self, global_vars_dict):
"""Update databag item attributes.
:param dict global_vars_dict: The dictionary used to get attributes from
@ -277,14 +278,15 @@ class ChefInstaller(PKInstaller):
databags_dir = os.path.join(self.tmpl_dir, self.DATABAG_TMPL_DIR)
for databag_name in databag_names:
databag_tmpl = os.path.join(databags_dir, databag_name)
databagitem_attrs = self._get_databagitem_attributes(databag_tmpl,
vars_dict)
databagitem_attrs = self._generate_databagitem_attributes(
databag_tmpl, global_vars_dict
)
if not databagitem_attrs:
logging.info("Databag template not found or vars_dict is None")
logging.info("databag template is %s", databag_tmpl)
continue
databag = self.get_databag(databag_name)
databag = self.get_create_databag(databag_name)
for item, item_values in databagitem_attrs.iteritems():
databagitem = chef.DataBagItem(databag, item, self.chef_api)
for key, value in item_values.iteritems():
@ -292,52 +294,75 @@ class ChefInstaller(PKInstaller):
databagitem.save()
def _get_host_tmpl_vars(self, host_id, global_vars_dict):
"""Get templates variables dictionary for cheetah searchList based
"""Generate templates variables dictionary for cheetah searchList based
on host package config.
:param int host_id: The host ID.
:param dict global_vars_dict: The vars_dict generated from the cluster-level
package_config.
"""
vars_dict = {}
if global_vars_dict:
temp = global_vars_dict[const.CLUSTER][const.DEPLOYED_PK_CONFIG]
vars_dict[const.DEPLOYED_PK_CONFIG] = deepcopy(temp)
The output format is the same as cluster_vars_dict.
"""
host_vars_dict = {}
if global_vars_dict:
host_vars_dict = deepcopy(global_vars_dict)
# Update host basic info
host_baseinfo = self.config_manager.get_host_baseinfo(host_id)
util.merge_dict(vars_dict, host_baseinfo)
util.merge_dict(
host_vars_dict.setdefault(const.BASEINFO, {}), host_baseinfo
)
pk_config = self.config_manager.get_host_package_config(host_id)
if pk_config:
# Get host template variables and merge to vars_dict
metadata = self.config_manager.get_pk_config_meatadata()
host_dict = self.get_tmpl_vars_from_metadata(metadata, pk_config)
#util.merge_dict(vars_dict[const.DEPLOYED_PK_CONFIG], host_dict)
vars_dict[const.DEPLOYED_PK_CONFIG].update(host_dict)
meta_dict = self.get_tmpl_vars_from_metadata(metadata, pk_config)
util.merge_dict(
host_vars_dict.setdefault(const.PK_CONFIG, {}), meta_dict
)
# Set role_mapping for host
# Override role_mapping for host if host role_mapping exists
mapping = self.config_manager.get_host_roles_mapping(host_id)
vars_dict[const.DEPLOYED_PK_CONFIG][const.ROLES_MAPPING] = mapping
if mapping:
host_vars_dict[const.ROLES_MAPPING] = mapping
return {const.HOST: vars_dict}
return host_vars_dict
def _get_cluster_tmpl_vars(self):
vars_dict = {}
"""Generate template variables dict based on cluster level config.
The vars_dict will be:
{
"baseinfo": {
"id":1,
"name": "cluster01",
...
},
"package_config": {
.... //mapped from original package config based on metadata
},
"role_mapping": {
....
}
}
"""
cluster_vars_dict = {}
# set cluster basic information to vars_dict
cluster_baseinfo = self.config_manager.get_cluster_baseinfo()
util.merge_dict(vars_dict, cluster_baseinfo)
cluster_vars_dict[const.BASEINFO] = cluster_baseinfo
# get and set template variables from cluster package config.
pk_metadata = self.config_manager.get_pk_config_meatadata()
pk_config = self.config_manager.get_cluster_package_config()
meta_dict = self.get_tmpl_vars_from_metadata(pk_metadata, pk_config)
vars_dict[const.DEPLOYED_PK_CONFIG] = meta_dict
cluster_vars_dict[const.PK_CONFIG] = meta_dict
# get and set roles_mapping to vars_dict
mapping = self.config_manager.get_cluster_roles_mapping()
vars_dict[const.DEPLOYED_PK_CONFIG][const.ROLES_MAPPING] = mapping
cluster_vars_dict[const.ROLES_MAPPING] = mapping
return {const.CLUSTER: vars_dict}
return cluster_vars_dict
def deploy(self):
"""Start to deploy a distributed system. Return both cluster and hosts
@ -359,6 +384,10 @@ class ChefInstaller(PKInstaller):
}
}
"""
host_list = self.config_manager.get_host_id_list()
if not host_list:
return {}
adapter_name = self.config_manager.get_adapter_name()
cluster_name = self.config_manager.get_clustername()
env_name = self.get_env_name(adapter_name, cluster_name)
@ -371,34 +400,41 @@ class ChefInstaller(PKInstaller):
# Update Databag item
self.update_databags(global_vars_dict)
host_list = self.config_manager.get_host_id_list()
hosts_deployed_configs = {}
for host_id in host_list:
node_name = self.config_manager.get_host_fullname(host_id)
roles = self.config_manager.get_host_roles(host_id)
node = self.get_node(node_name, env_name)
node = self.get_create_node(node_name, env_name)
vars_dict = self._get_host_tmpl_vars(host_id, global_vars_dict)
self.update_node(node, roles, vars_dict)
# set each host deployed config
tmp = {}
host_config = {}
tmp.update(vars_dict[const.HOST][const.DEPLOYED_PK_CONFIG])
temp = vars_dict.setdefault(const.PK_CONFIG, {})
temp[const.ROLES_MAPPING] = vars_dict.setdefault(
const.ROLES_MAPPING, {}
)
host_config = {
host_id: {const.DEPLOYED_PK_CONFIG: tmp}
host_id: {const.DEPLOYED_PK_CONFIG: temp}
}
hosts_deployed_configs.update(host_config)
# set cluster deployed config
cl_config = {}
cl_config.update(global_vars_dict)
cluster_config = {}
cluster_config = global_vars_dict.setdefault(const.PK_CONFIG, {})
cluster_config[const.ROLES_MAPPING] = global_vars_dict.setdefault(
const.ROLES_MAPPING, {}
)
output = {}
output.update(cl_config)
output.update({const.HOSTS: hosts_deployed_configs})
return output
return {
const.CLUSTER: {
const.ID: self.config_manager.get_cluster_id(),
const.DEPLOYED_PK_CONFIG: cluster_config
},
const.HOSTS: hosts_deployed_configs
}
def generate_installer_config(self):
"""Render chef config file (client.rb) by OS installing right after

View File

@ -19,6 +19,7 @@ __author__ = "Grace Yu (grace.yu@huawei.com)"
# General keywords
BASEINFO = 'baseinfo'
CLUSTER = 'cluster'
HOST = 'host'
HOSTS = 'hosts'

View File

@ -42,7 +42,7 @@ class TestCobblerInstaller(unittest2.TestCase):
super(TestCobblerInstaller, self).setUp()
self.test_cobbler = self._get_cobbler_installer()
self.expected_host_vars_dict = {
"host": {
"baseinfo": {
"mac": "00:0c:29:3e:60:e9",
"name": "server01.test",
"profile": "Ubuntu-12.04-x86_64",
@ -64,7 +64,9 @@ class TestCobblerInstaller(unittest2.TestCase):
"is_promiscuous": False,
"subnet": "172.16.1.0/24"
}
},
}
},
"os_config": {
"partition": {
"/home": {
"vol_size": 50,
@ -171,7 +173,7 @@ class TestCobblerInstaller(unittest2.TestCase):
}
host_id = 1
self.test_cobbler.set_package_installer_config(package_config)
output = self.test_cobbler._get_system_config(
output = self.test_cobbler._generate_system_config(
host_id, self.expected_host_vars_dict)
self.maxDiff = None
self.assertEqual(expected_system_config, output)
@ -214,28 +216,6 @@ class TestCobblerInstaller(unittest2.TestCase):
"hosts": {
1: {
"deployed_os_config": {
"mac": "00:0c:29:3e:60:e9",
"name": "server01.test",
"hostname": "server01",
"profile": "Ubuntu-12.04-x86_64",
"reinstall_os": True,
"dns": "server01.test.ods.com",
"networks": {
"vnet0": {
"ip": "12.234.32.100",
"netmask": "255.255.255.0",
"is_mgmt": True,
"is_promiscuous": False,
"subnet": "12.234.32.0/24"
},
"vnet1": {
"ip": "172.16.1.1",
"netmask": "255.255.255.0",
"is_mgmt": False,
"is_promiscuous": False,
"subnet": "172.16.1.0/24"
}
},
"language": "EN",
"timezone": "UTC",
"gateway": "10.145.88.1",
@ -266,28 +246,6 @@ class TestCobblerInstaller(unittest2.TestCase):
},
2: {
"deployed_os_config": {
"mac": "00:0c:29:3e:60:a1",
"name": "server02.test",
"hostname": "server02",
"profile": "Ubuntu-12.04-x86_64",
"reinstall_os": True,
"dns": "server02.test.ods.com",
"networks": {
"eth0": {
"ip": "12.234.32.101",
"netmask": "255.255.255.0",
"is_mgmt": True,
"is_promiscuous": False,
"subnet": "12.234.32.0/24"
},
"eth1": {
"ip": "172.16.1.2",
"netmask": "255.255.255.0",
"is_mgmt": False,
"is_promiscuous": False,
"subnet": "172.16.1.0/24"
}
},
"language": "EN",
"timezone": "UTC",
"gateway": "12.234.32.1",

View File

@ -55,9 +55,10 @@ class TestChefInstaller(unittest2.TestCase):
hosts_info)
ChefInstaller.get_tmpl_path = Mock()
test_tmpl_dir = os.path.join(os.path.join(config_data.test_tmpl_dir,
'chef_installer'),
'openstack_icehouse')
test_tmpl_dir = os.path.join(
os.path.join(config_data.test_tmpl_dir, 'chef_installer'),
'openstack_icehouse'
)
ChefInstaller.get_tmpl_path.return_value = test_tmpl_dir
ChefInstaller._get_chef_api = Mock()
@ -82,7 +83,8 @@ class TestChefInstaller(unittest2.TestCase):
}
}
}
output = self.test_chef._get_node_attributes(['os-compute'], vars_dict)
output = self.test_chef._generate_node_attributes(['os-compute'],
vars_dict)
self.maxDiff = None
self.assertDictEqual(expected_node_attr, output)
@ -131,29 +133,27 @@ class TestChefInstaller(unittest2.TestCase):
}
}
vars_dict = self.test_chef._get_cluster_tmpl_vars()
output = self.test_chef._get_env_attributes(vars_dict)
output = self.test_chef._generate_env_attributes(vars_dict)
self.maxDiff = None
self.assertDictEqual(expected_env, output)
def test_get_databagitem_attributes(self):
vars_dict = {
"cluster": {
"deployed_package_config": {
"service_credentials": {
"nova": {
"username": "nova",
"password": "compute"
}
"package_config": {
"service_credentials": {
"nova": {
"username": "nova",
"password": "compute"
}
},
"users_credentials": {
"ksadmin": {
"username": "ksadmin",
"password": "ksadmin"
},
"users_credentials": {
"ksadmin": {
"username": "ksadmin",
"password": "ksadmin"
},
"demo": {
"username": "demo",
"password": "demo"
}
"demo": {
"username": "demo",
"password": "demo"
}
}
}
@ -186,8 +186,8 @@ class TestChefInstaller(unittest2.TestCase):
databags = self.test_chef.get_chef_databag_names()
for bag in databags:
tmpl_path = os.path.join(databag_dir, '.'.join((bag, 'tmpl')))
output = self.test_chef._get_databagitem_attributes(tmpl_path,
vars_dict)
output = self.test_chef._generate_databagitem_attributes(tmpl_path,
vars_dict)
self.maxDiff = None
self.assertDictEqual(expected_output[bag], output)
@ -205,8 +205,6 @@ class TestChefInstaller(unittest2.TestCase):
expected_output = {
"cluster": {
"id": 1,
"name": "test",
"os_name": "Ubuntu-12.04-x86_64",
"deployed_package_config": {
"service_credentials": {
"mq": {
@ -409,7 +407,7 @@ class TestChefInstaller(unittest2.TestCase):
}
self.test_chef.update_environment = Mock()
self.test_chef.update_databags = Mock()
self.test_chef.get_node = Mock()
self.test_chef.get_create_node = Mock()
self.test_chef.update_node = Mock()
output = self.test_chef.deploy()

View File

@ -1,9 +1,6 @@
#set aval_services = ['nova', 'horizon', 'keystone']
#set config = $cluster.deployed_package_config
#set service_config = {}
#if "service_credentials" in $config:
#set service_config = $cluster.deployed_package_config.service_credentials
#end if
#set service_config = $getVar('service_credentials', {})
#set databagitems = {}
#for $service in $aval_services:
#set databagitems[$service] = {$service: $service}
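The rewritten templates lean on $getVar so optional values fall back to a default instead of raising NotFound when a key is absent from the searchList. A small illustration with made-up values:

from Cheetah.Template import Template

# Hypothetical searchList entries standing in for the flattened vars_dict.
search_list = [{'ntp_server': '10.0.0.1'}]
src = ("ntp: $getVar('ntp_server', 'pool.ntp.org')\n"
       "proxy: $getVar('http_proxy', '')")
print(Template(src, searchList=search_list).respond())
# ntp_server resolves from the searchList; http_proxy falls back to ''.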

View File

@ -1,9 +1,5 @@
#set required_services = ['openstack-image', 'openstack-compute', 'openstack-block-storage', 'openstack-orchestration', 'openstack-network', 'rbd']
#set config = $cluster.deployed_package_config
#set services_config = {}
#if "services_credentials" in $config:
#set services_config = $cluster.deployed_package_config.services_credentials
#end if
#set services_config = $getVar('services_credentials', {})
#set databagitems = {}
#for $service in $required_services:

View File

@ -1,9 +1,5 @@
#set required_users = ['admin']
#set config = $cluster.deployed_package_config
#set user_config = {}
#if "users_credentials" in $config:
#set user_config = $cluster.deployed_package_config.users_credentials
#end if
#set user_config = $getVar('users_credentials', {})
#set databagitems = {}
#if user_config:
#for $user, $value in $user_config.iteritems():

View File

@ -1,6 +1,4 @@
#set config = $cluster.deployed_package_config
#set mappings = $config.roles_mapping
#set credentials = $config.service_credentials
#set credentials = $getVar('service_credentials', {})
{
"name": "testing",
"description": "Environment",
@ -16,22 +14,22 @@
"use": false
},
"libvirt": {
"bind_interface": "$mappings.os_compute.management.interface"
"bind_interface": "$os_compute.management.interface"
},
"novnc_proxy": {
"bind_interface": "$mappings.os_controller.management.interface"
"bind_interface": "$os_controller.management.interface"
},
"xvpvnc_proxy": {
"bind_interface": "eth0"
}
},
"db": {
"bind_interface": "$mappings.os_controller.management.interface",
"bind_interface": "$os_controller.management.interface",
"compute": {
"host": "$mappings.os_controller.management.ip"
"host": "$os_controller.management.ip"
},
"identity": {
"host": "$mappings.os_controller.management.ip"
"host": "$os_controller.management.ip"
}
},
"mq": {

View File

@ -3,7 +3,7 @@
"openstack": {
"endpoints": {
"compute-vnc-bind": {
"host":"$host.deployed_package_config.roles_mapping.os_compute.management.ip"
"host":"$os_compute.management.ip"
}
}
}

View File

@ -1,16 +1,16 @@
{
"name": "$host.name",
"hostname": "$host.hostname",
"profile": "$host.profile",
"gateway": "$host.gateway",
"name": "$name",
"hostname": "$hostname",
"profile": "$profile",
"gateway": "$gateway",
#import simplejson as json
#set nameservers = json.dumps($host.nameservers, encoding='utf-8')
#set nameservers = json.dumps($nameservers, encoding='utf-8')
"name_servers": $nameservers,
#set search_path = ' '.join($host.search_path)
#set search_path = ' '.join($search_path)
"name_servers_search": "$search_path",
"proxy": "$host.http_proxy",
"proxy": "$http_proxy",
"modify_interface":
#set networks = $host.networks
#set networks = $networks
#set rekeyed = {}
#set promicnic = ""
#for $nic, $val in $networks.iteritems():
@ -29,8 +29,8 @@
#if $val.is_mgmt:
#set mac_key = '-'.join(('macaddress', $nic))
#set dns_key = '-'.join(('dns', $nic))
#set $rekeyed[$mac_key] = $host.mac
#set $rekeyed[$dns_key] = $host.dns
#set $rekeyed[$mac_key] = $mac
#set $rekeyed[$dns_key] = $dns
#end if
#end for
#set nic_info = json.dumps($rekeyed, encoding='utf-8')
@ -38,7 +38,7 @@
,
"ksmeta":{
#set partition_config = ''
#for k, v in sorted($host.partition.iteritems()):
#for k, v in sorted($partition.iteritems()):
#set path = ''
#if v['vol_percentage']:
#set $path = k + ' ' + str(v['vol_percentage']) + '%'
@ -50,8 +50,8 @@
#set partition_config = partition_config[1:]
"promisc_nics": "$promicnic",
"partition": "$partition_config",
"https_proxy": "$host.https_proxy",
"ntp_server": "$host.ntp_server",
"timezone": "$host.timezone"
"https_proxy": "$https_proxy",
"ntp_server": "$ntp_server",
"timezone": "$timezone"
}
}

View File

@ -26,7 +26,10 @@ import os.path
# default setting
CONFIG_DIR = '/etc/compass'
SQLALCHEMY_DATABASE_URI = 'sqlite://'
INSTALLATION_LOGDIR = ''
INSTALLATION_LOGDIR = {
'CobblerInstaller': '/var/log/cobbler/anamon',
'ChefInstaller': '/var/log/chef'
}
DEFAULT_LOGLEVEL = 'debug'
DEFAULT_LOGDIR = '/tmp'
DEFAULT_LOGINTERVAL = 1
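With INSTALLATION_LOGDIR now keyed by installer name, each installer picks its own log prefix. A tiny illustration of the lookup and cleanup pattern used in the installers above (the host name is hypothetical; the paths are the defaults from this change):

import os
import shutil

INSTALLATION_LOGDIR = {
    'CobblerInstaller': '/var/log/cobbler/anamon',
    'ChefInstaller': '/var/log/chef'
}
log_dir_prefix = INSTALLATION_LOGDIR['CobblerInstaller']
log_dir = os.path.join(log_dir_prefix, 'server01.test')
shutil.rmtree(log_dir, True)  # ignore_errors=True, as in _clean_log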

View File

@ -5,7 +5,10 @@ DATABASE_PASSWORD = 'root'
DATABASE_SERVER = '127.0.0.1:3306'
DATABASE_NAME = 'db'
SQLALCHEMY_DATABASE_URI = '%s://%s:%s@%s/%s' % (DATABASE_TYPE, DATABASE_USER, DATABASE_PASSWORD, DATABASE_SERVER, DATABASE_NAME)
INSTALLATION_LOGDIR = '/var/log/cobbler/anamon'
INSTALLATION_LOGDIR = {
'CobblerInstaller': '/var/log/cobbler/anamon',
'ChefInstaller': '/var/log/chef'
}
DEFAULT_LOGLEVEL = 'debug'
DEFAULT_LOGDIR = '/var/log/compass'
DEFAULT_LOGINTERVAL = 1

View File

@ -1,22 +1,10 @@
#set aval_services = ['nova', 'horizon', 'keystone', 'glance', 'ceilometer', 'neutron', 'cinder', 'heat', 'dash']
#set config = $cluster.deployed_package_config
#set service_config = {}
#if "service_credentials" in $config:
#set service_config = $cluster.deployed_package_config.service_credentials
#end if
#set databagitems = {}
#for $service in $aval_services:
#set databagitems[$service] = {$service: $service}
#end for
#if service_config:
#for $service, $value in $service_config.iteritems():
#if $service in $aval_services:
#set databagitems[$service] = {$service: $value.password}
#end if
#end for
#end if
#import simplejson as json
#set output = json.dumps($databagitems, encoding='utf-8')
$output
{
"nova": {
"nova": "mypass"
},
"horizon": {
"horizon": "horizon"
},
"keystone": {
}
}

View File

@ -1,8 +0,0 @@
{
"openstack_identity_bootstrap_token": {
"openstack_identity_bootstrap_token": "mytoken"
},
"neutron_metadata_secret": {
"neutron_metadata_secret": "secret"
}
}

View File

@ -1,24 +0,0 @@
#set required_services = ['openstack-image', 'openstack-compute', 'openstack-block-storage', 'openstack-orchestration', 'openstack-network', 'rbd']
#set config = $cluster.deployed_package_config
#set services_config = {}
#if "services_credentials" in $config:
#set services_config = $cluster.deployed_package_config.services_credentials
#end if
#set databagitems = {}
#for $service in $required_services:
#if $service not in $databagitems:
#set databagitems[$service] = {$service: $service}
#end if
#end for
#if services_config:
#for $service, $value in $services_config.iteritems():
#if $service in $required_services:
#set databagitems[$service] = {$service: $value.password}
#end if
#end for
#end if
#import simplejson as json
#set output = json.dumps($databagitems, encoding='utf-8')
$output

View File

@ -1,20 +0,0 @@
#set required_users = ['admin']
#set config = $cluster.deployed_package_config
#set user_config = {}
#if "users_credentials" in $config:
#set user_config = $cluster.deployed_package_config.users_credentials
#end if
#set databagitems = {}
#if user_config:
#for $user, $value in $user_config.iteritems():
#set databagitems[$user] = {$value.username: $value.password}
#end for
#end if
#for $user in $required_users:
#if $user not in $databagitems:
#set databagitems[$user] = {$user: $user}
#end if
#end for
#import simplejson as json
#set output = json.dumps($databagitems, encoding='utf-8')
$output

View File

@ -1,9 +1,6 @@
#set config = $cluster.deployed_package_config
#set mappings = $config.roles_mapping
#set credentials = $config.service_credentials
#set allinone_compute_mgmt_nic = $mappings.allinone_compute.management.interface
#set allinone_compute_mgmt_ip = $mappings.allinone_compute.management.ip
#set allinone_compute_mgmt_nic = $mappings.allinone_compute.management.interface
#set credentials = $service_credentials
#set allinone_compute_mgmt_nic = $allinone_compute.management.interface
#set allinone_compute_mgmt_ip = $allinone_compute.management.ip
{
"name": "testing",
"description": "Environment used in testing the upstream cookbooks and reference Chef repository",

View File

@ -0,0 +1,300 @@
#set controller_mgmt_ip = $os_controller.management.ip
{
"name": "testing",
"description": "Environment used in testing the upstream cookbooks and reference Chef repository",
"cookbook_versions": {
},
"json_class": "Chef::Environment",
"chef_type": "environment",
"default_attributes": {
"mysql": {
"server_root_password": "test",
"server_debian_password": "root",
"server_repl_password": "root",
"allow_remote_root": true,
"root_network_acl": "%"
},
"openstack": {
"auth": {
"validate_certs": false
},
"block-storage": {
"syslog": {
"use": false
},
"api": {
"ratelimit": "False"
},
"debug": true
},
"dashboard": {
"use_ssl": "false"
},
"compute": {
#set compute_mgmt_nic = $os_compute_worker.management.interface
"syslog": {
"use": false
},
"libvirt": {
"bind_interface": "$compute_mgmt_nic"
},
"novnc_proxy": {
"bind_interface": "$compute_mgmt_nic"
},
"xvpvnc_proxy": {
"bind_interface": "$compute_mgmt_nic"
},
"ratelimit": {
"api": {
"enabled": false
},
"volume": {
"enabled": false
}
},
"network": {
"service_type": "neutron"
}
},
"network": {
"verbose": "True",
"debug": "True",
"service_plugins": [
"router"
],
"ml2": {
"type_drivers": "gre",
"tenant_network_types": "gre",
"tunnel_id_ranges": "1:1000",
"enable_security_group": "True"
},
"openvswitch": {
"tenant_network_type": "gre",
"enable_tunneling": "True",
"tunnel_id_ranges": "1:1000"
},
"l3": {
"external_network_bridge_interface": "$os_compute_worker.public.interface"
}
},
"db": {
"bind_interface": "$os_controller.management.interface",
#set db_role = $getVar('os_ops_database', None)
#set db_mgmt_ip = $os_controller.management.ip
#if db_role is not None:
#set db_mgmt_ip = $db_role.management.ip
#end if
"compute": {
"host": "$db_mgmt_ip"
},
"identity": {
"host": "$db_mgmt_ip"
},
"image": {
"host": "$db_mgmt_ip"
},
"network": {
"host": "$db_mgmt_ip"
},
"volume": {
"host": "$db_mgmt_ip"
},
"dashboard": {
"host": "$db_mgmt_ip"
},
"telemetry": {
"host": "$db_mgmt_ip"
},
"orchestration": {
"host": "$db_mgmt_ip"
}
},
"developer_mode": true,
"endpoints": {
"db": {
"host": "$db_mgmt_ip"
},
"mq": {
#set msg_queue_ip = controller_mgmt_ip
#if $getVar('os_ops_messaging', None) is not None:
#set msg_queue_ip = $os_ops_messaging.management.ip
#end if
"host": "$msg_queue_ip"
},
"compute-api": {
"host": "$controller_mgmt_ip",
"scheme": "http",
"port": "8774",
"path": "/v2/%(tenant_id)s"
},
"compute-api-bind": {
"bind_interface": "$os_controller.management.interface"
},
"compute-ec2-admin": {
"host": "$controller_mgmt_ip",
"scheme": "http",
"port": "8773",
"path": "/services/Admin"
},
"compute-ec2-api": {
"host": "$controller_mgmt_ip",
"scheme": "http",
"port": "8773",
"path": "/services/Cloud"
},
"compute-novnc": {
"host": "$controller_mgmt_ip",
"scheme": "http",
"port": "6080",
"path": "/vnc_auto.html"
},
"compute-novnc-bind": {
"bind_interface": "$os_controller.management.interface"
},
"vnc_bind": {
"bind_interface": "$os_controller.management.interface"
},
"image-api": {
#set glance_ip = controller_mgmt_ip
#set glance_nic = $os_controller.management.interface
#if $getVar('os_image', None) is not None:
#set glance_ip = $os_image.management.ip
#set glance_nic = $os_image.management.interface
#end if
"host": "$glance_ip",
"scheme": "http",
"port": "9292",
"path": "/v2"
},
"image-api-bind": {
"bind_interface": "$glance_nic"
},
"image-registry": {
"host": "$glance_ip",
"scheme": "http",
"port": "9191",
"path": "/v2"
},
"image-registry-bind": {
"bind_interface": "$glance_nic"
},
"identity-bind": {
"bind_interface": "$os_controller.management.interface"
},
"identity-api": {
"host": "$controller_mgmt_ip",
"scheme": "http",
"port": "5000",
"path": "/v2.0"
},
"identity-admin": {
"host": "$controller_mgmt_ip",
"scheme": "http",
"port": "35357",
"path": "/v2.0"
},
"block-storage-api": {
"host": "$controller_mgmt_ip",
"scheme": "http",
"port": "8776",
"path": "/v1/%(tenant_id)s"
},
"block-storage-api-bind": {
"bind_interface": "$os_controller.management.interface"
},
"telemetry-api": {
"host": "$controller_mgmt_ip",
"scheme": "http",
"port": "8777",
"path": "/v1"
},
"network-api": {
"host": "$controller_mgmt_ip",
"scheme": "http",
"port": "9696",
"path": ""
},
"network-api-bind": {
"bind_interface": "$os_controller.management.interface"
},
"network-openvswitch": {
"bind_interface": "$os_controller.management.interface"
},
"orchestration-api": {
"host": "$controller_mgmt_ip",
"scheme": "http",
"port": "8004",
"path": "/v1/%(tenant_id)s"
},
"orchestration-api-cfn": {
"host": "$controller_mgmt_ip",
"scheme": "http",
"port": "8000",
"path": "/v1"
}
},
"identity": {
"admin_user": "admin",
"bind_interface": "$os_controller.management.interface",
"catalog": {
"backend": "sql"
},
"debug": true,
"roles": [
"admin",
"member"
],
"syslog": {
"use": false
},
"tenants": [
"admin",
"service"
],
"token": {
"backend": "sql"
},
"users": {
"admin": {
"password": "admin",
"default_tenant": "admin",
"roles": {
"admin": [
"admin"
]
}
}
}
},
"image": {
"api": {
"bind_interface": "$glance_nic"
},
"debug": true,
"registry": {
"bind_interface": "$glance_nic"
},
"syslog": {
"use": false
},
"upload_image": {
"cirros": "http://download.cirros-cloud.net/0.3.2/cirros-0.3.2-x86_64-disk.img"
},
"upload_images": [
"cirros"
]
},
"memcached_servers": [
"$controller_mgmt_ip:11211"
],
"mq": {
"user": "guest",
"password": "test",
"vhost": "/nova",
"network": {
"service_type": "rabbitmq"
}
}
}
}
}

View File

@ -1,9 +0,0 @@
{
"override_attributes": {
"endpoints": {
"compute-vnc-bind": {
"host":"$host.deployed_package_config.roles_mapping.os_compute.management.ip"
}
}
}
}

View File

@ -0,0 +1,9 @@
{
"override": {
"openstack": {
"endpoints": {
"bind-host": "$os_compute_worker.management.ip"
}
}
}
}

View File

@ -1,18 +1,18 @@
{
"name": "$host.hostname",
"hostname": "$host.hostname",
"profile": "$host.profile",
"gateway": "$host.gateway",
"name": "$hostname",
"hostname": "$hostname",
"profile": "$profile",
"gateway": "$gateway",
#import simplejson as json
#set nameservers = json.dumps($host.nameservers, encoding='utf-8')
#set nameservers = json.dumps($nameservers, encoding='utf-8')
"name_servers": $nameservers,
#set search_path = ' '.join($host.search_path)
#set search_path = ' '.join($search_path)
"name_servers_search": "$search_path",
"proxy": "$host.http_proxy",
"proxy": "$getVar('http_proxy', '')",
"modify_interface":
#set networks = $host.networks
#set networks = $networks
#set rekeyed = {}
#set promicnic = ""
#set promic_nic = ""
#for $nic, $val in $networks.iteritems():
#set ip_key = '-'.join(('ipaddress', $nic))
#set netmask_key = '-'.join(('netmask', $nic))
@ -24,13 +24,13 @@
#set $rekeyed[$static_key] = True
#if $val.is_promiscuous:
#set promicnic = $nic
#set promic_nic = $nic
#end if
#if $val.is_mgmt:
#set mac_key = '-'.join(('macaddress', $nic))
#set dns_key = '-'.join(('dns', $nic))
#set $rekeyed[$mac_key] = $host.mac
#set $rekeyed[$dns_key] = $host.dns
#set $rekeyed[$mac_key] = $mac
#set $rekeyed[$dns_key] = $dns
#end if
#end for
#set nic_info = json.dumps($rekeyed, encoding='utf-8')
@ -38,7 +38,7 @@
,
"ksmeta":{
#set partition_config = ''
#for k, v in $host.partition.iteritems():
#for k, v in $partition.iteritems():
#set path = ''
#if v['vol_percentage']:
#set $path = k + ' ' + str(v['vol_percentage']) + '%'
@ -49,15 +49,15 @@
#end for
#set partition_config = partition_config[1:]
#import crypt
#set $password = crypt.crypt($host.server_credentials.password, "az")
#set no_proxy = ','.join($host.no_proxy)
"username": "$host.server_credentials.username",
#set $password = crypt.crypt($server_credentials.password, "az")
#set no_proxy = ','.join($getVar('no_proxy', []))
"username": "$server_credentials.username",
"password": "$password",
"promisc_nics": "$promicnic",
"promisc_nics": "$promic_nic",
"partition": "$partition_config",
"https_proxy": "$host.https_proxy",
"ntp_server": "$host.ntp_server",
"timezone": "$host.timezone",
"https_proxy": "$getVar('https_proxy', '')",
"ntp_server": "$ntp_server",
"timezone": "$timezone",
"ignore_proxy": "$no_proxy"
}
}

View File

@ -1,63 +1,36 @@
{
"name": "$host.hostname",
"hostname": "$host.hostname",
"profile": "$host.profile",
"gateway": "$host.gateway",
#import simplejson as json
#set nameservers = json.dumps($host.nameservers, encoding='utf-8')
"name_servers": $nameservers,
#set search_path = ' '.join($host.search_path)
"name": "$fullname",
"hostname": "$hostname",
"profile": "$profile",
"gateway": "$gateway",
"name_servers": "$name_servers",
"name_servers_search": "$search_path",
"proxy": "$host.http_proxy",
"proxy": "$getVar('http_proxy', '')",
"modify_interface":
#set networks = $host.networks
#import simplejson
#set interfaces = $networks.interfaces
#set rekeyed = {}
#set promicnic = ""
#for $nic, $val in $networks.iteritems():
#set ip_key = '-'.join(('ipaddress', $nic))
#set netmask_key = '-'.join(('netmask', $nic))
#set mgmt_key = '-'.join(('management', $nic))
#set static_key = '-'.join(('static', $nic))
#set $rekeyed[$ip_key] = $val.ip
#set $rekeyed[$netmask_key] = $val.netmask
#set $rekeyed[$mgmt_key] = $val.is_mgmt
#set $rekeyed[$static_key] = True
#if $val.is_promiscuous:
#set promicnic = $nic
#end if
#if $val.is_mgmt:
#set mac_key = '-'.join(('macaddress', $nic))
#set dns_key = '-'.join(('dns', $nic))
#set $rekeyed[$mac_key] = $host.mac
#set $rekeyed[$dns_key] = $host.dns
#for $nic, $val in $interfaces.iteritems():
#set $rekeyed = { "ipaddress-vnet0" : $val.ip, "netmask-vnet0": $val.netmask, "management-vnet0": $val.is_mgmt, "static-vnet0" : True }
#if $val.is_mgmt:
#set $rekeyed["macaddress-vnet0"] = $mac_address
#set $rekeyed["dnsname-vnet0"] = $dns
#end if
#end for
#set nic_info = json.dumps($rekeyed, encoding='utf-8')
$nic_info
#set $str=simplejson.dumps($rekeyed, encoding='utf-8')
$str
,
"ksmeta":{
#set partition_config = ''
#for k, v in $host.partition.iteritems():
#set path = ''
#if v['vol_percentage']:
#set $path = k + ' ' + str(v['vol_percentage']) + '%'
#else:
#set $path = k + ' ' + str(v['vol_size'])
#end if
#set partition_config = ';'.join((partition_config, $path))
#end for
#set partition_config = partition_config[1:]
"username": "$server_credentials.username",
#import crypt
#set $password = crypt.crypt($host.server_credentials.password, "az")
#set no_proxy = ','.join($host.no_proxy)
"username": "$host.server_credentials.username",
#set $password = crypt.crypt($server_credentials.password, "az")
"password": "$password",
"promisc_nics": "$promicnic",
#set f=[(k + ' ' + v['vol_percentage'] or v['vol_size']) for k,v in $partition.iteritems()]
#set $partition_config = '; '.join($f)
"partition": "$partition_config",
"https_proxy": "$host.https_proxy",
"ntp_server": "$host.ntp_server",
"timezone": "$host.timezone",
"ignore_proxy": "$no_proxy"
"https_proxy": "$getVar('https_proxy', '')",
"ignore_proxy": "$getVar('ignore_proxy', '')",
"ntp_server": "$ntp_server"
}
}