a096ddab34
When a service is enabled on multiple roles, the parameters for the
service are global. This change adds an option to provide role-specific
parameters to services and other templates. Two new parameters,
RoleName and RoleParameters, are added to the service template.
RoleName provides the name of the role on which the current instance of
the service is being applied. RoleParameters provides the list of
parameters configured specifically for that role in the environment
file, like below:

  parameter_defaults:
    # Default value applied to all roles
    NovaReservedHostMemory: 2048
    ComputeDpdkParameters:
      # Applied only to the ComputeDpdk role
      NovaReservedHostMemory: 4096

In the above sample, the cluster contains two roles - Compute and
ComputeDpdk. The values of ComputeDpdkParameters will be passed to the
templates as RoleParameters while creating the stack for the
ComputeDpdk role. A parameter that supports role-specific configuration
should be looked up first in the RoleParameters list; if it is not
found there, the default (for all roles) should be used.

Implements: blueprint tripleo-derive-parameters
Change-Id: I72376a803ec6b2ed93903cc0c95a6ffce718b6dc
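Below is a minimal, hypothetical Python sketch of the precedence rule this
change introduces. The helper name resolve_role_param and the literal values
are illustrative only, not part of the patch: a role-specific entry wins over
the global parameter default.

    import yaml

    def resolve_role_param(name, role_parameters, defaults):
        # Role-specific override wins; otherwise fall back to the default.
        if name in role_parameters:
            return role_parameters[name]
        return defaults.get(name)

    env = yaml.safe_load("""
    parameter_defaults:
      NovaReservedHostMemory: 2048
      ComputeDpdkParameters:
        NovaReservedHostMemory: 4096
    """)

    defaults = env['parameter_defaults']
    role_params = defaults.get('ComputeDpdkParameters', {})
    print(resolve_role_param('NovaReservedHostMemory', role_params, defaults))
    # -> 4096 (ComputeDpdk role); a role without an override would get 2048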
315 lines
12 KiB
Python
Executable File
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import sys
import traceback

import yaml

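# Every service template must accept these parameters. RoleName and
# RoleParameters are the two additions from this change: they let a template
# see which role it is being applied to and the role-specific overrides
# supplied for that role.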
required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords',
                   'RoleName', 'RoleParameters']

envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
                                'tls-endpoints-public-ip.yaml',
                                'tls-everywhere-endpoints-dns.yaml']
ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
REQUIRED_DOCKER_SECTIONS = ['service_name', 'docker_config', 'puppet_config',
                            'config_settings', 'step_config']
OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
                            'service_config_settings', 'host_prep_tasks',
                            'metadata_settings', 'kolla_config']
REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
                                          'config_image']
OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = ['puppet_tags']


def exit_usage():
    print('Usage: %s <yaml file or directory>' % sys.argv[0])
    sys.exit(1)


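# endpoint_map.yaml carries the canonical EndpointMap default; the static
# tls-endpoints-* environment files must define the same set of keys. The
# two helpers below load each side so the maps can be compared at the end
# of the run.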
def get_base_endpoint_map(filename):
    try:
        tpl = yaml.load(open(filename).read())
        return tpl['parameters']['EndpointMap']['default']
    except Exception:
        print(traceback.format_exc())
        return None


def get_endpoint_map_from_env(filename):
    try:
        tpl = yaml.load(open(filename).read())
        return {
            'file': filename,
            'map': tpl['parameter_defaults']['EndpointMap']
        }
    except Exception:
        print(traceback.format_exc())
        return None


def validate_endpoint_map(base_map, env_map):
    return sorted(base_map.keys()) == sorted(env_map.keys())


def validate_hci_compute_services_default(env_filename, env_tpl):
    env_services_list = env_tpl['parameter_defaults']['ComputeServices']
    env_services_list.remove('OS::TripleO::Services::CephOSD')
    roles_filename = os.path.join(os.path.dirname(env_filename),
                                  '../roles_data.yaml')
    roles_tpl = yaml.load(open(roles_filename).read())
    for role in roles_tpl:
        if role['name'] == 'Compute':
            roles_services_list = role['ServicesDefault']
            if sorted(env_services_list) != sorted(roles_services_list):
                print('ERROR: ComputeServices in %s is different '
                      'from ServicesDefault in roles_data.yaml'
                      % env_filename)
                return 1
    return 0


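# validate_mysql_connection() walks a config_settings tree recursively.
# error_status is a one-element list rather than a plain int so the nested
# validate_mysql_uri() closure can mutate it -- a common idiom in Python 2,
# where the nonlocal statement is unavailable.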
def validate_mysql_connection(settings):
    no_op = lambda *args: False
    error_status = [0]

    def mysql_protocol(items):
        return items == ['EndpointMap', 'MysqlInternal', 'protocol']

    def client_bind_address(item):
        return 'read_default_file' in item and \
            'read_default_group' in item

    def validate_mysql_uri(key, items):
        # Only consider a connection if it targets mysql
        if key.endswith('connection') and \
                search(items, mysql_protocol, no_op):
            # Assume the "bind_address" option is one of
            # the tokens that make up the uri
            if not search(items, client_bind_address, no_op):
                error_status[0] = 1
        return False

    def search(item, check_item, check_key):
        if check_item(item):
            return True
        elif isinstance(item, list):
            for i in item:
                if search(i, check_item, check_key):
                    return True
        elif isinstance(item, dict):
            for k in item.keys():
                if check_key(k, item[k]):
                    return True
                elif search(item[k], check_item, check_key):
                    return True
        return False

    search(settings, no_op, validate_mysql_uri)
    return error_status[0]


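# A docker service template's role_data must contain every section in
# REQUIRED_DOCKER_SECTIONS, may contain anything in OPTIONAL_DOCKER_SECTIONS,
# and must contain nothing else. The puppet_config map gets the same
# required/optional treatment using its own section lists.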
def validate_docker_service(filename, tpl):
    if 'outputs' in tpl and 'role_data' in tpl['outputs']:
        if 'value' not in tpl['outputs']['role_data']:
            print('ERROR: invalid role_data for filename: %s'
                  % filename)
            return 1
        role_data = tpl['outputs']['role_data']['value']

        for section_name in REQUIRED_DOCKER_SECTIONS:
            if section_name not in role_data:
                print('ERROR: %s is required in role_data for %s.'
                      % (section_name, filename))
                return 1

        for section_name in role_data.keys():
            if (section_name not in REQUIRED_DOCKER_SECTIONS and
                    section_name not in OPTIONAL_DOCKER_SECTIONS):
                print('ERROR: %s is extra in role_data for %s.'
                      % (section_name, filename))
                return 1

        if 'puppet_config' in role_data:
            puppet_config = role_data['puppet_config']
            for key in puppet_config:
                if (key not in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS and
                        key not in OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS):
                    print('ERROR: %s should not be in puppet_config section.'
                          % key)
                    return 1
            for key in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS:
                if key not in puppet_config:
                    print('ERROR: %s is required in puppet_config for %s.'
                          % (key, filename))
                    return 1

    if 'parameters' in tpl:
        for param in required_params:
            if param not in tpl['parameters']:
                print('ERROR: parameter %s is required for %s.'
                      % (param, filename))
                return 1
    return 0


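# service_name must be derivable from the template's file name: a
# hypothetical template named nova-api.yaml, for example, would have to
# declare service_name: nova_api (the basename with hyphens replaced by
# underscores).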
def validate_service(filename, tpl):
    if 'outputs' in tpl and 'role_data' in tpl['outputs']:
        if 'value' not in tpl['outputs']['role_data']:
            print('ERROR: invalid role_data for filename: %s'
                  % filename)
            return 1
        role_data = tpl['outputs']['role_data']['value']
        if 'service_name' not in role_data:
            print('ERROR: service_name is required in role_data for %s.'
                  % filename)
            return 1
        # service_name must match the filename, but with underscores
        if (role_data['service_name'] !=
                os.path.basename(filename).split('.')[0].replace("-", "_")):
            print('ERROR: service_name should match file name for service: '
                  '%s.' % filename)
            return 1
        # if the service connects to mysql, the uri should use the
        # bind_address option to avoid issues with VIP failover
        if 'config_settings' in role_data and \
                validate_mysql_connection(role_data['config_settings']):
            print('ERROR: mysql connection uri should use option '
                  'bind_address')
            return 1
    if 'parameters' in tpl:
        for param in required_params:
            if param not in tpl['parameters']:
                print('ERROR: parameter %s is required for %s.'
                      % (param, filename))
                return 1
    return 0


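# validate() dispatches on the file path: puppet/services/* templates are
# checked with validate_service(), docker/services/* with
# validate_docker_service(), and the hyperconverged-ceph environment against
# the Compute role defaults. services.yaml (and qdr.yaml, which aliases the
# rabbitmq service) are exempt from the service checks.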
def validate(filename):
    print('Validating %s' % filename)
    retval = 0
    try:
        tpl = yaml.load(open(filename).read())

        # The template alias version should be used instead of a date; this
        # validation applies to all templates, not just those in the
        # services folder.
        if ('heat_template_version' in tpl and
                not str(tpl['heat_template_version']).isalpha()):
            print('ERROR: heat_template_version needs to be the release '
                  'alias not a date: %s' % filename)
            return 1

        # qdr aliases the rabbitmq service to provide an alternative
        # messaging backend
        if (filename.startswith('./puppet/services/') and
                filename not in ['./puppet/services/services.yaml',
                                 './puppet/services/qdr.yaml']):
            retval = validate_service(filename, tpl)

        if (filename.startswith('./docker/services/') and
                filename != './docker/services/services.yaml'):
            retval = validate_docker_service(filename, tpl)

        if filename.endswith('hyperconverged-ceph.yaml'):
            retval = validate_hci_compute_services_default(filename, tpl)

    except Exception:
        print(traceback.format_exc())
        return 1
    # yaml is OK, now walk the parameters and output a warning for unused ones
    if 'heat_template_version' in tpl:
        for p in tpl.get('parameters', {}):
            if p in required_params:
                continue
            str_p = '\'%s\'' % p
            in_resources = str_p in str(tpl.get('resources', {}))
            in_outputs = str_p in str(tpl.get('outputs', {}))
            if not in_resources and not in_outputs:
                print('Warning: parameter %s in template %s '
                      'appears to be unused' % (p, filename))

    return retval


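# Main driver: walk each path argument, validate every .yaml file (skipping
# .j2.yaml Jinja templates, which are not valid YAML until rendered), and
# OR each result into the process exit code while collecting failing file
# names for the summary printed at the end.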
if len(sys.argv) < 2:
    exit_usage()

path_args = sys.argv[1:]
exit_val = 0
failed_files = []
base_endpoint_map = None
env_endpoint_maps = list()

for base_path in path_args:
    if os.path.isdir(base_path):
        for subdir, dirs, files in os.walk(base_path):
            for f in files:
                if f.endswith('.yaml') and not f.endswith('.j2.yaml'):
                    file_path = os.path.join(subdir, f)
                    failed = validate(file_path)
                    if failed:
                        failed_files.append(file_path)
                    exit_val |= failed
                    if f == ENDPOINT_MAP_FILE:
                        base_endpoint_map = get_base_endpoint_map(file_path)
                    if f in envs_containing_endpoint_map:
                        env_endpoint_map = get_endpoint_map_from_env(file_path)
                        if env_endpoint_map:
                            env_endpoint_maps.append(env_endpoint_map)
    elif os.path.isfile(base_path) and base_path.endswith('.yaml'):
        failed = validate(base_path)
        if failed:
            failed_files.append(base_path)
        exit_val |= failed
    else:
        print('Unexpected argument %s' % base_path)
        exit_usage()

if base_endpoint_map and \
        len(env_endpoint_maps) == len(envs_containing_endpoint_map):
    for env_endpoint_map in env_endpoint_maps:
        matches = validate_endpoint_map(base_endpoint_map,
                                        env_endpoint_map['map'])
        if not matches:
            print("ERROR: %s needs to be updated to match changes in base "
                  "endpoint map" % env_endpoint_map['file'])
            failed_files.append(env_endpoint_map['file'])
            exit_val |= 1
        else:
            print("%s matches base endpoint map" % env_endpoint_map['file'])
else:
    print("ERROR: Can't validate endpoint maps since a file is missing. "
          "If you meant to delete one of these files you should update this "
          "tool as well.")
    if not base_endpoint_map:
        failed_files.append(ENDPOINT_MAP_FILE)
    if len(env_endpoint_maps) != len(envs_containing_endpoint_map):
        matched_files = set(os.path.basename(matched_env_file['file'])
                            for matched_env_file in env_endpoint_maps)
        failed_files.extend(set(envs_containing_endpoint_map) - matched_files)
    exit_val |= 1

if failed_files:
    print('Validation failed on:')
    for f in failed_files:
        print(f)
else:
    print('Validation successful!')
sys.exit(exit_val)