Enforce pep8/pyflakes rules on Python code

This change makes sure that we apply flake8 checks to all Python
code to improve its readability.

Note that some rules applied in other OpenStack projects are not yet
turned on here; they should be enabled in the future.

Change-Id: Iaf0299983d3a3fe48e3beb8f47bd33c21deb4972
Takashi Kajinami 2019-08-29 20:49:40 +09:00
parent 9b88629d63
commit f47dfe1059
17 changed files with 412 additions and 321 deletions
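
Editor's note: for context, a minimal sketch (not part of this commit) of
running the same checks programmatically, assuming flake8's documented
legacy Python API; the ignore and exclude values mirror the tox.ini hunk
later in this commit.

```
# Hedged sketch: invoke flake8 with the same ignore/exclude settings
# that the tox.ini change below introduces.
from flake8.api import legacy as flake8

IGNORE = ['E121', 'E122', 'E123', 'E124', 'E125', 'E126', 'E127',
          'E128', 'E129', 'E131', 'E251', 'H405', 'W503', 'W504',
          'E501', 'E731', 'W605']

style_guide = flake8.get_style_guide(ignore=IGNORE,
                                     exclude=['releasenotes'])
report = style_guide.check_files(['.'])
print('violations found: %d' % report.total_errors)
```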

View File

@ -1,5 +1,4 @@
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -13,20 +12,21 @@
# under the License.
# Shell script tool to run puppet inside of the given container image.
# Uses the config file at /var/lib/container-puppet/container-puppet.json as a source for a JSON
# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
# Uses the config file at /var/lib/container-puppet/container-puppet.json
# as a source for a JSON array of
# [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
# that can be used to generate config files or run ad-hoc puppet modules
# inside of a container.
import glob
import json
import logging
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
import multiprocessing
from paunch import runner as containers_runner
@ -93,6 +93,7 @@ if (os.environ.get('MOUNT_HOST_PUPPET', 'true') == 'true' and
PUPPETS not in cli_dcmd):
cli_dcmd.extend(['--volume', PUPPETS])
# this is to match what we do in deployed-server
def short_hostname():
subproc = subprocess.Popen(['hostname', '-s'],
@ -188,6 +189,7 @@ def rm_container(name):
'No such container: {}\n'.format(name):
log.debug(cmd_stderr)
process_count = int(os.environ.get('PROCESS_COUNT',
multiprocessing.cpu_count()))
config_file = os.environ.get('CONFIG', '/var/lib/container-puppet/container-puppet.json')
@ -403,12 +405,11 @@ if not os.path.exists(sh_script):
""")
def mp_puppet_config(*args):
(config_volume, puppet_tags, manifest, config_image, volumes, privileged, check_mode) = args[0]
log = get_logger()
log.info('Starting configuration of %s using image %s' % (config_volume,
config_image))
log.info('Starting configuration of %s using image %s' %
(config_volume, config_image))
log.debug('config_volume %s' % config_volume)
log.debug('puppet_tags %s' % puppet_tags)
log.debug('manifest %s' % manifest)
@ -466,7 +467,6 @@ def mp_puppet_config(*args):
for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
env[k] = os.environ.get(k)
common_dcmd += cli_dcmd
if check_mode:
@ -486,7 +486,7 @@ def mp_puppet_config(*args):
'/etc/hosts:/etc/hosts:ro'])
else:
log.debug('running without containers Networking')
dcmd.extend(['--net', 'none'])
common_dcmd.extend(['--net', 'none'])
# script injection as the last mount to make sure it's accessible
# https://github.com/containers/libpod/issues/1844
@ -534,6 +534,7 @@ def mp_puppet_config(*args):
log.info('Finished processing puppet configs for %s' % (config_volume))
return retval
# Holds all the information for each process to consume.
# Instead of starting them all linearly we run them using a process
# pool. This creates a list of arguments for the above function
@ -608,4 +609,3 @@ for infile in infiles:
if not success:
sys.exit(1)
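
Editor's note: several hunks in this file only add the vertical whitespace
that pycodestyle's blank-line rules require. A minimal sketch of the two
rules involved (E302 and E305); the platform-based body is a hypothetical
stand-in for the script's subprocess-based short_hostname().

```
import platform


def short_hostname():
    # two blank lines above the def satisfy E302
    return platform.node().split('.')[0]


# two blank lines after the last def satisfy E305
print(short_hostname())
```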

View File

@ -74,7 +74,7 @@ class PathManager(object):
try:
os.chown(self.path, target_uid, target_gid)
self._update()
except Exception as e:
except Exception:
LOG.exception('Could not change ownership of %s: ',
self.path)
else:
@ -172,5 +172,6 @@ class NovaStatedirOwnershipManager(object):
LOG.info('Nova statedir ownership complete')
if __name__ == '__main__':
NovaStatedirOwnershipManager('/var/lib/nova').run()
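
Editor's note: the `except Exception as e:` to `except Exception:` hunks
here and in the next few files fix pyflakes F841 (local variable assigned
but never used). A self-contained sketch of the pattern; read_config and
the path are hypothetical.

```
import logging

LOG = logging.getLogger(__name__)


def read_config(path):
    try:
        return open(path).read()
    except Exception:
        # binding the exception as 'e' without using it trips F841;
        # LOG.exception already records the active traceback
        LOG.exception('Could not read %s:', path)
        return None


logging.basicConfig(level=logging.DEBUG)
read_config('/nonexistent/path')
```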

View File

@ -68,7 +68,7 @@ if __name__ == '__main__':
if os.path.isfile(nova_cfg):
try:
config.read(nova_cfg)
except Exception as e:
except Exception:
LOG.exception('Error while reading nova.conf:')
else:
LOG.error('Nova configuration file %s does not exist', nova_cfg)
@ -107,7 +107,7 @@ if __name__ == '__main__':
LOG.info('Nova-compute service registered')
sys.exit(0)
LOG.info('Waiting for nova-compute service to register')
except Exception as e:
except Exception:
LOG.exception(
'Error while waiting for nova-compute service to register')
time.sleep(timeout)

View File

@ -62,7 +62,7 @@ if __name__ == '__main__':
if os.path.isfile(nova_cfg):
try:
config.read(nova_cfg)
except Exception as e:
except Exception:
LOG.exception('Error while reading nova.conf:')
else:
LOG.error('Nova configuration file %s does not exist', nova_cfg)
@ -97,7 +97,7 @@ if __name__ == '__main__':
LOG.error('Failed to get placement service endpoint!')
else:
break
except Exception as e:
except Exception:
LOG.exception('Retry - Failed to get placement service endpoint:')
time.sleep(timeout)
@ -123,7 +123,7 @@ if __name__ == '__main__':
LOG.info('Placement service not up - %s, %s',
r.status_code,
r.text)
except Exception as e:
except Exception:
LOG.exception('Error query the placement endpoint:')
time.sleep(timeout)

View File

@ -211,7 +211,8 @@ class PathManagerCase(base.BaseTestCase):
pathinfo = PathManager('/var/lib/nova/instances/foo/baz')
self.assertTrue(pathinfo.has_owner(current_uid, current_gid))
pathinfo.chown(current_uid + 1, current_gid + 1)
assert_ids(testtree, pathinfo.path, current_uid+1, current_gid+1)
assert_ids(testtree, pathinfo.path,
current_uid + 1, current_gid + 1)
class NovaStatedirOwnershipManagerTestCase(base.BaseTestCase):
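
Editor's note: the spacing change above is pycodestyle E226 (missing
whitespace around arithmetic operator). E226 sits in pycodestyle's default
ignore list, but tox.ini's explicit `ignore =` replaces the defaults, so
the rule becomes active. A tiny sketch:

```
current_uid, current_gid = 1000, 1000

next_uid = current_uid + 1    # was: current_uid+1 (E226)
next_gid = current_gid + 1
print(next_uid, next_gid)
```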

View File

@ -1,9 +1,19 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import netaddr
import os
import openstack
import os
import subprocess
CTLPLANE_NETWORK_NAME = 'ctlplane'
@ -61,12 +71,15 @@ def _ensure_neutron_network(sdk):
return network
def _get_nameservers_for_version(servers, ipversion):
"""Get list of nameservers for an IP version"""
return [s for s in servers if netaddr.IPAddress(s).version == ipversion]
def _neutron_subnet_create(sdk, network_id, cidr, gateway, host_routes,
allocation_pools, name, segment_id, dns_nameservers):
allocation_pools, name, segment_id,
dns_nameservers):
try:
if netaddr.IPNetwork(cidr).version == 6:
subnet = sdk.network.create_subnet(
@ -137,6 +150,7 @@ def _neutron_add_subnet_segment_association(sdk, subnet_id, segment_id):
print('ERROR: Associating segment with subnet %s failed.' % subnet_id)
raise
def _neutron_segment_create(sdk, name, network_id, phynet):
try:
segment = sdk.network.create_segment(
@ -145,7 +159,7 @@ def _neutron_segment_create(sdk, name, network_id, phynet):
physical_network=phynet,
network_type='flat')
print('INFO: Neutron Segment created %s' % segment)
except Exception as ex:
except Exception:
print('ERROR: Neutron Segment %s create failed.' % name)
raise
@ -173,7 +187,7 @@ def _ensure_neutron_router(sdk, name, subnet_id):
def _get_subnet(sdk, cidr, network_id):
try:
subnet = list(sdk.network.subnets(cidr=cidr, network_id=network_id))
except Exception as ex:
except Exception:
print('ERROR: Get subnet with cidr %s failed.' % cidr)
raise
@ -206,12 +220,13 @@ def _local_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
"""Create's and updates the ctlplane subnet on the segment that is local to
the underclud.
"""
s = CONF['subnets'][CONF['local_subnet']]
name = CONF['local_subnet']
subnet = _get_subnet(sdk, s['NetworkCidr'], ctlplane_id)
segment = _get_segment(sdk, CONF['physical_network'], ctlplane_id)
if subnet:
if CONF['enable_routed_networks'] and subnet.segment_id == None:
if CONF['enable_routed_networks'] and subnet.segment_id is None:
# The subnet exists and does not have a segment association. Since
# routed networks is enabled in the configuration, we need to
# migrate the existing non-routed networks subnet to a routed
@ -239,10 +254,12 @@ def _local_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
return net_cidrs
def _remote_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
"""Create's and updates the ctlplane subnet(s) on segments that is
not local to the undercloud.
"""
for name in CONF['subnets']:
s = CONF['subnets'][name]
if name == CONF['local_subnet']:
@ -274,6 +291,7 @@ def _remote_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
return net_cidrs
if 'true' not in _run_command(['hiera', 'neutron_api_enabled'],
name='hiera').lower():
print('WARNING: UndercloudCtlplaneNetworkDeployment : The Neutron API '
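
Editor's note: the `subnet.segment_id == None` fix above is pycodestyle
E711: None is a singleton, so identity is the intended test, and `== None`
can also be defeated by a custom `__eq__`. A sketch with a hypothetical
Subnet class:

```
class Subnet(object):
    def __init__(self, segment_id=None):
        self.segment_id = segment_id


subnet = Subnet()
if subnet.segment_id is None:      # preferred over '== None' (E711)
    print('subnet has no segment association')
```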

View File

@ -1,13 +1,24 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import openstack
import os
import subprocess
from keystoneauth1 import exceptions as ks_exceptions
from mistralclient.api import client as mistralclient
from mistralclient.api import base as mistralclient_exc
from mistralclient.api import client as mistralclient
CONF = json.loads(os.environ['config'])
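
Editor's note: this file's hunks only reorder imports alphabetically
(OpenStack's hacking plugin enforces this as H306): `openstack` sorts
before `os` because 'p' < 's' at the second character, and
`...api import base` before `...api import client`. A trivial sketch of
the ordering check:

```
# sorted() confirms the ordering rule the hunks apply
modules = ['json', 'openstack', 'os', 'subprocess']
assert modules == sorted(modules)
print('import order is alphabetical')
```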

View File

@ -1,4 +1,15 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Generate the endpoint_map.yaml template from data in the endpoint_data.yaml
@ -13,12 +24,6 @@ mismatch is detected.
"""
from __future__ import print_function
__all__ = ['load_endpoint_data', 'generate_endpoint_map_template',
'write_template', 'build_endpoint_map', 'check_up_to_date']
import collections
import copy
import itertools
@ -27,6 +32,9 @@ import sys
import yaml
__all__ = ['load_endpoint_data', 'generate_endpoint_map_template',
'write_template', 'build_endpoint_map', 'check_up_to_date']
(IN_FILE, OUT_FILE) = ('endpoint_data.yaml', 'endpoint_map.yaml')
SUBST = (SUBST_IP_ADDRESS, SUBST_CLOUDNAME) = ('IP_ADDRESS', 'CLOUDNAME')
@ -88,7 +96,8 @@ def make_parameter(ptype, default, description=None):
def template_parameters(config):
params = collections.OrderedDict()
params[PARAM_NETIPMAP] = make_parameter('json', {}, 'The Net IP map')
params[PARAM_SERVICENETMAP] = make_parameter('json', {}, 'The Service Net map')
params[PARAM_SERVICENETMAP] = make_parameter('json', {},
'The Service Net map')
params[PARAM_ENDPOINTMAP] = make_parameter('json',
endpoint_map_default(config),
'Mapping of service endpoint '

View File

@ -17,6 +17,7 @@ import datetime
import os
import re
import shutil
import six
import subprocess
import sys
import yaml
@ -168,13 +169,9 @@ class TemplateLoader(yaml.SafeLoader):
return collections.OrderedDict(self.construct_pairs(node))
if sys.version_info.major >= 3:
TemplateDumper.add_representer(str, TemplateDumper.description_presenter)
TemplateDumper.add_representer(bytes,
TemplateDumper.add_representer(six.text_type,
TemplateDumper.description_presenter)
else:
TemplateDumper.add_representer(str, TemplateDumper.description_presenter)
TemplateDumper.add_representer(unicode,
TemplateDumper.add_representer(six.binary_type,
TemplateDumper.description_presenter)
TemplateDumper.add_representer(collections.OrderedDict,
@ -215,7 +212,8 @@ def process_templates_and_get_reference_parameters():
for x in roles_data
if x['name'] == OPTS.role_name))
except StopIteration:
raise RuntimeError('The role: {role_name} is not defined in roles '
raise RuntimeError(
'The role: {role_name} is not defined in roles '
'data file: {roles_data_file}'.format(
role_name=OPTS.role_name, roles_data_file=OPTS.roles_data))
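
Editor's note: the representer hunk above collapses a Python 2/3 version
check into two unconditional calls: six.text_type is unicode on Python 2
and str on Python 3, while six.binary_type is str on Python 2 and bytes on
Python 3. A runnable sketch assuming six and PyYAML are installed; the
Dumper subclass and presenter are illustrative, not the commit's
TemplateDumper.

```
import six
import yaml


class Dumper(yaml.SafeDumper):
    pass


def quoted_presenter(dumper, data):
    if isinstance(data, six.binary_type):
        data = data.decode('utf-8')
    return dumper.represent_scalar('tag:yaml.org,2002:str', data,
                                   style='"')


# one pair of calls covers both Python 2 and Python 3
Dumper.add_representer(six.text_type, quoted_presenter)
Dumper.add_representer(six.binary_type, quoted_presenter)

print(yaml.dump({'greeting': u'hello'}, Dumper=Dumper))
```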

View File

@ -38,6 +38,7 @@ def _shutil_copy_if_not_same(src, dst):
else:
raise
def parse_opts(argv):
parser = argparse.ArgumentParser(
description='Configure host network interfaces using a JSON'
@ -49,7 +50,8 @@ def parse_opts(argv):
help="""relative path to the roles_data.yaml file.""",
default='roles_data.yaml')
parser.add_argument('-n', '--network-data', metavar='NETWORK_DATA',
help="""relative path to the network_data.yaml file.""",
help=("""relative path to the network_data.yaml """
"""file."""),
default='network_data.yaml')
parser.add_argument('--safe',
action='store_true',
@ -86,7 +88,8 @@ def _j2_render_to_file(j2_template, j2_data, outfile_name=None,
# Search for templates relative to the current template path first
template_base = os.path.dirname(yaml_f)
j2_loader = jinja2.loaders.FileSystemLoader([template_base, __tht_root_dir])
j2_loader = \
jinja2.loaders.FileSystemLoader([template_base, __tht_root_dir])
try:
# Render the j2 template
@ -102,6 +105,7 @@ def _j2_render_to_file(j2_template, j2_data, outfile_name=None,
with open(outfile_name, 'w') as out_f:
out_f.write(r_template)
def process_templates(template_path, role_data_path, output_dir,
network_data_path, overwrite, dry_run):
@ -255,7 +259,8 @@ def process_templates(template_path, role_data_path, output_dir,
template_data = j2_template.read()
j2_data = {'roles': role_data,
'networks': network_data}
out_f = os.path.basename(f).replace('.j2.yaml', '.yaml')
out_f = os.path.basename(f).replace('.j2.yaml',
'.yaml')
out_f_path = os.path.join(out_dir, out_f)
_j2_render_to_file(template_data, j2_data, out_f_path,
overwrite, dry_run)
@ -265,6 +270,7 @@ def process_templates(template_path, role_data_path, output_dir,
else:
print('Unexpected argument %s' % template_path)
def clean_templates(base_path, role_data_path, network_data_path):
def delete(f):
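
Editor's note: several hunks in this file wrap long lines. E501 itself is
deferred in tox.ini but slated for future enforcement, so these wraps get
ahead of it; the idiomatic tool is implicit string concatenation inside
parentheses, sketched here with the help text from above:

```
# adjacent literals in parentheses are joined at compile time,
# avoiding '+' or backslash continuations
HELP = ('relative path to the network_data.yaml '
        'file.')

assert HELP == 'relative path to the network_data.yaml file.'
print(HELP)
```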

View File

@ -42,6 +42,7 @@ def parse_opts(argv):
return opts
opts = parse_opts(sys.argv)
roles = collections.OrderedDict.fromkeys(opts.roles)

View File

@ -13,20 +13,20 @@
import argparse
import collections
import copy
import datetime
import os
import re
import shutil
import six
import sys
import traceback
import yaml
import six
import re
def parse_opts(argv):
parser = argparse.ArgumentParser(
description='Convert an old style NIC config file into the new format using '
'run-os-net-config.sh')
description='Convert an old style NIC config file into the new format '
'using run-os-net-config.sh')
parser.add_argument('--script-dir', metavar='<script directory>',
help="Relative path to run-os-net-config.sh",
default="network/scripts/run-os-net-config.sh")
@ -42,8 +42,9 @@ def parse_opts(argv):
return opts
#convert comments into 'comments<num>: ...' YAML
def to_commented_yaml(filename):
"""Convert comments into 'comments<num>: ...' YAML"""
out_str = ''
last_non_comment_spaces = ''
with open(filename, 'r') as f:
@ -51,20 +52,22 @@ def to_commented_yaml(filename):
for line in f:
# skip blank line
if line.isspace():
continue;
continue
char_count = 0
spaces = ''
for char in line:
char_count += 1
if char == ' ':
spaces += ' '
next;
next
elif char == '#':
last_non_comment_spaces = spaces
comment_count += 1
comment = line[char_count:-1]
out_str += "%scomment%i_%i: '%s'\n" % (last_non_comment_spaces, comment_count, len(spaces), comment)
break;
out_str += "%scomment%i_%i: '%s'\n" % \
(last_non_comment_spaces, comment_count, len(spaces),
comment)
break
else:
last_non_comment_spaces = spaces
out_str += line
@ -73,16 +76,19 @@ def to_commented_yaml(filename):
m = re.match(".*:.*#(.*)", line)
if m:
comment_count += 1
out_str += "%s inline_comment%i: '%s'\n" % (last_non_comment_spaces, comment_count, m.group(1))
break;
out_str += "%s inline_comment%i: '%s'\n" % \
(last_non_comment_spaces, comment_count,
m.group(1))
break
with open(filename, 'w') as f:
f.write(out_str)
return out_str
#convert back to normal #commented YAML
def to_normal_yaml(filename):
"""Convert back to normal #commented YAML"""
with open(filename, 'r') as f:
data = f.read()
@ -92,8 +98,12 @@ def to_normal_yaml(filename):
for line in data.split('\n'):
# get_input not supported by run-os-net-config.sh script
line = line.replace('get_input: ', '')
m = re.match(" +comment[0-9]+_([0-9]+): '(.*)'.*", line) #normal comments
i = re.match(" +inline_comment[0-9]+: '(.*)'.*", line) #inline comments
# normal comments
m = re.match(" +comment[0-9]+_([0-9]+): '(.*)'.*", line)
# inline comments
i = re.match(" +inline_comment[0-9]+: '(.*)'.*", line)
if m:
if next_line_break:
out_str += '\n'
@ -122,9 +132,11 @@ def to_normal_yaml(filename):
class description(six.text_type):
pass
# FIXME: Some of this duplicates code from build_endpoint_map.py, we should
# refactor to share the common code
class TemplateDumper(yaml.SafeDumper):
def represent_ordered_dict(self, data):
return self.represent_dict(data.items())
@ -154,9 +166,12 @@ TemplateDumper.add_representer(collections.OrderedDict,
TemplateLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
TemplateLoader.construct_mapping)
def write_template(template, filename=None):
with open(filename, 'w') as f:
yaml.dump(template, f, TemplateDumper, width=120, default_flow_style=False)
yaml.dump(template, f, TemplateDumper, width=120,
default_flow_style=False)
def convert(filename, script_path):
print('Converting %s' % filename)
@ -170,7 +185,6 @@ def convert(filename, script_path):
if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
r[1].get('properties', {}).get('group') == 'os-apply-config' and
r[1].get('properties', {}).get('config', {}).get('os_net_config')):
#print("match %s" % r[0])
new_r = collections.OrderedDict()
new_r['type'] = 'OS::Heat::SoftwareConfig'
new_r['properties'] = collections.OrderedDict()
@ -179,7 +193,8 @@ def convert(filename, script_path):
'properties', {}).get('config', {}).get('os_net_config')
new_config = {'str_replace': collections.OrderedDict()}
new_config['str_replace']['template'] = {'get_file': script_path}
new_config['str_replace']['params'] = {'$network_config': old_net_config}
new_config['str_replace']['params'] = \
{'$network_config': old_net_config}
new_r['properties']['config'] = new_config
tpl['resources'][r[0]] = new_r
else:
@ -195,18 +210,16 @@ def convert(filename, script_path):
od_result['parameters'] = tpl['parameters']
od_result['resources'] = tpl['resources']
od_result['outputs'] = tpl['outputs']
#print('Result:')
#print('%s' % yaml.dump(od_result, Dumper=TemplateDumper, width=120, default_flow_style=False))
#print('---')
write_template(od_result, filename)
return 1
def check_old_style(filename):
with open(filename, 'r') as f:
tpl = yaml.load(open(filename).read())
tpl = yaml.load(f.read())
if isinstance(tpl.get('resources', {}), dict):
for r in (tpl.get('resources', {})).items():
@ -217,6 +230,7 @@ def check_old_style(filename):
return False
opts = parse_opts(sys.argv)
exit_val = 0
num_converted = 0
@ -231,8 +245,8 @@ for base_path in opts.files:
script_paths = [opts.script_dir]
script_paths.append('../../scripts/run-os-net-config.sh')
script_paths.append('../network/scripts/run-os-net-config.sh')
script_paths.append(
'/usr/share/openstack-tripleo-heat-templates/network/scripts/run-os-net-config.sh')
script_paths.append('/usr/share/openstack-tripleo-heat-templates/'
'network/scripts/run-os-net-config.sh')
script_path = None
for p in script_paths:
@ -240,7 +254,8 @@ for base_path in opts.files:
script_path = p
break
if script_path is None:
print("Error couldn't find run-os-net-config.sh relative to filename")
print("Error couldn't find run-os-net-config.sh relative "
"to filename")
sys.exit(1)
print("Using script at %s" % script_path)
@ -248,14 +263,16 @@ for base_path in opts.files:
extension = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
backup_filename = os.path.realpath(base_path) + '.' + extension
print('The yaml file will be overwritten and the original saved as %s'
% backup_filename)
if not (opts.yes or input("Overwrite %s? [y/n] " % base_path).lower() == 'y'):
print('The yaml file will be overwritten and the original saved '
'as %s' % backup_filename)
if not (opts.yes or
input("Overwrite %s? [y/n] " % base_path).lower() == 'y'):
print("Skipping file %s" % base_path)
continue
if os.path.exists(backup_filename):
print("Backup file already exists, skipping file %s" % base_path)
print("Backup file already exists, skipping file %s" %
base_path)
continue
shutil.copyfile(base_path, backup_filename)
@ -264,11 +281,13 @@ for base_path in opts.files:
num_converted += convert(base_path, script_path)
to_normal_yaml(base_path)
else:
print('File %s is not using old style NIC configuration' % base_path)
print('File %s is not using old style NIC configuration' %
base_path)
else:
print('Unexpected argument %s' % base_path)
if num_converted == 0:
exit_val = 1
sys.exit(exit_val)
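
Editor's note: beyond line wrapping, this file drops the stray C-style
semicolons (`continue;`, `break;`), which pycodestyle flags as E703
(statement ends with a semicolon), and promotes the leading `#convert ...`
comments into docstrings (E265 wants a space after '#'; a docstring is the
cleaner fix for a function summary). A sketch of the semicolon rule:

```
for line in ['# comment', 'key: value', '   ']:
    if line.isspace():
        continue          # was 'continue;' (E703)
    print(line)
```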

View File

@ -55,7 +55,8 @@ OPTIONAL_DOCKER_SECTIONS = ['container_puppet_tasks', 'upgrade_tasks',
'post_update_tasks', 'service_config_settings',
'host_prep_tasks', 'metadata_settings',
'kolla_config', 'global_config_settings',
'external_deploy_tasks', 'external_post_deploy_tasks',
'external_deploy_tasks',
'external_post_deploy_tasks',
'container_config_scripts', 'step_config',
'monitoring_subscription', 'scale_tasks',
'external_update_tasks', 'external_upgrade_tasks']
@ -76,9 +77,8 @@ REQUIRED_DOCKER_LOGGING_OUTPUTS = ['config_settings', 'docker_config',
# consistency across files on. This should only contain parameters whose
# definition we cannot change for backwards compatibility reasons. New
# parameters to the templates should not be added to this list.
PARAMETER_DEFINITION_EXCLUSIONS = {'CephPools': ['description',
'type',
'default'],
PARAMETER_DEFINITION_EXCLUSIONS = {
'CephPools': ['description', 'type', 'default'],
'ManagementNetCidr': ['default'],
'ManagementAllocationPools': ['default'],
'ExternalNetCidr': ['default'],
@ -140,8 +140,7 @@ PARAMETER_DEFINITION_EXCLUSIONS = {'CephPools': ['description',
# differently, and I'm not sure if we can
# safely change it.
'ControlPlaneDefaultRoute': ['default'],
# TODO(bnemec): Address these existing
# inconsistencies.
# TODO(bnemec): Address these existing inconsistencies.
'ServiceNetMap': ['description', 'default'],
'network': ['default'],
'ControlPlaneIP': ['default',
@ -166,26 +165,19 @@ PARAMETER_DEFINITION_EXCLUSIONS = {'CephPools': ['description',
# tenant networks have a limited support
# in OVN. Till that is fixed, we restrict
# NeutronNetworkType to 'geneve'.
'NeutronNetworkType': ['description',
'default',
'constraints'],
'NeutronNetworkType': ['description', 'default', 'constraints'],
'KeyName': ['constraints'],
'OVNSouthboundServerPort': ['description'],
'ExternalInterfaceDefaultRoute':
['description', 'default'],
'ManagementInterfaceDefaultRoute':
['description', 'default'],
'ExternalInterfaceDefaultRoute': ['description', 'default'],
'ManagementInterfaceDefaultRoute': ['description', 'default'],
'IPPool': ['description'],
'SSLCertificate': ['description',
'default',
'hidden'],
'SSLCertificate': ['description', 'default', 'hidden'],
'NodeIndex': ['description'],
'name': ['description', 'default'],
'image': ['description', 'default'],
'NeutronBigswitchAgentEnabled': ['default'],
'EndpointMap': ['description', 'default'],
'ContainerManilaConfigImage': ['description',
'default'],
'ContainerManilaConfigImage': ['description', 'default'],
'replacement_policy': ['default'],
'CloudDomain': ['description', 'default'],
'EnableLoadBalancer': ['description'],
@ -270,6 +262,7 @@ HEAT_OUTPUTS_EXCLUSIONS = [
'./extraconfig/pre_network/host_config_and_reboot.yaml'
]
def exit_usage():
print('Usage %s <yaml file or directory>' % sys.argv[0])
sys.exit(1)
@ -364,8 +357,8 @@ def validate_controller_dashboard(filename, tpl):
def validate_hci_role(hci_role_filename, hci_role_tpl):
role_files = ['HciCephAll', 'HciCephFile', 'HciCephMon', 'HciCephObject']
if hci_role_filename in ['./roles/' + x + '.yaml' for x in role_files]:
compute_role_filename = os.path.join(os.path.dirname(hci_role_filename),
'./Compute.yaml')
compute_role_filename = \
os.path.join(os.path.dirname(hci_role_filename), './Compute.yaml')
compute_role_tpl = yaml.load(open(compute_role_filename).read())
compute_role_services = compute_role_tpl[0]['ServicesDefault']
for role in hci_role_tpl:
@ -397,11 +390,12 @@ def validate_hci_role(hci_role_filename, hci_role_tpl):
return 1
return 0
def validate_ceph_role(ceph_role_filename, ceph_role_tpl):
role_files = ['CephAll', 'CephFile', 'CephMon', 'CephObject']
if ceph_role_filename in ['./roles/' + x + '.yaml' for x in role_files]:
ceph_storage_role_filename = os.path.join(os.path.dirname(ceph_role_filename),
'./CephStorage.yaml')
ceph_storage_role_filename = \
os.path.join(os.path.dirname(ceph_role_filename), './CephStorage.yaml')
ceph_storage_role_tpl = yaml.load(open(ceph_storage_role_filename).read())
ceph_storage_role_services = ceph_storage_role_tpl[0]['ServicesDefault']
for role in ceph_role_tpl:
@ -427,6 +421,7 @@ def validate_ceph_role(ceph_role_filename, ceph_role_tpl):
return 1
return 0
def validate_controller_no_ceph_role(filename, tpl):
control_role_filename = os.path.join(os.path.dirname(filename),
'./Controller.yaml')
@ -448,6 +443,7 @@ def validate_controller_no_ceph_role(filename, tpl):
return 1
return 0
def validate_with_compute_role_services(role_filename, role_tpl, exclude_service=()):
cmpt_filename = os.path.join(os.path.dirname(role_filename),
'./Compute.yaml')
@ -484,6 +480,7 @@ def validate_with_compute_role_services(role_filename, role_tpl, exclude_service
return 0
def validate_multiarch_compute_roles(role_filename, role_tpl):
errors = 0
roles_dir = os.path.dirname(role_filename)
@ -677,8 +674,9 @@ def validate_docker_service(filename, tpl):
return 1
config_volume = puppet_config.get('config_volume')
expected_config_image_parameter = "Container%sConfigImage" % to_camel_case(config_volume)
if config_volume and not expected_config_image_parameter in tpl.get('parameters', []):
expected_config_image_parameter = \
"Container%sConfigImage" % to_camel_case(config_volume)
if config_volume and expected_config_image_parameter not in tpl.get('parameters', []):
print('ERROR: Missing %s heat parameter for %s config_volume.'
% (expected_config_image_parameter, config_volume))
return 1
@ -699,7 +697,8 @@ def validate_docker_service(filename, tpl):
command = ' '.join(map(str, command))
if 'bootstrap_host_exec' in command \
and container.get('user') != 'root':
print('ERROR: bootstrap_host_exec needs to run as the root user.')
print('ERROR: bootstrap_host_exec needs to run '
'as the root user.')
return 1
if 'upgrade_tasks' in role_data and role_data['upgrade_tasks']:
@ -871,6 +870,7 @@ def _rsearch_keys(d, pattern, search_keynames=False, enter_lists=False):
result = []
return _rsearch_keys_nested(d, pattern, search_keynames, enter_lists)
def _get(d, path):
"""Get a value (or None) from a dict by path given as a list
@ -885,6 +885,7 @@ def _get(d, path):
return None
return d
def validate_service_hiera_interpol(f, tpl):
"""Validate service templates for hiera interpolation rules
@ -979,6 +980,7 @@ def validate_service_hiera_interpol(f, tpl):
else:
return 0
def validate_upgrade_tasks_duplicate_whens(filename):
"""Take a heat template and starting at the upgrade_tasks
try to detect duplicate 'when:' statements
@ -1188,6 +1190,7 @@ def validate(filename, param_map):
return retval
def validate_upgrade_tasks(upgrade_tasks):
# some templates define its upgrade_tasks via list_concat
if isinstance(upgrade_tasks, dict):
@ -1200,15 +1203,20 @@ def validate_upgrade_tasks(upgrade_tasks):
task_name = task.get("name", "")
whenline = task.get("when", "")
if (type(whenline) == list):
if any('step|int ' in condition for condition in whenline) and ('step|int == ' not in whenline[0]):
print('ERROR: \'step|int ==\' condition should be evaluated first in when conditions for task (%s)' % (task))
if any('step|int ' in condition for condition in whenline) \
and ('step|int == ' not in whenline[0]):
print('ERROR: \'step|int ==\' condition should be evaluated '
'first in when conditions for task (%s)' % (task))
return 1
else:
if (' and ' in whenline) and (' or ' not in whenline) \
and args.quiet < 2:
print("Warning: Consider specifying \'and\' conditions as a list to improve readability in task: \"%s\"" % (task_name))
print("Warning: Consider specifying \'and\' conditions as "
"a list to improve readability in task: \"%s\""
% (task_name))
return 0
def validate_network_data_file(data_file_path):
try:
data_file = yaml.load(open(data_file_path).read())
@ -1227,6 +1235,7 @@ def validate_network_data_file(data_file_path):
return 1
return 0
def validate_nic_config_file(filename, tpl):
try:
if isinstance(tpl.get('resources', {}), dict):
@ -1243,6 +1252,7 @@ def validate_nic_config_file(filename, tpl):
return 1
return 0
def parse_args():
p = argparse.ArgumentParser()
@ -1256,6 +1266,7 @@ def parse_args():
return p.parse_args()
args = parse_args()
path_args = args.path_args
quiet = args.quiet
@ -1273,9 +1284,9 @@ for base_path in path_args:
for f in files:
file_path = os.path.join(subdir, f)
if 'environments/services-docker' in file_path:
print("ERROR: environments/services-docker should not be used "
"any more, use environments/services instead: %s " %
file_path)
print("ERROR: environments/services-docker should not be "
"used any more, use environments/services instead: "
"%s " % file_path)
failed_files.append(file_path)
exit_val |= 1
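
Editor's note: among this file's fixes is pycodestyle E713: `not x in y`
becomes `x not in y`; both parse identically, but the latter reads as the
single membership operator it is. A sketch with hypothetical parameter
names:

```
parameters = {'ContainerNovaConfigImage': {}}
expected = 'ContainerGlanceConfigImage'

if expected not in parameters:     # was: if not expected in parameters
    print('ERROR: Missing %s heat parameter.' % expected)
```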

tox.ini
View File

@ -26,12 +26,35 @@ commands =
python ./tools/yaml-validate.py .
bash -c ./tools/roles-data-validation.sh
bash -c ./tools/check-up-to-date.sh
flake8 ./container_config_scripts/
flake8 --exclude releasenotes --ignore E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,W503,W504,E501,E731,W605
[testenv:flake8]
basepython = python3
commands =
flake8 ./container_config_scripts/
# E125 is deliberately excluded. See
# https://github.com/jcrocholl/pep8/issues/126. It's just wrong.
#
# Most of the whitespace related rules (E12* and E131) are excluded
# because while they are often useful guidelines, strict adherence to
# them ends up causing some really odd code formatting and forced
# extra line breaks. Updating code to enforce these will be a hard sell.
#
# H405 is another one that is good as a guideline, but sometimes
# multiline doc strings just don't have a natural summary
# line. Rejecting code for this reason is wrong.
#
# E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301
#
# The following two are also ignored because we do not think they are useful.
# W503 line break before binary operator
# W504 line break after binary operator
#
# The following rules are currently ignored, but will be enforced
# in the future
# E501 line too long
# E731 do not assign a lambda expression, use a def
# W605 invalid escape sequence
ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,W503,W504,E501,E731,W605
exclude = releasenotes
[testenv:templates]
basepython = python3
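
Editor's note: of the rules the tox.ini above defers, W605 (invalid escape
sequence) is the most mechanical to fix later: `\d` in a plain string
literal is an invalid escape, and a raw string states the intent. A sketch
using a pattern similar to the comment markers earlier in this commit:

```
import re

PATTERN = re.compile(r'comment(\d+)_(\d+)')   # raw string avoids W605

match = PATTERN.match('comment3_8')
print(match.groups())   # prints ('3', '8')
```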

View File

@ -762,4 +762,5 @@ resource_registry:
self.nested_output)
self.assertEqual(expected, f.read())
GeneratorTestCase.generate_scenarios()

View File

@ -12,15 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import io
import tempfile
import mock
from oslotest import base
import six
import testscenarios
import yaml
import yaql
class YAQLTestCase(base.BaseTestCase):
@ -32,4 +25,3 @@ class YAQLTestCase(base.BaseTestCase):
for i in path.split('.'):
data = data[i]
return data['yaql']['expression']
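
Editor's note: the final file prunes imports flagged by pyflakes F401
(module imported but unused); the fix is simply deleting the import. A
minimal sketch with a hypothetical helper in the spirit of the test's
dotted-path walker:

```
import json          # referenced below, so F401 is satisfied


def value_at(data, path):
    # walk a dotted path through nested dicts
    for key in path.split('.'):
        data = data[key]
    return data


doc = json.loads('{"outer": {"yaql": {"expression": "$.x"}}}')
print(value_at(doc, 'outer.yaql'))   # {'expression': '$.x'}
```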