Enforce pep8/pyflakes rules on Python code

This change makes sure that we apply flake8 checks on all Python
code to improve its readability.

Note that some rules applied in other OpenStack projects are not yet
turned on here; these should be enabled in the future.

Change-Id: Iaf0299983d3a3fe48e3beb8f47bd33c21deb4972
Takashi Kajinami 2019-08-29 20:49:40 +09:00
parent 9b88629d63
commit f47dfe1059
17 changed files with 412 additions and 321 deletions
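
Most of the hunks below are mechanical fixes for a handful of recurring
pep8/pyflakes findings. A minimal, self-contained sketch of the before/after
patterns (the names here are hypothetical; the real changes are in the hunks
that follow):

```python
import logging

log = logging.getLogger(__name__)


def risky():
    # Hypothetical helper, only for illustration.
    raise RuntimeError('boom')


# F841: the bound exception variable was never used, so
# "except Exception as e:" becomes "except Exception:".
try:
    risky()
except Exception:
    log.exception('risky failed')

# E711: comparisons to None use "is", not "==".
segment_id = None
if segment_id is None:        # was: if segment_id == None:
    pass

# E225/E226: spaces around arithmetic operators.
auth_url = 'http://keystone'
auth_url = auth_url + '/v3'   # was: auth_url+'/v3'

# E231: a space after each comma, including in tuple unpacking.
(config_volume, puppet_tags, manifest) = ('vol', 'tags', 'puppet.pp')
# was: (config_volume,puppet_tags,manifest) = ...
```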


@ -1,32 +1,32 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Shell script tool to run puppet inside of the given container image.
# Uses the config file at /var/lib/container-puppet/container-puppet.json as a source for a JSON
# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
# Uses the config file at /var/lib/container-puppet/container-puppet.json
# as a source for a JSON array of
# [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
# that can be used to generate config files or run ad-hoc puppet modules
# inside of a container.
import glob
import json
import logging
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
import multiprocessing
from paunch import runner as containers_runner
@ -45,7 +45,7 @@ def get_logger():
if logger is None:
logger = logging.getLogger()
ch = logging.StreamHandler(sys.stdout)
if os.environ.get('DEBUG') in ['True', 'true'] :
if os.environ.get('DEBUG') in ['True', 'true']:
logger.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
else:
@ -93,6 +93,7 @@ if (os.environ.get('MOUNT_HOST_PUPPET', 'true') == 'true' and
PUPPETS not in cli_dcmd):
cli_dcmd.extend(['--volume', PUPPETS])
# this is to match what we do in deployed-server
def short_hostname():
subproc = subprocess.Popen(['hostname', '-s'],
@ -188,6 +189,7 @@ def rm_container(name):
'No such container: {}\n'.format(name):
log.debug(cmd_stderr)
process_count = int(os.environ.get('PROCESS_COUNT',
multiprocessing.cpu_count()))
config_file = os.environ.get('CONFIG', '/var/lib/container-puppet/container-puppet.json')
@ -403,12 +405,11 @@ if not os.path.exists(sh_script):
""")
def mp_puppet_config(*args):
(config_volume,puppet_tags,manifest,config_image,volumes,privileged,check_mode) = args[0]
(config_volume, puppet_tags, manifest, config_image, volumes, privileged, check_mode) = args[0]
log = get_logger()
log.info('Starting configuration of %s using image %s' % (config_volume,
config_image))
log.info('Starting configuration of %s using image %s' %
(config_volume, config_image))
log.debug('config_volume %s' % config_volume)
log.debug('puppet_tags %s' % puppet_tags)
log.debug('manifest %s' % manifest)
@ -466,7 +467,6 @@ def mp_puppet_config(*args):
for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
env[k] = os.environ.get(k)
common_dcmd += cli_dcmd
if check_mode:
@ -483,10 +483,10 @@ def mp_puppet_config(*args):
if os.environ.get('NET_HOST', 'false') == 'true':
log.debug('NET_HOST enabled')
common_dcmd.extend(['--net', 'host', '--volume',
'/etc/hosts:/etc/hosts:ro'])
'/etc/hosts:/etc/hosts:ro'])
else:
log.debug('running without containers Networking')
dcmd.extend(['--net', 'none'])
common_dcmd.extend(['--net', 'none'])
# script injection as the last mount to make sure it's accessible
# https://github.com/containers/libpod/issues/1844
@ -512,7 +512,7 @@ def mp_puppet_config(*args):
retval = subproc.returncode
# puppet with --detailed-exitcodes will return 0 for success and no changes
# and 2 for success and resource changes. Other numbers are failures
if retval in [0,2]:
if retval in [0, 2]:
if cmd_stdout:
log.debug('%s run succeeded: %s' % (cmd, cmd_stdout))
if cmd_stderr:
@ -534,6 +534,7 @@ def mp_puppet_config(*args):
log.info('Finished processing puppet configs for %s' % (config_volume))
return retval
# Holds all the information for each process to consume.
# Instead of starting them all linearly we run them using a process
# pool. This creates a list of arguments for the above function
@ -608,4 +609,3 @@ for infile in infiles:
if not success:
sys.exit(1)
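
The comments above describe the concurrency model: instead of applying each
puppet config serially, the script builds a list of argument tuples and feeds
them to a pool sized by PROCESS_COUNT. A minimal sketch of that pattern with a
stand-in worker (the real mp_puppet_config and its arguments are in the hunks
above):

```python
import multiprocessing
import os


def mp_puppet_config(args):
    # Stand-in worker; the real one runs puppet inside a container
    # and returns its exit code.
    (config_volume,) = args
    print('configuring %s' % config_volume)
    return 0


if __name__ == '__main__':
    process_count = int(os.environ.get('PROCESS_COUNT',
                                       multiprocessing.cpu_count()))
    process_map = [('glance_api',), ('nova_libvirt',), ('neutron',)]
    with multiprocessing.Pool(processes=process_count) as pool:
        returncodes = pool.map(mp_puppet_config, process_map)
    # puppet --detailed-exitcodes: 0 is success, 2 is success with
    # resource changes; anything else is a failure.
    if not all(rc in [0, 2] for rc in returncodes):
        raise SystemExit(1)
```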


@ -74,7 +74,7 @@ class PathManager(object):
try:
os.chown(self.path, target_uid, target_gid)
self._update()
except Exception as e:
except Exception:
LOG.exception('Could not change ownership of %s: ',
self.path)
else:
@ -172,5 +172,6 @@ class NovaStatedirOwnershipManager(object):
LOG.info('Nova statedir ownership complete')
if __name__ == '__main__':
NovaStatedirOwnershipManager('/var/lib/nova').run()


@ -68,7 +68,7 @@ if __name__ == '__main__':
if os.path.isfile(nova_cfg):
try:
config.read(nova_cfg)
except Exception as e:
except Exception:
LOG.exception('Error while reading nova.conf:')
else:
LOG.error('Nova configuration file %s does not exist', nova_cfg)
@ -107,7 +107,7 @@ if __name__ == '__main__':
LOG.info('Nova-compute service registered')
sys.exit(0)
LOG.info('Waiting for nova-compute service to register')
except Exception as e:
except Exception:
LOG.exception(
'Error while waiting for nova-compute service to register')
time.sleep(timeout)


@ -62,7 +62,7 @@ if __name__ == '__main__':
if os.path.isfile(nova_cfg):
try:
config.read(nova_cfg)
except Exception as e:
except Exception:
LOG.exception('Error while reading nova.conf:')
else:
LOG.error('Nova configuration file %s does not exist', nova_cfg)
@ -75,7 +75,7 @@ if __name__ == '__main__':
password=config.get('placement', 'password'),
project_name=config.get('placement', 'project_name'),
project_domain_name=config.get('placement', 'user_domain_name'),
auth_url=config.get('placement', 'auth_url')+'/v3')
auth_url=config.get('placement', 'auth_url') + '/v3')
sess = session.Session(auth=auth, verify=False)
keystone = client.Client(session=sess, interface='internal')
@ -97,7 +97,7 @@ if __name__ == '__main__':
LOG.error('Failed to get placement service endpoint!')
else:
break
except Exception as e:
except Exception:
LOG.exception('Retry - Failed to get placement service endpoint:')
time.sleep(timeout)
@ -113,7 +113,7 @@ if __name__ == '__main__':
while iterations > 1:
iterations -= 1
try:
r = requests.get(placement_endpoint_url+'/', verify=False)
r = requests.get(placement_endpoint_url + '/', verify=False)
if r.status_code == 200 and response_reg.match(r.text):
LOG.info('Placement service up! - %s', r.text)
sys.exit(0)
@ -123,7 +123,7 @@ if __name__ == '__main__':
LOG.info('Placement service not up - %s, %s',
r.status_code,
r.text)
except Exception as e:
except Exception:
LOG.exception('Error query the placement endpoint:')
time.sleep(timeout)


@ -192,8 +192,8 @@ class PathManagerCase(base.BaseTestCase):
with fake_testtree(testtree):
pathinfo = PathManager('/var/lib/nova/instances/foo/baz')
self.assertTrue(pathinfo.has_owner(current_uid, current_gid))
pathinfo.chown(current_uid+1, current_gid)
assert_ids(testtree, pathinfo.path, current_uid+1, current_gid)
pathinfo.chown(current_uid + 1, current_gid)
assert_ids(testtree, pathinfo.path, current_uid + 1, current_gid)
def test_chgrp(self):
testtree = generate_testtree1(current_uid, current_gid)
@ -201,8 +201,8 @@ class PathManagerCase(base.BaseTestCase):
with fake_testtree(testtree):
pathinfo = PathManager('/var/lib/nova/instances/foo/baz')
self.assertTrue(pathinfo.has_owner(current_uid, current_gid))
pathinfo.chown(current_uid, current_gid+1)
assert_ids(testtree, pathinfo.path, current_uid, current_gid+1)
pathinfo.chown(current_uid, current_gid + 1)
assert_ids(testtree, pathinfo.path, current_uid, current_gid + 1)
def test_chown_chgrp(self):
testtree = generate_testtree1(current_uid, current_gid)
@ -210,8 +210,9 @@ class PathManagerCase(base.BaseTestCase):
with fake_testtree(testtree):
pathinfo = PathManager('/var/lib/nova/instances/foo/baz')
self.assertTrue(pathinfo.has_owner(current_uid, current_gid))
pathinfo.chown(current_uid+1, current_gid+1)
assert_ids(testtree, pathinfo.path, current_uid+1, current_gid+1)
pathinfo.chown(current_uid + 1, current_gid + 1)
assert_ids(testtree, pathinfo.path,
current_uid + 1, current_gid + 1)
class NovaStatedirOwnershipManagerTestCase(base.BaseTestCase):


@ -1,9 +1,19 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import netaddr
import os
import openstack
import os
import subprocess
CTLPLANE_NETWORK_NAME = 'ctlplane'
@ -61,12 +71,15 @@ def _ensure_neutron_network(sdk):
return network
def _get_nameservers_for_version(servers, ipversion):
"""Get list of nameservers for an IP version"""
return [s for s in servers if netaddr.IPAddress(s).version == ipversion]
def _neutron_subnet_create(sdk, network_id, cidr, gateway, host_routes,
allocation_pools, name, segment_id, dns_nameservers):
allocation_pools, name, segment_id,
dns_nameservers):
try:
if netaddr.IPNetwork(cidr).version == 6:
subnet = sdk.network.create_subnet(
@ -137,6 +150,7 @@ def _neutron_add_subnet_segment_association(sdk, subnet_id, segment_id):
print('ERROR: Associating segment with subnet %s failed.' % subnet_id)
raise
def _neutron_segment_create(sdk, name, network_id, phynet):
try:
segment = sdk.network.create_segment(
@ -145,7 +159,7 @@ def _neutron_segment_create(sdk, name, network_id, phynet):
physical_network=phynet,
network_type='flat')
print('INFO: Neutron Segment created %s' % segment)
except Exception as ex:
except Exception:
print('ERROR: Neutron Segment %s create failed.' % name)
raise
@ -173,7 +187,7 @@ def _ensure_neutron_router(sdk, name, subnet_id):
def _get_subnet(sdk, cidr, network_id):
try:
subnet = list(sdk.network.subnets(cidr=cidr, network_id=network_id))
except Exception as ex:
except Exception:
print('ERROR: Get subnet with cidr %s failed.' % cidr)
raise
@ -206,12 +220,13 @@ def _local_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
"""Create's and updates the ctlplane subnet on the segment that is local to
the underclud.
"""
s = CONF['subnets'][CONF['local_subnet']]
name = CONF['local_subnet']
subnet = _get_subnet(sdk, s['NetworkCidr'], ctlplane_id)
segment = _get_segment(sdk, CONF['physical_network'], ctlplane_id)
if subnet:
if CONF['enable_routed_networks'] and subnet.segment_id == None:
if CONF['enable_routed_networks'] and subnet.segment_id is None:
# The subnet exists and does not have a segment association. Since
# routed networks is enabled in the configuration, we need to
# migrate the existing non-routed networks subnet to a routed
@ -239,10 +254,12 @@ def _local_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
return net_cidrs
def _remote_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
"""Create's and updates the ctlplane subnet(s) on segments that is
not local to the undercloud.
"""
for name in CONF['subnets']:
s = CONF['subnets'][name]
if name == CONF['local_subnet']:
@ -274,6 +291,7 @@ def _remote_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
return net_cidrs
if 'true' not in _run_command(['hiera', 'neutron_api_enabled'],
name='hiera').lower():
print('WARNING: UndercloudCtlplaneNetworkDeployment : The Neutron API '


@ -1,13 +1,24 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import openstack
import os
import subprocess
from keystoneauth1 import exceptions as ks_exceptions
from mistralclient.api import client as mistralclient
from mistralclient.api import base as mistralclient_exc
from mistralclient.api import client as mistralclient
CONF = json.loads(os.environ['config'])
@ -45,7 +56,7 @@ def _run_command(args, env=None, name=None):
def _configure_nova(sdk):
""" Disable nova quotas """
"""Disable nova quotas"""
sdk.set_compute_quotas('admin', cores='-1', instances='-1', ram='-1')
# Configure flavors.
@ -74,7 +85,7 @@ def _configure_nova(sdk):
def _create_default_keypair(sdk):
""" Set up a default keypair. """
"""Set up a default keypair."""
ssh_dir = os.path.join(CONF['home_dir'], '.ssh')
public_key_file = os.path.join(ssh_dir, 'id_rsa.pub')
if (not [True for kp in sdk.compute.keypairs() if kp.name == 'default'] and
@ -105,7 +116,7 @@ def _configure_workbooks_and_workflows(mistral):
def _store_passwords_in_mistral_env(mistral):
""" Store required passwords in a mistral environment """
"""Store required passwords in a mistral environment"""
env_name = 'tripleo.undercloud-config'
config_data = {
'undercloud_ceilometer_snmpd_password':
@ -153,7 +164,7 @@ def _create_default_plan(mistral):
nova_api_enabled = 'true' in _run_command(
['hiera', 'nova_api_enabled']).lower()
mistral_api_enabled = 'true' in _run_command(
['hiera','mistral_api_enabled']).lower()
['hiera', 'mistral_api_enabled']).lower()
tripleo_validations_enabled = 'true' in _run_command(
['hiera', 'tripleo_validations_enabled']).lower()


@ -1,4 +1,15 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Generate the endpoint_map.yaml template from data in the endpoint_data.yaml
@ -13,12 +24,6 @@ mismatch is detected.
"""
from __future__ import print_function
__all__ = ['load_endpoint_data', 'generate_endpoint_map_template',
'write_template', 'build_endpoint_map', 'check_up_to_date']
import collections
import copy
import itertools
@ -27,6 +32,9 @@ import sys
import yaml
__all__ = ['load_endpoint_data', 'generate_endpoint_map_template',
'write_template', 'build_endpoint_map', 'check_up_to_date']
(IN_FILE, OUT_FILE) = ('endpoint_data.yaml', 'endpoint_map.yaml')
SUBST = (SUBST_IP_ADDRESS, SUBST_CLOUDNAME) = ('IP_ADDRESS', 'CLOUDNAME')
@ -88,7 +96,8 @@ def make_parameter(ptype, default, description=None):
def template_parameters(config):
params = collections.OrderedDict()
params[PARAM_NETIPMAP] = make_parameter('json', {}, 'The Net IP map')
params[PARAM_SERVICENETMAP] = make_parameter('json', {}, 'The Service Net map')
params[PARAM_SERVICENETMAP] = make_parameter('json', {},
'The Service Net map')
params[PARAM_ENDPOINTMAP] = make_parameter('json',
endpoint_map_default(config),
'Mapping of service endpoint '


@ -1,15 +1,15 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import collections
@ -17,6 +17,7 @@ import datetime
import os
import re
import shutil
import six
import subprocess
import sys
import yaml
@ -62,7 +63,7 @@ def parse_opts(argv):
# FIXME: This duplicates code from tools/yaml-nic-config-2-script.py, we should
# refactor to share the common code
def to_commented_yaml(filename):
""" Convert comments into 'comments<num>: ...' YAML """
"""Convert comments into 'comments<num>: ...' YAML"""
out_str = ''
last_non_comment_spaces = ''
@ -108,7 +109,7 @@ def to_commented_yaml(filename):
# FIXME: This duplicates code from tools/yaml-nic-config-2-script.py, we should
# refactor to share the common code
def to_normal_yaml(filename):
""" Convert back to normal #commented YAML"""
"""Convert back to normal #commented YAML"""
with open(filename, 'r') as f:
data = f.read()
@ -168,14 +169,10 @@ class TemplateLoader(yaml.SafeLoader):
return collections.OrderedDict(self.construct_pairs(node))
if sys.version_info.major >= 3:
TemplateDumper.add_representer(str, TemplateDumper.description_presenter)
TemplateDumper.add_representer(bytes,
TemplateDumper.description_presenter)
else:
TemplateDumper.add_representer(str, TemplateDumper.description_presenter)
TemplateDumper.add_representer(unicode,
TemplateDumper.description_presenter)
TemplateDumper.add_representer(six.text_type,
TemplateDumper.description_presenter)
TemplateDumper.add_representer(six.binary_type,
TemplateDumper.description_presenter)
TemplateDumper.add_representer(collections.OrderedDict,
TemplateDumper.represent_ordered_dict)
@ -215,9 +212,10 @@ def process_templates_and_get_reference_parameters():
for x in roles_data
if x['name'] == OPTS.role_name))
except StopIteration:
raise RuntimeError('The role: {role_name} is not defined in roles '
'data file: {roles_data_file}'.format(
role_name=OPTS.role_name, roles_data_file=OPTS.roles_data))
raise RuntimeError(
'The role: {role_name} is not defined in roles '
'data file: {roles_data_file}'.format(
role_name=OPTS.role_name, roles_data_file=OPTS.roles_data))
refernce_file = '/'.join([temp_dir, 'network/config', NIC_CONFIG_REFERENCE,
nic_config_name])


@ -1,15 +1,15 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import jinja2
@ -38,6 +38,7 @@ def _shutil_copy_if_not_same(src, dst):
else:
raise
def parse_opts(argv):
parser = argparse.ArgumentParser(
description='Configure host network interfaces using a JSON'
@ -49,7 +50,8 @@ def parse_opts(argv):
help="""relative path to the roles_data.yaml file.""",
default='roles_data.yaml')
parser.add_argument('-n', '--network-data', metavar='NETWORK_DATA',
help="""relative path to the network_data.yaml file.""",
help=("""relative path to the network_data.yaml """
"""file."""),
default='network_data.yaml')
parser.add_argument('--safe',
action='store_true',
@ -86,7 +88,8 @@ def _j2_render_to_file(j2_template, j2_data, outfile_name=None,
# Search for templates relative to the current template path first
template_base = os.path.dirname(yaml_f)
j2_loader = jinja2.loaders.FileSystemLoader([template_base, __tht_root_dir])
j2_loader = \
jinja2.loaders.FileSystemLoader([template_base, __tht_root_dir])
try:
# Render the j2 template
@ -102,6 +105,7 @@ def _j2_render_to_file(j2_template, j2_data, outfile_name=None,
with open(outfile_name, 'w') as out_f:
out_f.write(r_template)
def process_templates(template_path, role_data_path, output_dir,
network_data_path, overwrite, dry_run):
@ -163,9 +167,9 @@ def process_templates(template_path, role_data_path, output_dir,
out_dir = subdir
if output_dir:
if template_path != '.':
# strip out base path if not default
temp = out_dir.split(template_path)[1]
out_dir = temp[1:] if temp.startswith('/') else temp
# strip out base path if not default
temp = out_dir.split(template_path)[1]
out_dir = temp[1:] if temp.startswith('/') else temp
out_dir = os.path.join(output_dir, out_dir)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
@ -255,7 +259,8 @@ def process_templates(template_path, role_data_path, output_dir,
template_data = j2_template.read()
j2_data = {'roles': role_data,
'networks': network_data}
out_f = os.path.basename(f).replace('.j2.yaml', '.yaml')
out_f = os.path.basename(f).replace('.j2.yaml',
'.yaml')
out_f_path = os.path.join(out_dir, out_f)
_j2_render_to_file(template_data, j2_data, out_f_path,
overwrite, dry_run)
@ -265,6 +270,7 @@ def process_templates(template_path, role_data_path, output_dir,
else:
print('Unexpected argument %s' % template_path)
def clean_templates(base_path, role_data_path, network_data_path):
def delete(f):


@ -42,6 +42,7 @@ def parse_opts(argv):
return opts
opts = parse_opts(sys.argv)
roles = collections.OrderedDict.fromkeys(opts.roles)


@ -1,15 +1,15 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import yaml


@ -1,32 +1,32 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import collections
import copy
import datetime
import os
import re
import shutil
import six
import sys
import traceback
import yaml
import six
import re
def parse_opts(argv):
parser = argparse.ArgumentParser(
description='Convert an old style NIC config file into the new format using '
'run-os-net-config.sh')
description='Convert an old style NIC config file into the new format '
'using run-os-net-config.sh')
parser.add_argument('--script-dir', metavar='<script directory>',
help="Relative path to run-os-net-config.sh",
default="network/scripts/run-os-net-config.sh")
@ -42,8 +42,9 @@ def parse_opts(argv):
return opts
#convert comments into 'comments<num>: ...' YAML
def to_commented_yaml(filename):
"""Convert comments into 'comments<num>: ...' YAML"""
out_str = ''
last_non_comment_spaces = ''
with open(filename, 'r') as f:
@ -51,38 +52,43 @@ def to_commented_yaml(filename):
for line in f:
# skip blank line
if line.isspace():
continue;
continue
char_count = 0
spaces = ''
for char in line:
char_count += 1
if char == ' ':
spaces+=' '
next;
spaces += ' '
next
elif char == '#':
last_non_comment_spaces = spaces
comment_count += 1
comment = line[char_count:-1]
out_str += "%scomment%i_%i: '%s'\n" % (last_non_comment_spaces, comment_count, len(spaces), comment)
break;
out_str += "%scomment%i_%i: '%s'\n" % \
(last_non_comment_spaces, comment_count, len(spaces),
comment)
break
else:
last_non_comment_spaces = spaces
out_str += line
#inline comments check
# inline comments check
m = re.match(".*:.*#(.*)", line)
if m:
comment_count += 1
out_str += "%s inline_comment%i: '%s'\n" % (last_non_comment_spaces, comment_count, m.group(1))
break;
out_str += "%s inline_comment%i: '%s'\n" % \
(last_non_comment_spaces, comment_count,
m.group(1))
break
with open(filename, 'w') as f:
f.write(out_str)
return out_str
#convert back to normal #commented YAML
def to_normal_yaml(filename):
"""Convert back to normal #commented YAML"""
with open(filename, 'r') as f:
data = f.read()
@ -92,8 +98,12 @@ def to_normal_yaml(filename):
for line in data.split('\n'):
# get_input not supported by run-os-net-config.sh script
line = line.replace('get_input: ', '')
m = re.match(" +comment[0-9]+_([0-9]+): '(.*)'.*", line) #normal comments
i = re.match(" +inline_comment[0-9]+: '(.*)'.*", line) #inline comments
# normal comments
m = re.match(" +comment[0-9]+_([0-9]+): '(.*)'.*", line)
# inline comments
i = re.match(" +inline_comment[0-9]+: '(.*)'.*", line)
if m:
if next_line_break:
out_str += '\n'
@ -122,9 +132,11 @@ def to_normal_yaml(filename):
class description(six.text_type):
pass
# FIXME: Some of this duplicates code from build_endpoint_map.py, we should
# refactor to share the common code
class TemplateDumper(yaml.SafeDumper):
def represent_ordered_dict(self, data):
return self.represent_dict(data.items())
@ -154,9 +166,12 @@ TemplateDumper.add_representer(collections.OrderedDict,
TemplateLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
TemplateLoader.construct_mapping)
def write_template(template, filename=None):
with open(filename, 'w') as f:
yaml.dump(template, f, TemplateDumper, width=120, default_flow_style=False)
yaml.dump(template, f, TemplateDumper, width=120,
default_flow_style=False)
def convert(filename, script_path):
print('Converting %s' % filename)
@ -170,7 +185,6 @@ def convert(filename, script_path):
if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
r[1].get('properties', {}).get('group') == 'os-apply-config' and
r[1].get('properties', {}).get('config', {}).get('os_net_config')):
#print("match %s" % r[0])
new_r = collections.OrderedDict()
new_r['type'] = 'OS::Heat::SoftwareConfig'
new_r['properties'] = collections.OrderedDict()
@ -179,7 +193,8 @@ def convert(filename, script_path):
'properties', {}).get('config', {}).get('os_net_config')
new_config = {'str_replace': collections.OrderedDict()}
new_config['str_replace']['template'] = {'get_file': script_path}
new_config['str_replace']['params'] = {'$network_config': old_net_config}
new_config['str_replace']['params'] = \
{'$network_config': old_net_config}
new_r['properties']['config'] = new_config
tpl['resources'][r[0]] = new_r
else:
@ -195,18 +210,16 @@ def convert(filename, script_path):
od_result['parameters'] = tpl['parameters']
od_result['resources'] = tpl['resources']
od_result['outputs'] = tpl['outputs']
#print('Result:')
#print('%s' % yaml.dump(od_result, Dumper=TemplateDumper, width=120, default_flow_style=False))
#print('---')
write_template(od_result, filename)
return 1
def check_old_style(filename):
with open(filename, 'r') as f:
tpl = yaml.load(open(filename).read())
tpl = yaml.load(f.read())
if isinstance(tpl.get('resources', {}), dict):
for r in (tpl.get('resources', {})).items():
@ -217,6 +230,7 @@ def check_old_style(filename):
return False
opts = parse_opts(sys.argv)
exit_val = 0
num_converted = 0
@ -231,8 +245,8 @@ for base_path in opts.files:
script_paths = [opts.script_dir]
script_paths.append('../../scripts/run-os-net-config.sh')
script_paths.append('../network/scripts/run-os-net-config.sh')
script_paths.append(
'/usr/share/openstack-tripleo-heat-templates/network/scripts/run-os-net-config.sh')
script_paths.append('/usr/share/openstack-tripleo-heat-templates/'
'network/scripts/run-os-net-config.sh')
script_path = None
for p in script_paths:
@ -240,7 +254,8 @@ for base_path in opts.files:
script_path = p
break
if script_path is None:
print("Error couldn't find run-os-net-config.sh relative to filename")
print("Error couldn't find run-os-net-config.sh relative "
"to filename")
sys.exit(1)
print("Using script at %s" % script_path)
@ -248,14 +263,16 @@ for base_path in opts.files:
extension = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
backup_filename = os.path.realpath(base_path) + '.' + extension
print('The yaml file will be overwritten and the original saved as %s'
% backup_filename)
if not (opts.yes or input("Overwrite %s? [y/n] " % base_path).lower() == 'y'):
print('The yaml file will be overwritten and the original saved '
'as %s' % backup_filename)
if not (opts.yes or
input("Overwrite %s? [y/n] " % base_path).lower() == 'y'):
print("Skipping file %s" % base_path)
continue
if os.path.exists(backup_filename):
print("Backup file already exists, skipping file %s" % base_path)
print("Backup file already exists, skipping file %s" %
base_path)
continue
shutil.copyfile(base_path, backup_filename)
@ -264,11 +281,13 @@ for base_path in opts.files:
num_converted += convert(base_path, script_path)
to_normal_yaml(base_path)
else:
print('File %s is not using old style NIC configuration' % base_path)
print('File %s is not using old style NIC configuration' %
base_path)
else:
print('Unexpected argument %s' % base_path)
if num_converted == 0:
exit_val = 1
exit_val = 1
sys.exit(exit_val)
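
to_commented_yaml() and to_normal_yaml() exist because a plain YAML load/dump
cycle throws away # comments, so the script smuggles each comment through as
an ordinary 'commentN...' scalar and rewrites it back afterwards. A toy
round-trip of that idea (assumes PyYAML >= 5.1 for sort_keys=False; the real
code uses a custom dumper and OrderedDicts):

```python
import re

import yaml

text = "a: 1\n# keep me\nb: 2\n"

# Encode: turn each comment line into a parseable scalar entry.
encoded = ''
count = 0
for line in text.splitlines(True):
    m = re.match(r'( *)#(.*)', line)
    if m:
        count += 1
        encoded += "%scomment%d: '%s'\n" % (m.group(1), count,
                                            m.group(2).strip())
    else:
        encoded += line

# The comments now survive parsing as ordinary mapping keys.
data = yaml.safe_load(encoded)
dumped = yaml.safe_dump(data, default_flow_style=False, sort_keys=False)

# Decode: rewrite the placeholder entries back into comments.
restored = re.sub(r"comment\d+: '?(.*?)'?$", r'# \1', dumped, flags=re.M)
print(restored)   # a: 1 / # keep me / b: 2
```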


@ -1,15 +1,15 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
@ -51,11 +51,12 @@ OPTIONAL_DOCKER_SECTIONS = ['container_puppet_tasks', 'upgrade_tasks',
'pre_upgrade_rolling_tasks',
'fast_forward_upgrade_tasks',
'fast_forward_post_upgrade_tasks',
'post_upgrade_tasks', 'update_tasks',
'post_upgrade_tasks', 'update_tasks',
'post_update_tasks', 'service_config_settings',
'host_prep_tasks', 'metadata_settings',
'kolla_config', 'global_config_settings',
'external_deploy_tasks', 'external_post_deploy_tasks',
'external_deploy_tasks',
'external_post_deploy_tasks',
'container_config_scripts', 'step_config',
'monitoring_subscription', 'scale_tasks',
'external_update_tasks', 'external_upgrade_tasks']
@ -63,139 +64,130 @@ OPTIONAL_DOCKER_SECTIONS = ['container_puppet_tasks', 'upgrade_tasks',
ANSIBLE_TASKS_SECTIONS = ['upgrade_tasks', 'pre_upgrade_rolling_tasks',
'fast_forward_upgrade_tasks',
'fast_forward_post_upgrade_tasks',
'post_upgrade_tasks', 'update_tasks',
'post_upgrade_tasks', 'update_tasks',
'post_update_tasks', 'host_prep_tasks',
'external_deploy_tasks',
'external_post_deploy_tasks' ]
'external_post_deploy_tasks']
REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
'config_image']
OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = [ 'puppet_tags', 'volumes' ]
OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = ['puppet_tags', 'volumes']
REQUIRED_DOCKER_LOGGING_OUTPUTS = ['config_settings', 'docker_config',
'volumes', 'host_prep_tasks']
# Mapping of parameter names to a list of the fields we should _not_ enforce
# consistency across files on. This should only contain parameters whose
# definition we cannot change for backwards compatibility reasons. New
# parameters to the templates should not be added to this list.
PARAMETER_DEFINITION_EXCLUSIONS = {'CephPools': ['description',
'type',
'default'],
'ManagementNetCidr': ['default'],
'ManagementAllocationPools': ['default'],
'ExternalNetCidr': ['default'],
'ExternalAllocationPools': ['default'],
'StorageNetCidr': ['default'],
'StorageAllocationPools': ['default'],
'StorageMgmtNetCidr': ['default'],
'StorageMgmtAllocationPools': ['default'],
'TenantNetCidr': ['default'],
'TenantAllocationPools': ['default'],
'InternalApiNetCidr': ['default'],
'InternalApiAllocationPools': ['default'],
'UpdateIdentifier': ['description'],
'key_name': ['default'],
'CeilometerAgentCentralLoggingSource': ['default'],
'CeilometerAgentIpmiLoggingSource': ['default'],
'CeilometerAgentNotificationLoggingSource': ['default'],
'CinderApiLoggingSource': ['default'],
'CinderSchedulerLoggingSource': ['default'],
'CinderVolumeLoggingSource': ['default'],
'DesignateApiLoggingSource': ['default'],
'DesignateCentralLoggingSource': ['default'],
'DesignateMiniDNSLoggingSource': ['default'],
'DesignateProducerLoggingSource': ['default'],
'DesignateSinkLoggingSource': ['default'],
'DesignateWorkerLoggingSource': ['default'],
'Ec2ApiLoggingSource': ['default'],
'GlanceApiLoggingSource': ['default'],
'GnocchiApiLoggingSource': ['default'],
'HeatApiCfnLoggingSource': ['default'],
'HeatApiLoggingSource': ['default'],
'HeatEngineLoggingSource': ['default'],
'KeystoneLoggingSource': ['default'],
'KeystoneErrorLoggingSource': ['default'],
'KeystoneAdminAccessLoggingSource': ['default'],
'KeystoneAdminErrorLoggingSource': ['default'],
'KeystoneMainAcccessLoggingSource': ['default'],
'KeystoneMainErrorLoggingSource': ['default'],
'NeutronApiLoggingSource': ['default'],
'NeutronDhcpAgentLoggingSource': ['default'],
'NeutronL3AgentLoggingSource': ['default'],
'NeutronMetadataAgentLoggingSource': ['default'],
'NeutronOpenVswitchAgentLoggingSource': ['default'],
'NovaApiLoggingSource': ['default'],
'NovaComputeLoggingSource': ['default'],
'NovaConductorLoggingSource': ['default'],
'NovaMetadataLoggingSource': ['default'],
'NovaSchedulerLoggingSource': ['default'],
'NovaVncproxyLoggingSource': ['default'],
'OctaviaApiLoggingSource': ['default'],
'OctaviaHealthManagerLoggingSource': ['default'],
'OctaviaHousekeepingLoggingSource': ['default'],
'OctaviaWorkerLoggingSource': ['default'],
'OvnMetadataAgentLoggingSource': ['default'],
'PlacementLoggingSource': ['default'],
'SaharaApiLoggingSource': ['default'],
'SaharaEngineLoggingSource': ['default'],
# There's one template that defines this
# differently, and I'm not sure if we can
# safely change it.
'ControlPlaneDefaultRoute': ['default'],
# TODO(bnemec): Address these existing
# inconsistencies.
'ServiceNetMap': ['description', 'default'],
'network': ['default'],
'ControlPlaneIP': ['default',
'description'],
'ControlPlaneIp': ['default',
'description'],
'NeutronBigswitchLLDPEnabled': ['default'],
'NeutronWorkers': ['description'],
'ServerMetadata': ['description'],
'server': ['description'],
'servers': ['description'],
'ExtraConfig': ['description'],
'DefaultPasswords': ['description',
'default'],
'BondInterfaceOvsOptions': ['description',
'default',
'constraints'],
# NOTE(anil): This is a temporary change and
# will be removed once bug #1767070 properly
# fixed. OVN supports only VLAN, geneve
# and flat for NeutronNetworkType. But VLAN
# tenant networks have a limited support
# in OVN. Till that is fixed, we restrict
# NeutronNetworkType to 'geneve'.
'NeutronNetworkType': ['description',
'default',
'constraints'],
'KeyName': ['constraints'],
'OVNSouthboundServerPort': ['description'],
'ExternalInterfaceDefaultRoute':
['description', 'default'],
'ManagementInterfaceDefaultRoute':
['description', 'default'],
'IPPool': ['description'],
'SSLCertificate': ['description',
'default',
'hidden'],
'NodeIndex': ['description'],
'name': ['description', 'default'],
'image': ['description', 'default'],
'NeutronBigswitchAgentEnabled': ['default'],
'EndpointMap': ['description', 'default'],
'ContainerManilaConfigImage': ['description',
'default'],
'replacement_policy': ['default'],
'CloudDomain': ['description', 'default'],
'EnableLoadBalancer': ['description'],
'ControllerExtraConfig': ['description'],
'NovaComputeExtraConfig': ['description'],
'controllerExtraConfig': ['description'],
'ContainerSwiftConfigImage': ['default'],
'input_values': ['default'],
'fixed_ips': ['default', 'type']
}
PARAMETER_DEFINITION_EXCLUSIONS = {
'CephPools': ['description', 'type', 'default'],
'ManagementNetCidr': ['default'],
'ManagementAllocationPools': ['default'],
'ExternalNetCidr': ['default'],
'ExternalAllocationPools': ['default'],
'StorageNetCidr': ['default'],
'StorageAllocationPools': ['default'],
'StorageMgmtNetCidr': ['default'],
'StorageMgmtAllocationPools': ['default'],
'TenantNetCidr': ['default'],
'TenantAllocationPools': ['default'],
'InternalApiNetCidr': ['default'],
'InternalApiAllocationPools': ['default'],
'UpdateIdentifier': ['description'],
'key_name': ['default'],
'CeilometerAgentCentralLoggingSource': ['default'],
'CeilometerAgentIpmiLoggingSource': ['default'],
'CeilometerAgentNotificationLoggingSource': ['default'],
'CinderApiLoggingSource': ['default'],
'CinderSchedulerLoggingSource': ['default'],
'CinderVolumeLoggingSource': ['default'],
'DesignateApiLoggingSource': ['default'],
'DesignateCentralLoggingSource': ['default'],
'DesignateMiniDNSLoggingSource': ['default'],
'DesignateProducerLoggingSource': ['default'],
'DesignateSinkLoggingSource': ['default'],
'DesignateWorkerLoggingSource': ['default'],
'Ec2ApiLoggingSource': ['default'],
'GlanceApiLoggingSource': ['default'],
'GnocchiApiLoggingSource': ['default'],
'HeatApiCfnLoggingSource': ['default'],
'HeatApiLoggingSource': ['default'],
'HeatEngineLoggingSource': ['default'],
'KeystoneLoggingSource': ['default'],
'KeystoneErrorLoggingSource': ['default'],
'KeystoneAdminAccessLoggingSource': ['default'],
'KeystoneAdminErrorLoggingSource': ['default'],
'KeystoneMainAcccessLoggingSource': ['default'],
'KeystoneMainErrorLoggingSource': ['default'],
'NeutronApiLoggingSource': ['default'],
'NeutronDhcpAgentLoggingSource': ['default'],
'NeutronL3AgentLoggingSource': ['default'],
'NeutronMetadataAgentLoggingSource': ['default'],
'NeutronOpenVswitchAgentLoggingSource': ['default'],
'NovaApiLoggingSource': ['default'],
'NovaComputeLoggingSource': ['default'],
'NovaConductorLoggingSource': ['default'],
'NovaMetadataLoggingSource': ['default'],
'NovaSchedulerLoggingSource': ['default'],
'NovaVncproxyLoggingSource': ['default'],
'OctaviaApiLoggingSource': ['default'],
'OctaviaHealthManagerLoggingSource': ['default'],
'OctaviaHousekeepingLoggingSource': ['default'],
'OctaviaWorkerLoggingSource': ['default'],
'OvnMetadataAgentLoggingSource': ['default'],
'PlacementLoggingSource': ['default'],
'SaharaApiLoggingSource': ['default'],
'SaharaEngineLoggingSource': ['default'],
# There's one template that defines this
# differently, and I'm not sure if we can
# safely change it.
'ControlPlaneDefaultRoute': ['default'],
# TODO(bnemec): Address these existing inconsistencies.
'ServiceNetMap': ['description', 'default'],
'network': ['default'],
'ControlPlaneIP': ['default',
'description'],
'ControlPlaneIp': ['default',
'description'],
'NeutronBigswitchLLDPEnabled': ['default'],
'NeutronWorkers': ['description'],
'ServerMetadata': ['description'],
'server': ['description'],
'servers': ['description'],
'ExtraConfig': ['description'],
'DefaultPasswords': ['description',
'default'],
'BondInterfaceOvsOptions': ['description',
'default',
'constraints'],
# NOTE(anil): This is a temporary change and
# will be removed once bug #1767070 properly
# fixed. OVN supports only VLAN, geneve
# and flat for NeutronNetworkType. But VLAN
# tenant networks have a limited support
# in OVN. Till that is fixed, we restrict
# NeutronNetworkType to 'geneve'.
'NeutronNetworkType': ['description', 'default', 'constraints'],
'KeyName': ['constraints'],
'OVNSouthboundServerPort': ['description'],
'ExternalInterfaceDefaultRoute': ['description', 'default'],
'ManagementInterfaceDefaultRoute': ['description', 'default'],
'IPPool': ['description'],
'SSLCertificate': ['description', 'default', 'hidden'],
'NodeIndex': ['description'],
'name': ['description', 'default'],
'image': ['description', 'default'],
'NeutronBigswitchAgentEnabled': ['default'],
'EndpointMap': ['description', 'default'],
'ContainerManilaConfigImage': ['description', 'default'],
'replacement_policy': ['default'],
'CloudDomain': ['description', 'default'],
'EnableLoadBalancer': ['description'],
'ControllerExtraConfig': ['description'],
'NovaComputeExtraConfig': ['description'],
'controllerExtraConfig': ['description'],
'ContainerSwiftConfigImage': ['default'],
'input_values': ['default'],
'fixed_ips': ['default', 'type']
}
PREFERRED_CAMEL_CASE = {
'ec2api': 'Ec2Api',
@ -270,6 +262,7 @@ HEAT_OUTPUTS_EXCLUSIONS = [
'./extraconfig/pre_network/host_config_and_reboot.yaml'
]
def exit_usage():
print('Usage %s <yaml file or directory>' % sys.argv[0])
sys.exit(1)
@ -308,9 +301,9 @@ def validate_endpoint_map(base_map, env_map):
def validate_role_name(filename):
role_data = yaml.load(open(filename).read())[0]
if role_data['name'] != os.path.basename(filename).split('.')[0]:
print('ERROR: role name should match file name for role : %s.'
% filename)
return 1
print('ERROR: role name should match file name for role : %s.'
% filename)
return 1
return 0
@ -363,9 +356,9 @@ def validate_controller_dashboard(filename, tpl):
def validate_hci_role(hci_role_filename, hci_role_tpl):
role_files = ['HciCephAll', 'HciCephFile', 'HciCephMon', 'HciCephObject']
if hci_role_filename in ['./roles/'+ x +'.yaml' for x in role_files]:
compute_role_filename = os.path.join(os.path.dirname(hci_role_filename),
'./Compute.yaml')
if hci_role_filename in ['./roles/' + x + '.yaml' for x in role_files]:
compute_role_filename = \
os.path.join(os.path.dirname(hci_role_filename), './Compute.yaml')
compute_role_tpl = yaml.load(open(compute_role_filename).read())
compute_role_services = compute_role_tpl[0]['ServicesDefault']
for role in hci_role_tpl:
@ -397,11 +390,12 @@ def validate_hci_role(hci_role_filename, hci_role_tpl):
return 1
return 0
def validate_ceph_role(ceph_role_filename, ceph_role_tpl):
role_files = ['CephAll', 'CephFile', 'CephMon', 'CephObject']
if ceph_role_filename in ['./roles/'+ x +'.yaml' for x in role_files]:
ceph_storage_role_filename = os.path.join(os.path.dirname(ceph_role_filename),
'./CephStorage.yaml')
if ceph_role_filename in ['./roles/' + x + '.yaml' for x in role_files]:
ceph_storage_role_filename = \
os.path.join(os.path.dirname(ceph_role_filename), './CephStorage.yaml')
ceph_storage_role_tpl = yaml.load(open(ceph_storage_role_filename).read())
ceph_storage_role_services = ceph_storage_role_tpl[0]['ServicesDefault']
for role in ceph_role_tpl:
@ -427,6 +421,7 @@ def validate_ceph_role(ceph_role_filename, ceph_role_tpl):
return 1
return 0
def validate_controller_no_ceph_role(filename, tpl):
control_role_filename = os.path.join(os.path.dirname(filename),
'./Controller.yaml')
@ -448,6 +443,7 @@ def validate_controller_no_ceph_role(filename, tpl):
return 1
return 0
def validate_with_compute_role_services(role_filename, role_tpl, exclude_service=()):
cmpt_filename = os.path.join(os.path.dirname(role_filename),
'./Compute.yaml')
@ -484,6 +480,7 @@ def validate_with_compute_role_services(role_filename, role_tpl, exclude_service
return 0
def validate_multiarch_compute_roles(role_filename, role_tpl):
errors = 0
roles_dir = os.path.dirname(role_filename)
@ -572,7 +569,7 @@ def validate_docker_service_mysql_usage(filename, tpl):
def read_all(incfile, inctpl):
# search for included content
content = inctpl['outputs']['role_data']['value'].get('config_settings',{})
content = inctpl['outputs']['role_data']['value'].get('config_settings', {})
all_content.append(content)
included_res[:] = []
if search(content, match_included_res, no_op):
@ -582,7 +579,7 @@ def validate_docker_service_mysql_usage(filename, tpl):
# disregard class names, only consider file names
if 'OS::' in f:
continue
newfile = os.path.normpath(os.path.dirname(incfile)+'/'+f)
newfile = os.path.normpath(os.path.dirname(incfile) + '/' + f)
newtmp = yaml.load(open(newfile).read())
read_all(newfile, newtmp)
@ -667,18 +664,19 @@ def validate_docker_service(filename, tpl):
if key in OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS:
continue
else:
print('ERROR: %s should not be in puppet_config section.'
% key)
return 1
print('ERROR: %s should not be in puppet_config section.'
% key)
return 1
for key in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS:
if key not in puppet_config:
print('ERROR: %s is required in puppet_config for %s.'
% (key, filename))
return 1
if key not in puppet_config:
print('ERROR: %s is required in puppet_config for %s.'
% (key, filename))
return 1
config_volume = puppet_config.get('config_volume')
expected_config_image_parameter = "Container%sConfigImage" % to_camel_case(config_volume)
if config_volume and not expected_config_image_parameter in tpl.get('parameters', []):
expected_config_image_parameter = \
"Container%sConfigImage" % to_camel_case(config_volume)
if config_volume and expected_config_image_parameter not in tpl.get('parameters', []):
print('ERROR: Missing %s heat parameter for %s config_volume.'
% (expected_config_image_parameter, config_volume))
return 1
@ -699,8 +697,9 @@ def validate_docker_service(filename, tpl):
command = ' '.join(map(str, command))
if 'bootstrap_host_exec' in command \
and container.get('user') != 'root':
print('ERROR: bootstrap_host_exec needs to run as the root user.')
return 1
print('ERROR: bootstrap_host_exec needs to run '
'as the root user.')
return 1
if 'upgrade_tasks' in role_data and role_data['upgrade_tasks']:
if (validate_upgrade_tasks(role_data['upgrade_tasks']) or
@ -793,7 +792,7 @@ def validate_service(filename, tpl):
def _rsearch_keys(d, pattern, search_keynames=False, enter_lists=False):
""" Deep regex search through a dict for k or v matching a pattern
"""Deep regex search through a dict for k or v matching a pattern
Returns a list of the matched parent keys. Nested keypaths are
represented as lists. Looks for either values (default) or keys matching
@ -871,8 +870,9 @@ def _rsearch_keys(d, pattern, search_keynames=False, enter_lists=False):
result = []
return _rsearch_keys_nested(d, pattern, search_keynames, enter_lists)
def _get(d, path):
""" Get a value (or None) from a dict by path given as a list
"""Get a value (or None) from a dict by path given as a list
Integer values represent indexes in lists, string values are for dict keys
"""
@ -885,8 +885,9 @@ def _get(d, path):
return None
return d
def validate_service_hiera_interpol(f, tpl):
""" Validate service templates for hiera interpolation rules
"""Validate service templates for hiera interpolation rules
Find all {get_param: [ServiceNetMap, ...]} missing hiera
interpolation of IP addresses or network ranges vs
@ -979,6 +980,7 @@ def validate_service_hiera_interpol(f, tpl):
else:
return 0
def validate_upgrade_tasks_duplicate_whens(filename):
"""Take a heat template and starting at the upgrade_tasks
try to detect duplicate 'when:' statements
@ -1188,6 +1190,7 @@ def validate(filename, param_map):
return retval
def validate_upgrade_tasks(upgrade_tasks):
# some templates define its upgrade_tasks via list_concat
if isinstance(upgrade_tasks, dict):
@ -1200,15 +1203,20 @@ def validate_upgrade_tasks(upgrade_tasks):
task_name = task.get("name", "")
whenline = task.get("when", "")
if (type(whenline) == list):
if any('step|int ' in condition for condition in whenline) and ('step|int == ' not in whenline[0]):
print('ERROR: \'step|int ==\' condition should be evaluated first in when conditions for task (%s)' % (task))
return 1
if any('step|int ' in condition for condition in whenline) \
and ('step|int == ' not in whenline[0]):
print('ERROR: \'step|int ==\' condition should be evaluated '
'first in when conditions for task (%s)' % (task))
return 1
else:
if (' and ' in whenline) and (' or ' not in whenline) \
and args.quiet < 2:
print("Warning: Consider specifying \'and\' conditions as a list to improve readability in task: \"%s\"" % (task_name))
print("Warning: Consider specifying \'and\' conditions as "
"a list to improve readability in task: \"%s\""
% (task_name))
return 0
def validate_network_data_file(data_file_path):
try:
data_file = yaml.load(open(data_file_path).read())
@ -1227,6 +1235,7 @@ def validate_network_data_file(data_file_path):
return 1
return 0
def validate_nic_config_file(filename, tpl):
try:
if isinstance(tpl.get('resources', {}), dict):
@ -1243,6 +1252,7 @@ def validate_nic_config_file(filename, tpl):
return 1
return 0
def parse_args():
p = argparse.ArgumentParser()
@ -1256,6 +1266,7 @@ def parse_args():
return p.parse_args()
args = parse_args()
path_args = args.path_args
quiet = args.quiet
@ -1273,9 +1284,9 @@ for base_path in path_args:
for f in files:
file_path = os.path.join(subdir, f)
if 'environments/services-docker' in file_path:
print("ERROR: environments/services-docker should not be used "
"any more, use environments/services instead: %s " %
file_path)
print("ERROR: environments/services-docker should not be "
"used any more, use environments/services instead: "
"%s " % file_path)
failed_files.append(file_path)
exit_val |= 1
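
For context on the big reflowed block above: PARAMETER_DEFINITION_EXCLUSIONS
maps a parameter name to the fields that the cross-file consistency check
should skip. A hypothetical sketch of how such a map is consumed (the actual
validation loop lives in unshown parts of yaml-validate.py):

```python
PARAMETER_DEFINITION_EXCLUSIONS = {'NeutronWorkers': ['description']}


def inconsistent_fields(name, seen_definition, new_definition):
    """Return the fields whose values differ, minus the excluded ones."""
    excluded = set(PARAMETER_DEFINITION_EXCLUSIONS.get(name, []))
    fields = set(seen_definition) | set(new_definition)
    return sorted(f for f in fields - excluded
                  if seen_definition.get(f) != new_definition.get(f))


# 'description' differs too, but it is excluded for NeutronWorkers,
# so only the 'type' mismatch is reported.
print(inconsistent_fields('NeutronWorkers',
                          {'type': 'number', 'description': 'old text'},
                          {'type': 'string', 'description': 'new text'}))
```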

tox.ini

@ -26,12 +26,35 @@ commands =
python ./tools/yaml-validate.py .
bash -c ./tools/roles-data-validation.sh
bash -c ./tools/check-up-to-date.sh
flake8 ./container_config_scripts/
flake8 --exclude releasenotes --ignore E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,W503,W504,E501,E731,W605
[testenv:flake8]
basepython = python3
commands =
flake8 ./container_config_scripts/
# E125 is deliberately excluded. See
# https://github.com/jcrocholl/pep8/issues/126. It's just wrong.
#
# Most of the whitespace related rules (E12* and E131) are excluded
# because while they are often useful guidelines, strict adherence to
# them ends up causing some really odd code formatting and forced
# extra line breaks. Updating code to enforce these will be a hard sell.
#
# H405 is another one that is good as a guideline, but sometimes
# multiline doc strings just don't have a natural summary
# line. Rejecting code for this reason is wrong.
#
# E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301
#
# The following two are also ignored because we do not think they are useful.
# W503 line break before binary operator
# W504 line break after binary operator
#
# The following rules are currently ignored, but will be enforced
# in the future
# E501 line too long
# E731 do not assign a lambda expression, use a def
# W605 invalid escape sequence
ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,W503,W504,E501,E731,W605
exclude = releasenotes
[testenv:templates]
basepython = python3

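The tox comments above name the excluded checks only by their codes; a few
illustrative violations for the ones that are easy to show in Python (E501,
line too long, is omitted for obvious reasons):

```python
import re

# W503 / W504: pep8 can flag a line break either before or after a
# binary operator, so one of the pair is usually ignored (here: both).
total = (1
         + 2    # W503: break before the operator
         + 3)
total = (1 +
         2 +    # W504: break after the operator
         3)

# E731: assigning a lambda instead of writing a def.
square = lambda x: x * x     # preferred: def square(x): return x * x

# W605: an invalid escape sequence in a non-raw string.
pattern = re.compile('\d+')  # the fix is r'\d+'

print(total, square(4), bool(pattern.match('42')))
```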

@ -762,4 +762,5 @@ resource_registry:
self.nested_output)
self.assertEqual(expected, f.read())
GeneratorTestCase.generate_scenarios()


@ -12,15 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import io
import tempfile
import mock
from oslotest import base
import six
import testscenarios
import yaml
import yaql
class YAQLTestCase(base.BaseTestCase):
@ -32,4 +25,3 @@ class YAQLTestCase(base.BaseTestCase):
for i in path.split('.'):
data = data[i]
return data['yaql']['expression']