Enforce pep8/pyflakes rules on Python code

This change makes sure that we apply pep8/pyflakes checks to all
Python code to improve its readability.
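
For reference, a minimal sketch of how such checks are typically run
locally in OpenStack projects (the environment name below is an
assumption, not a claim about this repo's tox.ini; flake8 bundles the
pyflakes and pep8/pycodestyle checkers):

    # hypothetical invocation; adjust to the actual tox.ini
    tox -e pep8
    # or call the checker directly
    flake8 .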

Note that some rules enforced in other OpenStack projects are not yet
turned on here; they should be enabled in the future.
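
As one illustration (the actual list of deferred rules is not shown
here): if pycodestyle's E501 "line too long" check is among them,
enabling it later would mean wrapping long lines that this change
leaves untouched, for example:

    # hypothetical rewrap of a long line left as-is below,
    # if E501 were enabled later
    config_file = os.environ.get(
        'CONFIG', '/var/lib/container-puppet/container-puppet.json')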

Change-Id: Iaf0299983d3a3fe48e3beb8f47bd33c21deb4972
Takashi Kajinami 2019-08-29 20:49:40 +09:00
parent 9b88629d63
commit f47dfe1059
17 changed files with 412 additions and 321 deletions

@@ -1,32 +1,32 @@
 #!/usr/bin/env python
-#
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.

 # Shell script tool to run puppet inside of the given container image.
-# Uses the config file at /var/lib/container-puppet/container-puppet.json as a source for a JSON
-# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
+# Uses the config file at /var/lib/container-puppet/container-puppet.json
+# as a source for a JSON array of
+# [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
 # that can be used to generate config files or run ad-hoc puppet modules
 # inside of a container.

 import glob
 import json
 import logging
+import multiprocessing
 import os
 import subprocess
 import sys
 import tempfile
 import time
-import multiprocessing

 from paunch import runner as containers_runner

@@ -45,7 +45,7 @@ def get_logger():
     if logger is None:
         logger = logging.getLogger()
         ch = logging.StreamHandler(sys.stdout)
-        if os.environ.get('DEBUG') in ['True', 'true'] :
+        if os.environ.get('DEBUG') in ['True', 'true']:
             logger.setLevel(logging.DEBUG)
             ch.setLevel(logging.DEBUG)
         else:

@@ -93,6 +93,7 @@ if (os.environ.get('MOUNT_HOST_PUPPET', 'true') == 'true' and
         PUPPETS not in cli_dcmd):
     cli_dcmd.extend(['--volume', PUPPETS])

+
 # this is to match what we do in deployed-server
 def short_hostname():
     subproc = subprocess.Popen(['hostname', '-s'],

@@ -188,6 +189,7 @@ def rm_container(name):
             'No such container: {}\n'.format(name):
         log.debug(cmd_stderr)

+
 process_count = int(os.environ.get('PROCESS_COUNT',
                                    multiprocessing.cpu_count()))
 config_file = os.environ.get('CONFIG', '/var/lib/container-puppet/container-puppet.json')

@@ -403,12 +405,11 @@ if not os.path.exists(sh_script):
 """)

-
 def mp_puppet_config(*args):
-    (config_volume,puppet_tags,manifest,config_image,volumes,privileged,check_mode) = args[0]
+    (config_volume, puppet_tags, manifest, config_image, volumes, privileged, check_mode) = args[0]
     log = get_logger()
-    log.info('Starting configuration of %s using image %s' % (config_volume,
-             config_image))
+    log.info('Starting configuration of %s using image %s' %
+             (config_volume, config_image))
     log.debug('config_volume %s' % config_volume)
     log.debug('puppet_tags %s' % puppet_tags)
     log.debug('manifest %s' % manifest)

@@ -466,7 +467,6 @@ def mp_puppet_config(*args):
     for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
         env[k] = os.environ.get(k)
     common_dcmd += cli_dcmd
-
     if check_mode:

@@ -483,10 +483,10 @@ def mp_puppet_config(*args):
     if os.environ.get('NET_HOST', 'false') == 'true':
         log.debug('NET_HOST enabled')
         common_dcmd.extend(['--net', 'host', '--volume',
                             '/etc/hosts:/etc/hosts:ro'])
     else:
         log.debug('running without containers Networking')
-        dcmd.extend(['--net', 'none'])
+        common_dcmd.extend(['--net', 'none'])
     # script injection as the last mount to make sure it's accessible
     # https://github.com/containers/libpod/issues/1844

@@ -512,7 +512,7 @@ def mp_puppet_config(*args):
     retval = subproc.returncode
     # puppet with --detailed-exitcodes will return 0 for success and no changes
     # and 2 for success and resource changes. Other numbers are failures
-    if retval in [0,2]:
+    if retval in [0, 2]:
         if cmd_stdout:
             log.debug('%s run succeeded: %s' % (cmd, cmd_stdout))
         if cmd_stderr:

@@ -534,6 +534,7 @@ def mp_puppet_config(*args):
     log.info('Finished processing puppet configs for %s' % (config_volume))
     return retval

+
 # Holds all the information for each process to consume.
 # Instead of starting them all linearly we run them using a process
 # pool. This creates a list of arguments for the above function

@@ -608,4 +609,3 @@ for infile in infiles:
     if not success:
         sys.exit(1)
-

@@ -74,7 +74,7 @@ class PathManager(object):
         try:
             os.chown(self.path, target_uid, target_gid)
             self._update()
-        except Exception as e:
+        except Exception:
             LOG.exception('Could not change ownership of %s: ',
                           self.path)
         else:

@@ -172,5 +172,6 @@ class NovaStatedirOwnershipManager(object):
         LOG.info('Nova statedir ownership complete')

+
 if __name__ == '__main__':
     NovaStatedirOwnershipManager('/var/lib/nova').run()

@@ -68,7 +68,7 @@ if __name__ == '__main__':
     if os.path.isfile(nova_cfg):
         try:
             config.read(nova_cfg)
-        except Exception as e:
+        except Exception:
             LOG.exception('Error while reading nova.conf:')
     else:
         LOG.error('Nova configuration file %s does not exist', nova_cfg)

@@ -107,7 +107,7 @@ if __name__ == '__main__':
             LOG.info('Nova-compute service registered')
             sys.exit(0)
         LOG.info('Waiting for nova-compute service to register')
-    except Exception as e:
+    except Exception:
         LOG.exception(
             'Error while waiting for nova-compute service to register')
     time.sleep(timeout)

@@ -62,7 +62,7 @@ if __name__ == '__main__':
     if os.path.isfile(nova_cfg):
         try:
             config.read(nova_cfg)
-        except Exception as e:
+        except Exception:
             LOG.exception('Error while reading nova.conf:')
     else:
         LOG.error('Nova configuration file %s does not exist', nova_cfg)

@@ -75,7 +75,7 @@ if __name__ == '__main__':
             password=config.get('placement', 'password'),
             project_name=config.get('placement', 'project_name'),
             project_domain_name=config.get('placement', 'user_domain_name'),
-            auth_url=config.get('placement', 'auth_url')+'/v3')
+            auth_url=config.get('placement', 'auth_url') + '/v3')
         sess = session.Session(auth=auth, verify=False)
         keystone = client.Client(session=sess, interface='internal')

@@ -97,7 +97,7 @@ if __name__ == '__main__':
                 LOG.error('Failed to get placement service endpoint!')
             else:
                 break
-        except Exception as e:
+        except Exception:
             LOG.exception('Retry - Failed to get placement service endpoint:')
         time.sleep(timeout)

@@ -113,7 +113,7 @@ if __name__ == '__main__':
     while iterations > 1:
         iterations -= 1
         try:
-            r = requests.get(placement_endpoint_url+'/', verify=False)
+            r = requests.get(placement_endpoint_url + '/', verify=False)
             if r.status_code == 200 and response_reg.match(r.text):
                 LOG.info('Placement service up! - %s', r.text)
                 sys.exit(0)

@@ -123,7 +123,7 @@ if __name__ == '__main__':
                 LOG.info('Placement service not up - %s, %s',
                          r.status_code,
                          r.text)
-        except Exception as e:
+        except Exception:
             LOG.exception('Error query the placement endpoint:')
         time.sleep(timeout)

@@ -192,8 +192,8 @@ class PathManagerCase(base.BaseTestCase):
         with fake_testtree(testtree):
             pathinfo = PathManager('/var/lib/nova/instances/foo/baz')
             self.assertTrue(pathinfo.has_owner(current_uid, current_gid))
-            pathinfo.chown(current_uid+1, current_gid)
-            assert_ids(testtree, pathinfo.path, current_uid+1, current_gid)
+            pathinfo.chown(current_uid + 1, current_gid)
+            assert_ids(testtree, pathinfo.path, current_uid + 1, current_gid)

     def test_chgrp(self):
         testtree = generate_testtree1(current_uid, current_gid)

@@ -201,8 +201,8 @@ class PathManagerCase(base.BaseTestCase):
         with fake_testtree(testtree):
             pathinfo = PathManager('/var/lib/nova/instances/foo/baz')
             self.assertTrue(pathinfo.has_owner(current_uid, current_gid))
-            pathinfo.chown(current_uid, current_gid+1)
-            assert_ids(testtree, pathinfo.path, current_uid, current_gid+1)
+            pathinfo.chown(current_uid, current_gid + 1)
+            assert_ids(testtree, pathinfo.path, current_uid, current_gid + 1)

     def test_chown_chgrp(self):
         testtree = generate_testtree1(current_uid, current_gid)

@@ -210,8 +210,9 @@ class PathManagerCase(base.BaseTestCase):
         with fake_testtree(testtree):
             pathinfo = PathManager('/var/lib/nova/instances/foo/baz')
             self.assertTrue(pathinfo.has_owner(current_uid, current_gid))
-            pathinfo.chown(current_uid+1, current_gid+1)
-            assert_ids(testtree, pathinfo.path, current_uid+1, current_gid+1)
+            pathinfo.chown(current_uid + 1, current_gid + 1)
+            assert_ids(testtree, pathinfo.path,
+                       current_uid + 1, current_gid + 1)

 class NovaStatedirOwnershipManagerTestCase(base.BaseTestCase):

@@ -1,9 +1,19 @@
 #!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
 import json
 import netaddr
-import os
 import openstack
+import os
 import subprocess

 CTLPLANE_NETWORK_NAME = 'ctlplane'

@@ -61,12 +71,15 @@ def _ensure_neutron_network(sdk):
     return network

+
 def _get_nameservers_for_version(servers, ipversion):
     """Get list of nameservers for an IP version"""
     return [s for s in servers if netaddr.IPAddress(s).version == ipversion]

+
 def _neutron_subnet_create(sdk, network_id, cidr, gateway, host_routes,
-                           allocation_pools, name, segment_id, dns_nameservers):
+                           allocation_pools, name, segment_id,
+                           dns_nameservers):
     try:
         if netaddr.IPNetwork(cidr).version == 6:
             subnet = sdk.network.create_subnet(

@@ -137,6 +150,7 @@ def _neutron_add_subnet_segment_association(sdk, subnet_id, segment_id):
         print('ERROR: Associationg segment with subnet %s failed.' % subnet_id)
         raise

+
 def _neutron_segment_create(sdk, name, network_id, phynet):
     try:
         segment = sdk.network.create_segment(

@@ -145,7 +159,7 @@ def _neutron_segment_create(sdk, name, network_id, phynet):
             physical_network=phynet,
             network_type='flat')
         print('INFO: Neutron Segment created %s' % segment)
-    except Exception as ex:
+    except Exception:
         print('ERROR: Neutron Segment %s create failed.' % name)
         raise

@@ -173,7 +187,7 @@ def _ensure_neutron_router(sdk, name, subnet_id):
 def _get_subnet(sdk, cidr, network_id):
     try:
         subnet = list(sdk.network.subnets(cidr=cidr, network_id=network_id))
-    except Exception as ex:
+    except Exception:
         print('ERROR: Get subnet with cidr %s failed.' % cidr)
         raise

@@ -206,12 +220,13 @@ def _local_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
     """Create's and updates the ctlplane subnet on the segment that is local to
     the underclud.
     """
     s = CONF['subnets'][CONF['local_subnet']]
     name = CONF['local_subnet']
     subnet = _get_subnet(sdk, s['NetworkCidr'], ctlplane_id)
     segment = _get_segment(sdk, CONF['physical_network'], ctlplane_id)
     if subnet:
-        if CONF['enable_routed_networks'] and subnet.segment_id == None:
+        if CONF['enable_routed_networks'] and subnet.segment_id is None:
             # The subnet exists and does not have a segment association. Since
             # routed networks is enabled in the configuration, we need to
             # migrate the existing non-routed networks subnet to a routed

@@ -239,10 +254,12 @@ def _local_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
     return net_cidrs

+
 def _remote_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
     """Create's and updates the ctlplane subnet(s) on segments that is
     not local to the undercloud.
     """
     for name in CONF['subnets']:
         s = CONF['subnets'][name]
         if name == CONF['local_subnet']:

@@ -274,6 +291,7 @@ def _remote_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs):
     return net_cidrs

+
 if 'true' not in _run_command(['hiera', 'neutron_api_enabled'],
                               name='hiera').lower():
     print('WARNING: UndercloudCtlplaneNetworkDeployment : The Neutron API '

@@ -1,13 +1,24 @@
 #!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
 import json
-import os
 import openstack
+import os
 import subprocess

 from keystoneauth1 import exceptions as ks_exceptions
-from mistralclient.api import client as mistralclient
 from mistralclient.api import base as mistralclient_exc
+from mistralclient.api import client as mistralclient

 CONF = json.loads(os.environ['config'])

@@ -45,7 +56,7 @@ def _run_command(args, env=None, name=None):
 def _configure_nova(sdk):
-    """ Disable nova quotas """
+    """Disable nova quotas"""
     sdk.set_compute_quotas('admin', cores='-1', instances='-1', ram='-1')

     # Configure flavors.

@@ -74,7 +85,7 @@ def _configure_nova(sdk):
 def _create_default_keypair(sdk):
-    """ Set up a default keypair. """
+    """Set up a default keypair."""
     ssh_dir = os.path.join(CONF['home_dir'], '.ssh')
     public_key_file = os.path.join(ssh_dir, 'id_rsa.pub')
     if (not [True for kp in sdk.compute.keypairs() if kp.name == 'default'] and

@@ -105,7 +116,7 @@ def _configure_workbooks_and_workflows(mistral):
 def _store_passwords_in_mistral_env(mistral):
-    """ Store required passwords in a mistral environment """
+    """Store required passwords in a mistral environment"""
     env_name = 'tripleo.undercloud-config'
     config_data = {
         'undercloud_ceilometer_snmpd_password':

@@ -153,7 +164,7 @@ def _create_default_plan(mistral):
     nova_api_enabled = 'true' in _run_command(
         ['hiera', 'nova_api_enabled']).lower()
     mistral_api_enabled = 'true' in _run_command(
-        ['hiera','mistral_api_enabled']).lower()
+        ['hiera', 'mistral_api_enabled']).lower()
     tripleo_validations_enabled = 'true' in _run_command(
         ['hiera', 'tripleo_validations_enabled']).lower()

@@ -1,4 +1,15 @@
 #!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
 """
 Generate the endpoint_map.yaml template from data in the endpoint_data.yaml

@@ -13,12 +24,6 @@ mismatch is detected.
 """

 from __future__ import print_function

-__all__ = ['load_endpoint_data', 'generate_endpoint_map_template',
-           'write_template', 'build_endpoint_map', 'check_up_to_date']
-
 import collections
 import copy
 import itertools

@@ -27,6 +32,9 @@ import sys

 import yaml

+__all__ = ['load_endpoint_data', 'generate_endpoint_map_template',
+           'write_template', 'build_endpoint_map', 'check_up_to_date']
+
 (IN_FILE, OUT_FILE) = ('endpoint_data.yaml', 'endpoint_map.yaml')

 SUBST = (SUBST_IP_ADDRESS, SUBST_CLOUDNAME) = ('IP_ADDRESS', 'CLOUDNAME')

@@ -88,7 +96,8 @@ def make_parameter(ptype, default, description=None):
 def template_parameters(config):
     params = collections.OrderedDict()
     params[PARAM_NETIPMAP] = make_parameter('json', {}, 'The Net IP map')
-    params[PARAM_SERVICENETMAP] = make_parameter('json', {}, 'The Service Net map')
+    params[PARAM_SERVICENETMAP] = make_parameter('json', {},
+                                                 'The Service Net map')
     params[PARAM_ENDPOINTMAP] = make_parameter('json',
                                                endpoint_map_default(config),
                                                'Mapping of service endpoint '

@@ -1,15 +1,15 @@
 #!/usr/bin/env python
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.

 import argparse
 import collections

@@ -17,6 +17,7 @@ import datetime
 import os
 import re
 import shutil
+import six
 import subprocess
 import sys
 import yaml

@@ -62,7 +63,7 @@ def parse_opts(argv):
 # FIXME: This duplicates code from tools/yaml-nic-config-2-script.py, we should
 # refactor to share the common code
 def to_commented_yaml(filename):
-    """ Convert comments into 'comments<num>: ...' YAML """
+    """Convert comments into 'comments<num>: ...' YAML"""
     out_str = ''
     last_non_comment_spaces = ''

@@ -108,7 +109,7 @@ def to_commented_yaml(filename):
 # FIXME: This duplicates code from tools/yaml-nic-config-2-script.py, we should
 # refactor to share the common code
 def to_normal_yaml(filename):
-    """ Convert back to normal #commented YAML"""
+    """Convert back to normal #commented YAML"""
     with open(filename, 'r') as f:
         data = f.read()

@@ -168,14 +169,10 @@ class TemplateLoader(yaml.SafeLoader):
         return collections.OrderedDict(self.construct_pairs(node))

-if sys.version_info.major >= 3:
-    TemplateDumper.add_representer(str, TemplateDumper.description_presenter)
-    TemplateDumper.add_representer(bytes,
-                                   TemplateDumper.description_presenter)
-else:
-    TemplateDumper.add_representer(str, TemplateDumper.description_presenter)
-    TemplateDumper.add_representer(unicode,
-                                   TemplateDumper.description_presenter)
+TemplateDumper.add_representer(six.text_type,
+                               TemplateDumper.description_presenter)
+TemplateDumper.add_representer(six.binary_type,
+                               TemplateDumper.description_presenter)

 TemplateDumper.add_representer(collections.OrderedDict,
                                TemplateDumper.represent_ordered_dict)

@@ -215,9 +212,10 @@ def process_templates_and_get_reference_parameters():
                        for x in roles_data
                        if x['name'] == OPTS.role_name))
     except StopIteration:
-        raise RuntimeError('The role: {role_name} is not defined in roles '
-                           'data file: {roles_data_file}'.format(
-                           role_name=OPTS.role_name, roles_data_file=OPTS.roles_data))
+        raise RuntimeError(
+            'The role: {role_name} is not defined in roles '
+            'data file: {roles_data_file}'.format(
+                role_name=OPTS.role_name, roles_data_file=OPTS.roles_data))

     refernce_file = '/'.join([temp_dir, 'network/config', NIC_CONFIG_REFERENCE,
                               nic_config_name])

@@ -1,15 +1,15 @@
 #!/usr/bin/env python
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.

 import argparse
 import jinja2

@@ -38,6 +38,7 @@ def _shutil_copy_if_not_same(src, dst):
     else:
         raise

+
 def parse_opts(argv):
     parser = argparse.ArgumentParser(
         description='Configure host network interfaces using a JSON'

@@ -49,7 +50,8 @@ def parse_opts(argv):
                         help="""relative path to the roles_data.yaml file.""",
                         default='roles_data.yaml')
     parser.add_argument('-n', '--network-data', metavar='NETWORK_DATA',
-                        help="""relative path to the network_data.yaml file.""",
+                        help=("""relative path to the network_data.yaml """
+                              """file."""),
                         default='network_data.yaml')
     parser.add_argument('--safe',
                         action='store_true',

@@ -86,7 +88,8 @@ def _j2_render_to_file(j2_template, j2_data, outfile_name=None,
     # Search for templates relative to the current template path first
     template_base = os.path.dirname(yaml_f)
-    j2_loader = jinja2.loaders.FileSystemLoader([template_base, __tht_root_dir])
+    j2_loader = \
+        jinja2.loaders.FileSystemLoader([template_base, __tht_root_dir])

     try:
         # Render the j2 template

@@ -102,6 +105,7 @@ def _j2_render_to_file(j2_template, j2_data, outfile_name=None,
         with open(outfile_name, 'w') as out_f:
             out_f.write(r_template)

+
 def process_templates(template_path, role_data_path, output_dir,
                       network_data_path, overwrite, dry_run):

@@ -163,9 +167,9 @@ def process_templates(template_path, role_data_path, output_dir,
                 out_dir = subdir
                 if output_dir:
                     if template_path != '.':
                         # strip out base path if not default
                         temp = out_dir.split(template_path)[1]
                         out_dir = temp[1:] if temp.startswith('/') else temp
                     out_dir = os.path.join(output_dir, out_dir)
                 if not os.path.exists(out_dir):
                     os.mkdir(out_dir)

@@ -255,7 +259,8 @@ def process_templates(template_path, role_data_path, output_dir,
                     template_data = j2_template.read()
                     j2_data = {'roles': role_data,
                                'networks': network_data}
-                    out_f = os.path.basename(f).replace('.j2.yaml', '.yaml')
+                    out_f = os.path.basename(f).replace('.j2.yaml',
+                                                        '.yaml')
                     out_f_path = os.path.join(out_dir, out_f)
                     _j2_render_to_file(template_data, j2_data, out_f_path,
                                        overwrite, dry_run)

@@ -265,6 +270,7 @@ def process_templates(template_path, role_data_path, output_dir,
     else:
         print('Unexpected argument %s' % template_path)

+
 def clean_templates(base_path, role_data_path, network_data_path):

     def delete(f):

@@ -42,6 +42,7 @@ def parse_opts(argv):
     return opts

+
 opts = parse_opts(sys.argv)
 roles = collections.OrderedDict.fromkeys(opts.roles)

@@ -1,15 +1,15 @@
 #!/usr/bin/env python
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.

 import sys
 import yaml

@@ -1,32 +1,32 @@
 #!/usr/bin/env python
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.

 import argparse
 import collections
-import copy
 import datetime
 import os
+import re
 import shutil
+import six
 import sys
 import traceback
 import yaml
-import six
-import re

 def parse_opts(argv):
     parser = argparse.ArgumentParser(
-        description='Convert an old style NIC config file into the new format using '
-                    'run-os-net-config.sh')
+        description='Convert an old style NIC config file into the new format '
+                    'using run-os-net-config.sh')
     parser.add_argument('--script-dir', metavar='<script directory>',
                         help="Relative path to run-os-net-config.sh",
                         default="network/scripts/run-os-net-config.sh")

@@ -42,8 +42,9 @@ def parse_opts(argv):
     return opts

-#convert comments into 'comments<num>: ...' YAML
 def to_commented_yaml(filename):
+    """Convert comments into 'comments<num>: ...' YAML"""
     out_str = ''
     last_non_comment_spaces = ''
     with open(filename, 'r') as f:

@@ -51,38 +52,43 @@ def to_commented_yaml(filename):
         for line in f:
             # skip blank line
             if line.isspace():
-                continue;
+                continue

             char_count = 0
             spaces = ''
             for char in line:
                 char_count += 1
                 if char == ' ':
-                    spaces+=' '
-                    next;
+                    spaces += ' '
+                    next
                 elif char == '#':
                     last_non_comment_spaces = spaces
                     comment_count += 1
                     comment = line[char_count:-1]
-                    out_str += "%scomment%i_%i: '%s'\n" % (last_non_comment_spaces, comment_count, len(spaces), comment)
-                    break;
+                    out_str += "%scomment%i_%i: '%s'\n" % \
+                        (last_non_comment_spaces, comment_count, len(spaces),
+                         comment)
+                    break
                 else:
                     last_non_comment_spaces = spaces
                     out_str += line

-                    #inline comments check
+                    # inline comments check
                     m = re.match(".*:.*#(.*)", line)
                     if m:
                         comment_count += 1
-                        out_str += "%s inline_comment%i: '%s'\n" % (last_non_comment_spaces, comment_count, m.group(1))
-                        break;
+                        out_str += "%s inline_comment%i: '%s'\n" % \
+                            (last_non_comment_spaces, comment_count,
+                             m.group(1))
+                        break

     with open(filename, 'w') as f:
         f.write(out_str)

     return out_str

-#convert back to normal #commented YAML
 def to_normal_yaml(filename):
+    """Convert back to normal #commented YAML"""
     with open(filename, 'r') as f:
         data = f.read()

@@ -92,8 +98,12 @@ def to_normal_yaml(filename):
     for line in data.split('\n'):
         # get_input not supported by run-os-net-config.sh script
         line = line.replace('get_input: ', '')
-        m = re.match(" +comment[0-9]+_([0-9]+): '(.*)'.*", line) #normal comments
-        i = re.match(" +inline_comment[0-9]+: '(.*)'.*", line) #inline comments
+        # normal comments
+        m = re.match(" +comment[0-9]+_([0-9]+): '(.*)'.*", line)
+        # inline comments
+        i = re.match(" +inline_comment[0-9]+: '(.*)'.*", line)
         if m:
             if next_line_break:
                 out_str += '\n'

@@ -122,9 +132,11 @@ def to_normal_yaml(filename):
 class description(six.text_type):
     pass

+
 # FIXME: Some of this duplicates code from build_endpoint_map.py, we should
 # refactor to share the common code
 class TemplateDumper(yaml.SafeDumper):
     def represent_ordered_dict(self, data):
         return self.represent_dict(data.items())

@@ -154,9 +166,12 @@ TemplateDumper.add_representer(collections.OrderedDict,
 TemplateLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                                TemplateLoader.construct_mapping)

+
 def write_template(template, filename=None):
     with open(filename, 'w') as f:
-        yaml.dump(template, f, TemplateDumper, width=120, default_flow_style=False)
+        yaml.dump(template, f, TemplateDumper, width=120,
+                  default_flow_style=False)

+
 def convert(filename, script_path):
     print('Converting %s' % filename)

@@ -170,7 +185,6 @@ def convert(filename, script_path):
         if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
                 r[1].get('properties', {}).get('group') == 'os-apply-config' and
                 r[1].get('properties', {}).get('config', {}).get('os_net_config')):
-            #print("match %s" % r[0])
             new_r = collections.OrderedDict()
             new_r['type'] = 'OS::Heat::SoftwareConfig'
             new_r['properties'] = collections.OrderedDict()

@@ -179,7 +193,8 @@ def convert(filename, script_path):
                 'properties', {}).get('config', {}).get('os_net_config')
             new_config = {'str_replace': collections.OrderedDict()}
             new_config['str_replace']['template'] = {'get_file': script_path}
-            new_config['str_replace']['params'] = {'$network_config': old_net_config}
+            new_config['str_replace']['params'] = \
+                {'$network_config': old_net_config}
             new_r['properties']['config'] = new_config
             tpl['resources'][r[0]] = new_r
         else:

@@ -195,18 +210,16 @@ def convert(filename, script_path):
     od_result['parameters'] = tpl['parameters']
     od_result['resources'] = tpl['resources']
     od_result['outputs'] = tpl['outputs']
-    #print('Result:')
-    #print('%s' % yaml.dump(od_result, Dumper=TemplateDumper, width=120, default_flow_style=False))
-    #print('---')

     write_template(od_result, filename)

     return 1

+
 def check_old_style(filename):
     with open(filename, 'r') as f:
-        tpl = yaml.load(open(filename).read())
+        tpl = yaml.load(f.read())

     if isinstance(tpl.get('resources', {}), dict):
         for r in (tpl.get('resources', {})).items():

@@ -217,6 +230,7 @@ def check_old_style(filename):
     return False

+
 opts = parse_opts(sys.argv)

 exit_val = 0
 num_converted = 0

@@ -231,8 +245,8 @@ for base_path in opts.files:
         script_paths = [opts.script_dir]
         script_paths.append('../../scripts/run-os-net-config.sh')
         script_paths.append('../network/scripts/run-os-net-config.sh')
-        script_paths.append(
-            '/usr/share/openstack-tripleo-heat-templates/network/scripts/run-os-net-config.sh')
+        script_paths.append('/usr/share/openstack-tripleo-heat-templates/'
+                            'network/scripts/run-os-net-config.sh')

         script_path = None
         for p in script_paths:

@@ -240,7 +254,8 @@ for base_path in opts.files:
                 script_path = p
                 break
         if script_path is None:
-            print("Error couldn't find run-os-net-config.sh relative to filename")
+            print("Error couldn't find run-os-net-config.sh relative "
+                  "to filename")
             sys.exit(1)
         print("Using script at %s" % script_path)

@@ -248,14 +263,16 @@ for base_path in opts.files:
         extension = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
         backup_filename = os.path.realpath(base_path) + '.' + extension

-        print('The yaml file will be overwritten and the original saved as %s'
-              % backup_filename)
-        if not (opts.yes or input("Overwrite %s? [y/n] " % base_path).lower() == 'y'):
+        print('The yaml file will be overwritten and the original saved '
+              'as %s' % backup_filename)
+        if not (opts.yes or
+                input("Overwrite %s? [y/n] " % base_path).lower() == 'y'):
            print("Skipping file %s" % base_path)
            continue

         if os.path.exists(backup_filename):
-            print("Backup file already exists, skipping file %s" % base_path)
+            print("Backup file already exists, skipping file %s" %
+                  base_path)
             continue

         shutil.copyfile(base_path, backup_filename)

@@ -264,11 +281,13 @@ for base_path in opts.files:
             num_converted += convert(base_path, script_path)
             to_normal_yaml(base_path)
         else:
-            print('File %s is not using old style NIC configuration' % base_path)
+            print('File %s is not using old style NIC configuration' %
+                  base_path)
     else:
         print('Unexpected argument %s' % base_path)

 if num_converted == 0:
     exit_val = 1
 sys.exit(exit_val)

@ -1,15 +1,15 @@
#!/usr/bin/env python #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain # not use this file except in compliance with the License. You may obtain
# a copy of the License at # a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import argparse import argparse
import os import os
@ -51,11 +51,12 @@ OPTIONAL_DOCKER_SECTIONS = ['container_puppet_tasks', 'upgrade_tasks',
'pre_upgrade_rolling_tasks', 'pre_upgrade_rolling_tasks',
'fast_forward_upgrade_tasks', 'fast_forward_upgrade_tasks',
'fast_forward_post_upgrade_tasks', 'fast_forward_post_upgrade_tasks',
'post_upgrade_tasks', 'update_tasks', 'post_upgrade_tasks', 'update_tasks',
'post_update_tasks', 'service_config_settings', 'post_update_tasks', 'service_config_settings',
'host_prep_tasks', 'metadata_settings', 'host_prep_tasks', 'metadata_settings',
'kolla_config', 'global_config_settings', 'kolla_config', 'global_config_settings',
'external_deploy_tasks', 'external_post_deploy_tasks', 'external_deploy_tasks',
'external_post_deploy_tasks',
'container_config_scripts', 'step_config', 'container_config_scripts', 'step_config',
'monitoring_subscription', 'scale_tasks', 'monitoring_subscription', 'scale_tasks',
'external_update_tasks', 'external_upgrade_tasks'] 'external_update_tasks', 'external_upgrade_tasks']
@ -63,139 +64,130 @@ OPTIONAL_DOCKER_SECTIONS = ['container_puppet_tasks', 'upgrade_tasks',
ANSIBLE_TASKS_SECTIONS = ['upgrade_tasks', 'pre_upgrade_rolling_tasks', ANSIBLE_TASKS_SECTIONS = ['upgrade_tasks', 'pre_upgrade_rolling_tasks',
'fast_forward_upgrade_tasks', 'fast_forward_upgrade_tasks',
'fast_forward_post_upgrade_tasks', 'fast_forward_post_upgrade_tasks',
'post_upgrade_tasks', 'update_tasks', 'post_upgrade_tasks', 'update_tasks',
'post_update_tasks', 'host_prep_tasks', 'post_update_tasks', 'host_prep_tasks',
'external_deploy_tasks', 'external_deploy_tasks',
'external_post_deploy_tasks' ] 'external_post_deploy_tasks']
REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config', REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
'config_image'] 'config_image']
OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = [ 'puppet_tags', 'volumes' ] OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = ['puppet_tags', 'volumes']
REQUIRED_DOCKER_LOGGING_OUTPUTS = ['config_settings', 'docker_config', REQUIRED_DOCKER_LOGGING_OUTPUTS = ['config_settings', 'docker_config',
'volumes', 'host_prep_tasks'] 'volumes', 'host_prep_tasks']
# Mapping of parameter names to a list of the fields we should _not_ enforce # Mapping of parameter names to a list of the fields we should _not_ enforce
# consistency across files on. This should only contain parameters whose # consistency across files on. This should only contain parameters whose
# definition we cannot change for backwards compatibility reasons. New # definition we cannot change for backwards compatibility reasons. New
# parameters to the templates should not be added to this list. # parameters to the templates should not be added to this list.
PARAMETER_DEFINITION_EXCLUSIONS = {'CephPools': ['description', PARAMETER_DEFINITION_EXCLUSIONS = {
'type', 'CephPools': ['description', 'type', 'default'],
'default'], 'ManagementNetCidr': ['default'],
'ManagementNetCidr': ['default'], 'ManagementAllocationPools': ['default'],
'ManagementAllocationPools': ['default'], 'ExternalNetCidr': ['default'],
'ExternalNetCidr': ['default'], 'ExternalAllocationPools': ['default'],
'ExternalAllocationPools': ['default'], 'StorageNetCidr': ['default'],
'StorageNetCidr': ['default'], 'StorageAllocationPools': ['default'],
'StorageAllocationPools': ['default'], 'StorageMgmtNetCidr': ['default'],
'StorageMgmtNetCidr': ['default'], 'StorageMgmtAllocationPools': ['default'],
'StorageMgmtAllocationPools': ['default'], 'TenantNetCidr': ['default'],
'TenantNetCidr': ['default'], 'TenantAllocationPools': ['default'],
'TenantAllocationPools': ['default'], 'InternalApiNetCidr': ['default'],
'InternalApiNetCidr': ['default'], 'InternalApiAllocationPools': ['default'],
'InternalApiAllocationPools': ['default'], 'UpdateIdentifier': ['description'],
'UpdateIdentifier': ['description'], 'key_name': ['default'],
'key_name': ['default'], 'CeilometerAgentCentralLoggingSource': ['default'],
'CeilometerAgentCentralLoggingSource': ['default'], 'CeilometerAgentIpmiLoggingSource': ['default'],
'CeilometerAgentIpmiLoggingSource': ['default'], 'CeilometerAgentNotificationLoggingSource': ['default'],
'CeilometerAgentNotificationLoggingSource': ['default'], 'CinderApiLoggingSource': ['default'],
'CinderApiLoggingSource': ['default'], 'CinderSchedulerLoggingSource': ['default'],
'CinderSchedulerLoggingSource': ['default'], 'CinderVolumeLoggingSource': ['default'],
'CinderVolumeLoggingSource': ['default'], 'DesignateApiLoggingSource': ['default'],
'DesignateApiLoggingSource': ['default'], 'DesignateCentralLoggingSource': ['default'],
'DesignateCentralLoggingSource': ['default'], 'DesignateMiniDNSLoggingSource': ['default'],
'DesignateMiniDNSLoggingSource': ['default'], 'DesignateProducerLoggingSource': ['default'],
'DesignateProducerLoggingSource': ['default'], 'DesignateSinkLoggingSource': ['default'],
'DesignateSinkLoggingSource': ['default'], 'DesignateWorkerLoggingSource': ['default'],
'DesignateWorkerLoggingSource': ['default'], 'Ec2ApiLoggingSource': ['default'],
'Ec2ApiLoggingSource': ['default'], 'GlanceApiLoggingSource': ['default'],
'GlanceApiLoggingSource': ['default'], 'GnocchiApiLoggingSource': ['default'],
'GnocchiApiLoggingSource': ['default'], 'HeatApiCfnLoggingSource': ['default'],
'HeatApiCfnLoggingSource': ['default'], 'HeatApiLoggingSource': ['default'],
'HeatApiLoggingSource': ['default'], 'HeatEngineLoggingSource': ['default'],
'HeatEngineLoggingSource': ['default'], 'KeystoneLoggingSource': ['default'],
'KeystoneLoggingSource': ['default'], 'KeystoneErrorLoggingSource': ['default'],
'KeystoneErrorLoggingSource': ['default'], 'KeystoneAdminAccessLoggingSource': ['default'],
'KeystoneAdminAccessLoggingSource': ['default'], 'KeystoneAdminErrorLoggingSource': ['default'],
'KeystoneAdminErrorLoggingSource': ['default'], 'KeystoneMainAcccessLoggingSource': ['default'],
'KeystoneMainAcccessLoggingSource': ['default'], 'KeystoneMainErrorLoggingSource': ['default'],
'KeystoneMainErrorLoggingSource': ['default'], 'NeutronApiLoggingSource': ['default'],
'NeutronApiLoggingSource': ['default'], 'NeutronDhcpAgentLoggingSource': ['default'],
'NeutronDhcpAgentLoggingSource': ['default'], 'NeutronL3AgentLoggingSource': ['default'],
'NeutronL3AgentLoggingSource': ['default'], 'NeutronMetadataAgentLoggingSource': ['default'],
'NeutronMetadataAgentLoggingSource': ['default'], 'NeutronOpenVswitchAgentLoggingSource': ['default'],
     'NeutronOpenVswitchAgentLoggingSource': ['default'],
     'NovaApiLoggingSource': ['default'],
     'NovaComputeLoggingSource': ['default'],
     'NovaConductorLoggingSource': ['default'],
     'NovaMetadataLoggingSource': ['default'],
     'NovaSchedulerLoggingSource': ['default'],
     'NovaVncproxyLoggingSource': ['default'],
     'OctaviaApiLoggingSource': ['default'],
     'OctaviaHealthManagerLoggingSource': ['default'],
     'OctaviaHousekeepingLoggingSource': ['default'],
     'OctaviaWorkerLoggingSource': ['default'],
     'OvnMetadataAgentLoggingSource': ['default'],
     'PlacementLoggingSource': ['default'],
     'SaharaApiLoggingSource': ['default'],
     'SaharaEngineLoggingSource': ['default'],
     # There's one template that defines this
     # differently, and I'm not sure if we can
     # safely change it.
     'ControlPlaneDefaultRoute': ['default'],
-    # TODO(bnemec): Address these existing
-    # inconsistencies.
+    # TODO(bnemec): Address these existing inconsistencies.
     'ServiceNetMap': ['description', 'default'],
     'network': ['default'],
     'ControlPlaneIP': ['default',
                        'description'],
     'ControlPlaneIp': ['default',
                        'description'],
     'NeutronBigswitchLLDPEnabled': ['default'],
     'NeutronWorkers': ['description'],
     'ServerMetadata': ['description'],
     'server': ['description'],
     'servers': ['description'],
     'ExtraConfig': ['description'],
     'DefaultPasswords': ['description',
                          'default'],
     'BondInterfaceOvsOptions': ['description',
                                 'default',
                                 'constraints'],
     # NOTE(anil): This is a temporary change and
     # will be removed once bug #1767070 properly
     # fixed. OVN supports only VLAN, geneve
     # and flat for NeutronNetworkType. But VLAN
     # tenant networks have a limited support
     # in OVN. Till that is fixed, we restrict
     # NeutronNetworkType to 'geneve'.
-    'NeutronNetworkType': ['description',
-                           'default',
-                           'constraints'],
+    'NeutronNetworkType': ['description', 'default', 'constraints'],
     'KeyName': ['constraints'],
     'OVNSouthboundServerPort': ['description'],
-    'ExternalInterfaceDefaultRoute':
-        ['description', 'default'],
-    'ManagementInterfaceDefaultRoute':
-        ['description', 'default'],
+    'ExternalInterfaceDefaultRoute': ['description', 'default'],
+    'ManagementInterfaceDefaultRoute': ['description', 'default'],
     'IPPool': ['description'],
-    'SSLCertificate': ['description',
-                       'default',
-                       'hidden'],
+    'SSLCertificate': ['description', 'default', 'hidden'],
     'NodeIndex': ['description'],
     'name': ['description', 'default'],
     'image': ['description', 'default'],
     'NeutronBigswitchAgentEnabled': ['default'],
     'EndpointMap': ['description', 'default'],
-    'ContainerManilaConfigImage': ['description',
-                                   'default'],
+    'ContainerManilaConfigImage': ['description', 'default'],
     'replacement_policy': ['default'],
     'CloudDomain': ['description', 'default'],
     'EnableLoadBalancer': ['description'],
     'ControllerExtraConfig': ['description'],
     'NovaComputeExtraConfig': ['description'],
     'controllerExtraConfig': ['description'],
     'ContainerSwiftConfigImage': ['default'],
     'input_values': ['default'],
     'fixed_ips': ['default', 'type']
 }
 PREFERRED_CAMEL_CASE = {
     'ec2api': 'Ec2Api',
@@ -270,6 +262,7 @@ HEAT_OUTPUTS_EXCLUSIONS = [
     './extraconfig/pre_network/host_config_and_reboot.yaml'
 ]

+
 def exit_usage():
     print('Usage %s <yaml file or directory>' % sys.argv[0])
     sys.exit(1)
@@ -308,9 +301,9 @@ def validate_endpoint_map(base_map, env_map):
 def validate_role_name(filename):
     role_data = yaml.load(open(filename).read())[0]
     if role_data['name'] != os.path.basename(filename).split('.')[0]:
         print('ERROR: role name should match file name for role : %s.'
               % filename)
         return 1
     return 0
@@ -363,9 +356,9 @@ def validate_controller_dashboard(filename, tpl):
 def validate_hci_role(hci_role_filename, hci_role_tpl):
     role_files = ['HciCephAll', 'HciCephFile', 'HciCephMon', 'HciCephObject']
-    if hci_role_filename in ['./roles/'+ x +'.yaml' for x in role_files]:
-        compute_role_filename = os.path.join(os.path.dirname(hci_role_filename),
-                                             './Compute.yaml')
+    if hci_role_filename in ['./roles/' + x + '.yaml' for x in role_files]:
+        compute_role_filename = \
+            os.path.join(os.path.dirname(hci_role_filename), './Compute.yaml')
         compute_role_tpl = yaml.load(open(compute_role_filename).read())
         compute_role_services = compute_role_tpl[0]['ServicesDefault']
         for role in hci_role_tpl:
@@ -397,11 +390,12 @@ def validate_hci_role(hci_role_filename, hci_role_tpl):
         return 1
     return 0

+
 def validate_ceph_role(ceph_role_filename, ceph_role_tpl):
     role_files = ['CephAll', 'CephFile', 'CephMon', 'CephObject']
-    if ceph_role_filename in ['./roles/'+ x +'.yaml' for x in role_files]:
-        ceph_storage_role_filename = os.path.join(os.path.dirname(ceph_role_filename),
-                                                  './CephStorage.yaml')
+    if ceph_role_filename in ['./roles/' + x + '.yaml' for x in role_files]:
+        ceph_storage_role_filename = \
+            os.path.join(os.path.dirname(ceph_role_filename), './CephStorage.yaml')
         ceph_storage_role_tpl = yaml.load(open(ceph_storage_role_filename).read())
         ceph_storage_role_services = ceph_storage_role_tpl[0]['ServicesDefault']
         for role in ceph_role_tpl:
@@ -427,6 +421,7 @@ def validate_ceph_role(ceph_role_filename, ceph_role_tpl):
         return 1
     return 0

+
 def validate_controller_no_ceph_role(filename, tpl):
     control_role_filename = os.path.join(os.path.dirname(filename),
                                          './Controller.yaml')
@@ -448,6 +443,7 @@ def validate_controller_no_ceph_role(filename, tpl):
         return 1
     return 0

+
 def validate_with_compute_role_services(role_filename, role_tpl, exclude_service=()):
     cmpt_filename = os.path.join(os.path.dirname(role_filename),
                                  './Compute.yaml')
@@ -484,6 +480,7 @@ def validate_with_compute_role_services(role_filename, role_tpl, exclude_service
     return 0

+
 def validate_multiarch_compute_roles(role_filename, role_tpl):
     errors = 0
     roles_dir = os.path.dirname(role_filename)
@@ -572,7 +569,7 @@ def validate_docker_service_mysql_usage(filename, tpl):
     def read_all(incfile, inctpl):
         # search for included content
-        content = inctpl['outputs']['role_data']['value'].get('config_settings',{})
+        content = inctpl['outputs']['role_data']['value'].get('config_settings', {})
         all_content.append(content)
         included_res[:] = []
         if search(content, match_included_res, no_op):
@@ -582,7 +579,7 @@ def validate_docker_service_mysql_usage(filename, tpl):
             # disregard class names, only consider file names
             if 'OS::' in f:
                 continue
-            newfile = os.path.normpath(os.path.dirname(incfile)+'/'+f)
+            newfile = os.path.normpath(os.path.dirname(incfile) + '/' + f)
             newtmp = yaml.load(open(newfile).read())
             read_all(newfile, newtmp)
@@ -667,18 +664,19 @@ def validate_docker_service(filename, tpl):
             if key in OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS:
                 continue
             else:
                 print('ERROR: %s should not be in puppet_config section.'
                       % key)
                 return 1
         for key in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS:
             if key not in puppet_config:
                 print('ERROR: %s is required in puppet_config for %s.'
                       % (key, filename))
                 return 1
         config_volume = puppet_config.get('config_volume')
-        expected_config_image_parameter = "Container%sConfigImage" % to_camel_case(config_volume)
-        if config_volume and not expected_config_image_parameter in tpl.get('parameters', []):
+        expected_config_image_parameter = \
+            "Container%sConfigImage" % to_camel_case(config_volume)
+        if config_volume and expected_config_image_parameter not in tpl.get('parameters', []):
             print('ERROR: Missing %s heat parameter for %s config_volume.'
                   % (expected_config_image_parameter, config_volume))
             return 1
@@ -699,8 +697,9 @@ def validate_docker_service(filename, tpl):
                 command = ' '.join(map(str, command))
                 if 'bootstrap_host_exec' in command \
                         and container.get('user') != 'root':
-                    print('ERROR: bootstrap_host_exec needs to run as the root user.')
+                    print('ERROR: bootstrap_host_exec needs to run '
+                          'as the root user.')
                     return 1

     if 'upgrade_tasks' in role_data and role_data['upgrade_tasks']:
         if (validate_upgrade_tasks(role_data['upgrade_tasks']) or
@@ -793,7 +792,7 @@ def validate_service(filename, tpl):
 def _rsearch_keys(d, pattern, search_keynames=False, enter_lists=False):
-    """ Deep regex search through a dict for k or v matching a pattern
+    """Deep regex search through a dict for k or v matching a pattern

     Returns a list of the matched parent keys. Nested keypaths are
     represented as lists. Looks for either values (default) or keys mathching
@@ -871,8 +870,9 @@ def _rsearch_keys(d, pattern, search_keynames=False, enter_lists=False):
     result = []
     return _rsearch_keys_nested(d, pattern, search_keynames, enter_lists)
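As an aside for readers of this hunk, the docstring above describes a recursive search over nested dicts. The following illustration of the call shape is hypothetical (the data and the exact return value are assumptions based on the docstring, not taken from this diff):

    # Hypothetical data; _rsearch_keys is the helper shown above.
    settings = {
        'bind_ip': '192.168.24.1',
        'rabbit': [{'host': '192.168.24.2', 'port': 5672}],
    }
    # Searching values for an IP-like pattern would be expected to
    # return the parent key of each match, with nested keypaths
    # represented as lists (enter_lists=True also descends into lists):
    #   _rsearch_keys(settings, r'192\.168', search_keynames=False,
    #                 enter_lists=True)
    #   -> ['bind_ip', ['rabbit', 0, 'host']]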

+
 def _get(d, path):
-    """ Get a value (or None) from a dict by path given as a list
+    """Get a value (or None) from a dict by path given as a list

     Integer values represent indexes in lists, string values are for dict keys
     """
@@ -885,8 +885,9 @@ def _get(d, path):
         return None
     return d
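A minimal standalone sketch of the lookup behaviour that _get's docstring describes. The body here is an assumption reconstructed from the docstring and the two return lines visible in this hunk, not the file's verbatim code:

    def _get(d, path):
        """Get a value (or None) from a dict by path given as a list"""
        for key in path:
            try:
                d = d[key]  # ints index lists, strings index dicts
            except (KeyError, IndexError, TypeError):
                return None
        return d

    # _get({'a': [{'b': 1}]}, ['a', 0, 'b']) -> 1
    # _get({'a': [{'b': 1}]}, ['a', 5, 'b']) -> None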

+
 def validate_service_hiera_interpol(f, tpl):
-    """ Validate service templates for hiera interpolation rules
+    """Validate service templates for hiera interpolation rules

     Find all {get_param: [ServiceNetMap, ...]} missing hiera
     interpolation of IP addresses or network ranges vs
@@ -979,6 +980,7 @@ def validate_service_hiera_interpol(f, tpl):
     else:
         return 0

+
 def validate_upgrade_tasks_duplicate_whens(filename):
     """Take a heat template and starting at the upgrade_tasks
     try to detect duplicate 'when:' statements
@@ -1188,6 +1190,7 @@ def validate(filename, param_map):
     return retval

+
 def validate_upgrade_tasks(upgrade_tasks):
     # some templates define its upgrade_tasks via list_concat
     if isinstance(upgrade_tasks, dict):
@@ -1200,15 +1203,20 @@ def validate_upgrade_tasks(upgrade_tasks):
         task_name = task.get("name", "")
         whenline = task.get("when", "")
         if (type(whenline) == list):
-            if any('step|int ' in condition for condition in whenline) and ('step|int == ' not in whenline[0]):
-                print('ERROR: \'step|int ==\' condition should be evaluated first in when conditions for task (%s)' % (task))
-                return 1
+            if any('step|int ' in condition for condition in whenline) \
+                    and ('step|int == ' not in whenline[0]):
+                print('ERROR: \'step|int ==\' condition should be evaluated '
+                      'first in when conditions for task (%s)' % (task))
+                return 1
         else:
             if (' and ' in whenline) and (' or ' not in whenline) \
                     and args.quiet < 2:
-                print("Warning: Consider specifying \'and\' conditions as a list to improve readability in task: \"%s\"" % (task_name))
+                print("Warning: Consider specifying \'and\' conditions as "
+                      "a list to improve readability in task: \"%s\""
+                      % (task_name))
     return 0
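To make the reworked check concrete, here is an illustration of 'when' lists that pass and fail the ordering rule enforced above (task names and conditions are hypothetical):

    # Passes: the 'step|int == ' comparison is evaluated first.
    good_task = {
        'name': 'Stop service',
        'when': ['step|int == 1', 'release|int >= 15'],
    }

    # Fails: a 'step|int ' condition is present, but the first entry
    # is not a 'step|int == ' comparison, so the validator reports an
    # ERROR for this task.
    bad_task = {
        'name': 'Stop service',
        'when': ['release|int >= 15', 'step|int == 1'],
    }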

+
 def validate_network_data_file(data_file_path):
     try:
         data_file = yaml.load(open(data_file_path).read())
@@ -1227,6 +1235,7 @@ def validate_network_data_file(data_file_path):
         return 1
     return 0

+
 def validate_nic_config_file(filename, tpl):
     try:
         if isinstance(tpl.get('resources', {}), dict):
@@ -1243,6 +1252,7 @@ def validate_nic_config_file(filename, tpl):
         return 1
     return 0

+
 def parse_args():
     p = argparse.ArgumentParser()
@@ -1256,6 +1266,7 @@ def parse_args():
     return p.parse_args()

+
 args = parse_args()
 path_args = args.path_args
 quiet = args.quiet
@@ -1273,9 +1284,9 @@ for base_path in path_args:
     for f in files:
         file_path = os.path.join(subdir, f)
         if 'environments/services-docker' in file_path:
-            print("ERROR: environments/services-docker should not be used "
-                  "any more, use environments/services instead: %s " %
-                  file_path)
+            print("ERROR: environments/services-docker should not be "
+                  "used any more, use environments/services instead: "
+                  "%s " % file_path)
             failed_files.append(file_path)
             exit_val |= 1

tox.ini

@@ -26,12 +26,35 @@ commands =
     python ./tools/yaml-validate.py .
     bash -c ./tools/roles-data-validation.sh
     bash -c ./tools/check-up-to-date.sh
-    flake8 ./container_config_scripts/
+    flake8 --exclude releasenotes --ignore E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,W503,W504,E501,E731,W605

 [testenv:flake8]
 basepython = python3
-commands =
-    flake8 ./container_config_scripts/
+# E125 is deliberately excluded. See
+# https://github.com/jcrocholl/pep8/issues/126. It's just wrong.
+#
+# Most of the whitespace related rules (E12* and E131) are excluded
+# because while they are often useful guidelines, strict adherence to
+# them ends up causing some really odd code formatting and forced
+# extra line breaks. Updating code to enforce these will be a hard sell.
+#
+# H405 is another one that is good as a guideline, but sometimes
+# multiline doc strings just don't have a natural summary
+# line. Rejecting code for this reason is wrong.
+#
+# E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301
+#
+# The following two are also ignored because we don't think they are useful.
+# W503 line break before binary operator
+# W504 line break after binary operator
+#
+# The following rules are currently ignored, but will be enforced
+# in the future
+# E501 line too long
+# E731 do not assign a lambda expression, use a def
+# W605 invalid escape sequence
+ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,W503,W504,E501,E731,W605
+exclude = releasenotes

 [testenv:templates]
 basepython = python3
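For local runs outside tox, roughly the same check can be driven through flake8's documented legacy Python API. The ignore and exclude values below mirror the tox.ini settings above; the target path is an assumption:

    from flake8.api import legacy as flake8

    IGNORE = ['E121', 'E122', 'E123', 'E124', 'E125', 'E126', 'E127',
              'E128', 'E129', 'E131', 'E251', 'H405', 'W503', 'W504',
              'E501', 'E731', 'W605']

    # Build a style guide matching the tox.ini settings and lint the tree.
    style_guide = flake8.get_style_guide(ignore=IGNORE,
                                         exclude=['releasenotes'])
    report = style_guide.check_files(['.'])
    print('flake8 violations: %d' % report.total_errors)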

@@ -762,4 +762,5 @@ resource_registry:
             self.nested_output)
         self.assertEqual(expected, f.read())

+
 GeneratorTestCase.generate_scenarios()

@@ -12,15 +12,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import io
-import tempfile
-
-import mock
 from oslotest import base
-import six
-import testscenarios
 import yaml
-import yaql


 class YAQLTestCase(base.BaseTestCase):
@@ -32,4 +25,3 @@ class YAQLTestCase(base.BaseTestCase):
         for i in path.split('.'):
             data = data[i]
         return data['yaql']['expression']
-
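For context, the helper above walks a dotted path into a parsed template and returns the yaql expression found there. A hedged usage sketch follows; the template content and the path are illustrative, not taken from the test data:

    import yaml

    # A tiny stand-in for a parsed heat template fragment.
    template = yaml.safe_load(
        "outputs: {role_data: {yaql: {expression: $.data.service_names}}}")

    # Mirrors the loop above: each dotted component is a dict key.
    data = template
    for i in 'outputs.role_data'.split('.'):
        data = data[i]
    print(data['yaql']['expression'])  # -> $.data.service_names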