Enable H904: wrapping using parentheses

Enabled H904 in tox.ini and modified the relevant files to adhere to the H904
guideline. Backslash-based line wrapping is replaced with parenthesis-based or
quote-based wrapping, with indentation adjusted accordingly.

Change-Id: I10b1ce7fbc230ff3167adbb06e0982b4cc0b0bb7
Closes-Bug: #1398555
Madhu Mohan N S 2014-12-19 16:07:21 +05:30
parent 215ffbc898
commit c02255fcdb
19 changed files with 139 additions and 138 deletions
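
For illustration, the general shape of the change is to replace backslash line
continuations with implicit continuation inside parentheses, which is what the
H904 check enforces. The following is a minimal, hypothetical sketch of that
pattern; the names below are not taken from the Congress code base or from this
commit.

# Hypothetical example of the H904 style change; not code from this commit.

class User(object):
    def __init__(self, role, enabled):
        self.role = role
        self.enabled = enabled


def is_admin(user):
    # Before (flagged by H904):
    #     return user.role == 'admin' and \
    #         user.enabled
    # After: wrap the whole condition in parentheses instead of continuing
    # the line with a backslash.
    return (user.role == 'admin' and
            user.enabled)


def connection_error(url, username):
    # Long string expressions are wrapped the same way; adjacent string
    # literals inside the parentheses also concatenate implicitly.
    return ("Cannot connect to the service at " + url +
            " with the username " + username)


if __name__ == '__main__':
    print(is_admin(User('admin', True)))
    print(connection_error('http://example.com', 'alice'))

Once H904 is removed from the flake8 ignore list in tox.ini (the last hunk
below), flake8 flags the backslash form and the diffs that follow apply this
wrapping pattern throughout the tree.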

View File

@ -575,11 +575,11 @@ class DataSourceDriver(deepsix.deepSix):
@classmethod
def _compare_subtranslator(cls, x, y):
if cls.PARENT_KEY not in x[cls.TRANSLATOR] \
and cls.PARENT_KEY in y[cls.TRANSLATOR]:
if (cls.PARENT_KEY not in x[cls.TRANSLATOR]
and cls.PARENT_KEY in y[cls.TRANSLATOR]):
return -1
elif cls.PARENT_KEY in x[cls.TRANSLATOR] \
and cls.PARENT_KEY not in y[cls.TRANSLATOR]:
elif (cls.PARENT_KEY in x[cls.TRANSLATOR]
and cls.PARENT_KEY not in y[cls.TRANSLATOR]):
return 1
else:
return cmp(x, y)
@ -624,8 +624,8 @@ class DataSourceDriver(deepsix.deepSix):
else:
tuples, row_hash = cls.convert_obj(o, subtrans)
assert row_hash, "LIST's subtranslator must have row_hash"
assert cls.need_column_for_subtable_id(subtrans), \
"LIST's subtranslator should have id"
assert cls.need_column_for_subtable_id(subtrans), (
"LIST's subtranslator should have id")
if tuples:
new_tuples.extend(tuples)

View File

@ -467,20 +467,20 @@ class PlexxiDriver(DataSourceDriver):
username=creds['username'],
password=creds['password'])
except requests.exceptions.HTTPError as error:
if int(error.response.status_code) == 401 or\
int(error.response.status_code) == 403:
msg = "Incorrect username/password combination. Passed" + \
"in username was " + creds['username'] + ", " + \
"password was " + creds['password']
if (int(error.response.status_code) == 401 or
int(error.response.status_code) == 403):
msg = ("Incorrect username/password combination. Passed" +
"in username was " + creds['username'] + ", " +
"password was " + creds['password'])
raise Exception(requests.exceptions.HTTPErrror(msg))
else:
raise Exception(requests.exceptions.HTTPError(error))
except requests.exceptions.ConnectionError:
msg = "Cannot connect to PlexxiCore at " + creds['auth_url'] + \
" with the username " + creds['username'] + " and the " + \
"password " + creds['password']
msg = ("Cannot connect to PlexxiCore at " + creds['auth_url'] +
" with the username " + creds['username'] + " and the " +
"password " + creds['password'])
raise Exception(requests.exceptions.ConnectionError(msg))
@ -491,9 +491,11 @@ class PlexxiDriver(DataSourceDriver):
VMs that have the same name in the Plexxi table and the Nova Table.
"""
repeated_name_rule = '{"rule": "RepeatedName(vname,pvuuid)' +\
':- plexxi:vms(pvuuid,vname,phuuid,pvip,pvmaccount,pvaffin),' +\
'nova:servers(nvuuid,vname,a,nstatus,b,c,d,num)"}'
repeated_name_rule = ('{"rule": "RepeatedName(vname,pvuuid)' +
':- plexxi:vms(pvuuid,vname,phuuid,' +
'pvip,pvmaccount,pvaffin),' +
'nova:servers(nvuuid,vname,' +
'a,nstatus,b,c,d,num)"}')
try:
requests.post(self.api_address + '/policies/classification/rules',
data=repeated_name_rule)
@ -525,11 +527,11 @@ class PlexxiDriver(DataSourceDriver):
for plexxivm in plexxivms:
if (plexxivm.getForeignUuid() == vmuuid):
new_vm_name = "Conflict-" + vmname
desc = "Congress has found a VM with the same " +\
"name on the nova network. This vm " +\
"will now be renamed to " + new_vm_name
job_name = " Congress Driver:Changing virtual" +\
"machine, " + vmname + "\'s name"
desc = ("Congress has found a VM with the same " +
"name on the nova network. This vm " +
"will now be renamed to " + new_vm_name)
job_name = (" Congress Driver:Changing virtual" +
"machine, " + vmname + "\'s name")
changenamejob = Job.create(name=job_name,
description=desc + ".",
session=self.exchange)

View File

@ -35,16 +35,16 @@ class Node():
if destination in self.children[word].destinations:
self.children[word].destinations.remove(destination)
if len(self.children[word].destinations) == 0 and \
len(self.children[word].children) == 0:
if (len(self.children[word].destinations) == 0 and
len(self.children[word].children) == 0):
del self.children[word]
else:
self.children[word]._remove(patternList[1:], destination)
if len(self.children[word].destinations) == 0 and \
len(self.children[word].children) == 0:
if (len(self.children[word].destinations) == 0 and
len(self.children[word].children) == 0):
del self.children[word]

View File

@ -446,8 +446,8 @@ class deepSix(greenthread.GreenThread):
for corruuid in self.subdata.keys():
if key == self.subdata[corruuid].key and \
dataindex == self.subdata[corruuid].dataindex:
if (key == self.subdata[corruuid].key and
dataindex == self.subdata[corruuid].dataindex):
if corruuid in self.scheduuids:
self.scheduuids.remove(corruuid)

View File

@ -99,8 +99,9 @@ class DseRuntime (runtime.Runtime, deepsix.deepSix):
msg.header['dataindex'], runtime.iterstr(msg.body.data))
events = msg.body.data
for event in events:
assert compile.is_atom(event.formula), \
"receive_data_update received non-atom: " + str(event.formula)
assert compile.is_atom(event.formula), (
"receive_data_update received non-atom: " +
str(event.formula))
# prefix tablename with data source
event.target = msg.replyTo
(permitted, changes) = self.update(events)

View File

@ -612,8 +612,9 @@ class TopDownTheory(Theory):
# LOG.debug("%s is negated", lit)
# recurse on the negation of the literal
plugged = lit.plug(context.binding)
assert plugged.is_ground(), \
"Negated literal not ground when evaluated: " + str(plugged)
assert plugged.is_ground(), (
"Negated literal not ground when evaluated: " +
str(plugged))
self.print_call(lit, context.binding, context.depth)
new_context = self.TopDownContext(
[lit.complement()], 0, context.binding, None,
@ -648,10 +649,10 @@ class TopDownTheory(Theory):
# create args for function
args = []
for i in xrange(0, builtin.num_inputs):
assert plugged.arguments[i].is_object(), \
assert plugged.arguments[i].is_object(), (
("Builtins must be evaluated only after their "
"inputs are ground: {} with num-inputs {}".format(
str(plugged), builtin.num_inputs))
str(plugged), builtin.num_inputs)))
args.append(plugged.arguments[i].name)
# evaluate builtin: must return number, string, or iterable
# of numbers/strings
@ -1480,8 +1481,8 @@ class DeltaRuleTheory (Theory):
"""Insert a compile.Rule into the theory.
Return True iff the theory changed.
"""
assert compile.is_regular_rule(rule), \
"DeltaRuleTheory only takes rules"
assert compile.is_regular_rule(rule), (
"DeltaRuleTheory only takes rules")
self.log(rule.tablename(), "Insert: %s", rule)
if rule in self.originals:
self.log(None, iterstr(self.originals))
@ -1621,9 +1622,9 @@ class DeltaRuleTheory (Theory):
if tablearity not in global_self_joins:
global_self_joins[tablearity] = 1
else:
global_self_joins[tablearity] = \
global_self_joins[tablearity] = (
max(occurrences[tablearity] - 1,
global_self_joins[tablearity])
global_self_joins[tablearity]))
results.append(rule)
LOG.debug("final rule: %s", rule)
# add definitions for new tables
@ -1722,8 +1723,8 @@ class MaterializedViewTheory(TopDownTheory):
Does not check if EVENTS would cause errors.
"""
for event in events:
assert compile.is_datalog(event.formula), \
"Non-formula not allowed: {}".format(str(event.formula))
assert compile.is_datalog(event.formula), (
"Non-formula not allowed: {}".format(str(event.formula)))
self.enqueue_any(event)
changes = self.process_queue()
if changes:
@ -1739,8 +1740,8 @@ class MaterializedViewTheory(TopDownTheory):
current = set(self.policy()) # copy so can modify and discard
# compute new rule set
for event in events:
assert compile.is_datalog(event.formula), \
"update_would_cause_errors operates only on objects"
assert compile.is_datalog(event.formula), (
"update_would_cause_errors operates only on objects")
self.log(None, "Updating %s", event.formula)
if event.formula.is_atom():
errors.extend(compile.fact_errors(
@ -1831,8 +1832,9 @@ class MaterializedViewTheory(TopDownTheory):
formula = event.formula
if formula.is_atom():
self.log(formula.tablename(), "compute/enq: atom %s", formula)
assert not self.is_view(formula.table), \
"Cannot directly modify tables computed from other tables"
assert not self.is_view(formula.table), (
"Cannot directly modify tables" +
" computed from other tables")
# self.log(formula.table, "%s: %s", text, formula)
self.enqueue(event)
return []
@ -2319,8 +2321,8 @@ class Runtime (object):
create_network(17), options:value(17, "name", "net1") :- true
"""
assert self.get_target(theory) is not None, "Theory must be known"
assert self.get_target(action_theory) is not None, \
"Action theory must be known"
assert self.get_target(action_theory) is not None, (
"Action theory must be known")
if isinstance(query, basestring) and isinstance(sequence, basestring):
return self.simulate_string(query, theory, sequence, action_theory,
delta, trace)
@ -2487,9 +2489,9 @@ class Runtime (object):
# select
def select_string(self, policy_string, theory, trace):
policy = self.parse(policy_string)
assert len(policy) == 1, \
assert (len(policy) == 1), (
"Queries can have only 1 statement: {}".format(
[str(x) for x in policy])
[str(x) for x in policy]))
results = self.select_obj(policy[0], theory, trace)
if trace:
return (compile.formulas_to_string(results[0]), results[1])
@ -2595,8 +2597,8 @@ class Runtime (object):
assert compile.is_datalog(query), "Query must be formula"
# Each action is represented as a rule with the actual action
# in the head and its supporting data (e.g. options) in the body
assert all(compile.is_extended_datalog(x) for x in sequence), \
"Sequence must be an iterable of Rules"
assert all(compile.is_extended_datalog(x) for x in sequence), (
"Sequence must be an iterable of Rules")
th_object = self.get_target(theory)
if trace:
@ -2726,10 +2728,10 @@ class Runtime (object):
self.table_log(tablename, "Projecting %s", formula)
# define extension of current Actions theory
if formula.is_atom():
assert formula.is_ground(), \
"Projection atomic updates must be ground"
assert not formula.is_negated(), \
"Projection atomic updates must be positive"
assert formula.is_ground(), (
"Projection atomic updates must be ground")
assert not formula.is_negated(), (
"Projection atomic updates must be positive")
newth.define([formula])
else:
# instantiate action using prior results

View File

@ -42,29 +42,29 @@ class NovaFakeClient(mock.MagicMock):
return server
def get_server_list(self):
server_one = \
server_one = (
self.get_mock_server(1234, 'sample-server',
"e4d909c290d0fb1ca068ffaddf22cbd0",
'BUILD',
'50e14867-7c64-4ec9-be8d-ed2470ca1d24',
'33ea0494-2bdf-4382-a445-9068997430b9',
{"id": 1}, {"id": 2})
{"id": 1}, {"id": 2}))
server_two = \
server_two = (
self.get_mock_server(5678, 'sample-server2',
"9e107d9d372bb6826bd81d3542a419d6",
'ACTIVE',
'50e14867-7c64-4ec9-be8d-ed2470ca1d24',
'33ea0494-2bdf-4382-a445-9068997430b9',
{"id": 1}, {"id": 2})
{"id": 1}, {"id": 2}))
server_three = \
server_three = (
self.get_mock_server(9012, 'sample-server3',
"9e107d9d372bb6826bd81d3542a419d6",
'ACTIVE',
'50e14867-7c64-4ec9-be8d-ed2470ca1d24',
'33ea0494-2bdf-4382-a445-9068997430b9',
{"id": 1}, {"id": 2})
{"id": 1}, {"id": 2}))
return [server_one, server_two, server_three]

View File

@ -34,8 +34,8 @@ class TestNeutronDriver(base.TestCase):
self.neutron_client.list_networks.return_value = network_response
self.neutron_client.list_ports.return_value = port_response
self.neutron_client.list_routers.return_value = router_response
self.neutron_client.list_security_groups.return_value = \
security_group_response
self.neutron_client.list_security_groups.return_value = (
security_group_response)
args = helper.datasource_openstack_args()
args['poll_time'] = 0
self.driver = NeutronDriver(args=args)
@ -61,17 +61,17 @@ class TestNeutronDriver(base.TestCase):
subnet_tuple_guid = network_tuple[key_to_index['subnet_group_id']]
name = network_tuple[key_to_index['name']]
status = network_tuple[key_to_index['status']]
provider_physical_network = \
network_tuple[key_to_index['provider:physical_network']]
provider_physical_network = (
network_tuple[key_to_index['provider:physical_network']])
admin_state_up = network_tuple[key_to_index['admin_state_up']]
tenant_id = network_tuple[key_to_index['tenant_id']]
provider_network_type = \
network_tuple[key_to_index['provider:network_type']]
provider_network_type = (
network_tuple[key_to_index['provider:network_type']])
router_external = network_tuple[key_to_index['router:external']]
shared = network_tuple[key_to_index['shared']]
id = network_tuple[key_to_index['id']]
provider_segmentation_id = \
network_tuple[key_to_index['provider:segmentation_id']]
provider_segmentation_id = (
network_tuple[key_to_index['provider:segmentation_id']])
# properties of first subnet
network_subnet_tuple = network_subnet_tuples.pop()
@ -365,16 +365,16 @@ class TestDataSourceDriver(base.TestCase):
'neutron:networks({})'.format(args2)]
# answer to query above for network1
datalog1 = \
('p("240ff9df-df35-43ae-9df5-27fae87f2492") '
'p("340ff9df-df35-43ae-9df5-27fae87f2492") '
'p("440ff9df-df35-43ae-9df5-27fae87f2492")')
datalog1 = (
'p("240ff9df-df35-43ae-9df5-27fae87f2492") '
'p("340ff9df-df35-43ae-9df5-27fae87f2492") '
'p("440ff9df-df35-43ae-9df5-27fae87f2492")')
# answer to query above for network2
datalog2 = \
('p("240ff9df-df35-43ae-9df5-27fae87f2492") '
'p("640ff9df-df35-43ae-9df5-27fae87f2492") '
'p("540ff9df-df35-43ae-9df5-27fae87f2492")')
datalog2 = (
'p("240ff9df-df35-43ae-9df5-27fae87f2492") '
'p("640ff9df-df35-43ae-9df5-27fae87f2492") '
'p("540ff9df-df35-43ae-9df5-27fae87f2492")')
# return value
self.info = {}
@ -615,8 +615,8 @@ network2 = {'networks': [
# Sample responses from neutron-client, after parsing
network_response = \
{'networks':
network_response = {
'networks':
[{'status': 'ACTIVE',
'subnets': ['4cef03d0-1d02-40bb-8c99-2f442aac6ab0'],
'name': 'test-network',
@ -629,8 +629,8 @@ network_response = \
'id': '240ff9df-df35-43ae-9df5-27fae87f2492',
'provider:segmentation_id': 4}]}
port_response = \
{"ports":
port_response = {
"ports":
[{"status": "ACTIVE",
"binding:host_id": "havana",
"name": "",
@ -653,8 +653,8 @@ port_response = \
'25ea0516-11ec-46e9-9e8e-7d1b6e3d7523'],
"device_id": "864e4acf-bf8e-4664-8cf7-ad5daa95681e"}]}
router_response = \
{'routers':
router_response = {
'routers':
[{u'status': u'ACTIVE',
u'external_gateway_info':
{u'network_id': u'a821b8d3-af1f-4d79-9b8e-3da9674338ae',
@ -665,8 +665,8 @@ router_response = \
u'routes': [],
u'id': u'4598c424-d608-4366-9beb-139adbd7cff5'}]}
security_group_response = \
{'security_groups':
security_group_response = {
'security_groups':
[{u'tenant_id': u'abb53cc6636848218f46d01f22bf1060',
u'name': u'default',
u'description': u'default',

View File

@ -1,5 +1,5 @@
PANEL = 'datasources'
PANEL_DASHBOARD = 'admin'
PANEL_GROUP = 'policy'
ADD_PANEL = \
'openstack_dashboard.dashboards.admin.datasources.panel.DataSources'
DASH_BOARDS = 'openstack_dashboard.dashboards'
ADD_PANEL = DASH_BOARDS + '.admin.datasources.panel.DataSources'

View File

@ -17,10 +17,10 @@ from django.conf.urls import url
from openstack_dashboard.dashboards.admin.datasources import views
PLUGINS = \
r'^plugins/(?P<datasource_name>[^/]+)/(?P<table_name>[^/]+)/%s$'
POLICIES = \
r'^policies/(?P<datasource_name>[^/]+)/(?P<policy_table_name>[^/]+)/%s$'
PLUGINS = (
r'^plugins/(?P<datasource_name>[^/]+)/(?P<table_name>[^/]+)/%s$')
POLICIES = (
r'^policies/(?P<datasource_name>[^/]+)/(?P<policy_table_name>[^/]+)/%s$')
urlpatterns = patterns(

View File

@ -22,8 +22,8 @@ from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard.api import congress
from openstack_dashboard.dashboards.admin.datasources \
import tables as datasources_tables
from openstack_dashboard.dashboards.admin.datasources import (
tables as datasources_tables)
logger = logging.getLogger(__name__)

View File

@ -18,8 +18,8 @@ from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard.api import congress
from openstack_dashboard.dashboards.admin.policies \
import tables as policies_tables
from openstack_dashboard.dashboards.admin.policies import (
tables as policies_tables)
class IndexView(tables.DataTableView):

View File

@ -44,14 +44,14 @@ class TestCeilometerDriver(manager_congress.ScenarioPolicyBase):
meter_map = {}
for meter in meters:
meter_map[meter['meter_id']] = meter
meter_schema = \
meter_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
'ceilometer', 'meters')['columns']
'ceilometer', 'meters')['columns'])
def _check_data_table_ceilometer_meters():
results = \
results = (
self.admin_manager.congress_client.list_datasource_rows(
'ceilometer', 'meters')
'ceilometer', 'meters'))
for row in results['results']:
meter_row = meter_map[row['data'][0]]
for index in range(len(meter_schema)):

View File

@ -52,14 +52,14 @@ class TestGlanceV2Driver(manager_congress.ScenarioPolicyBase):
for image in images:
image_map[image['id']] = image
image_schema = \
image_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
'glancev2', 'images')['columns']
'glancev2', 'images')['columns'])
def _check_data_table_glancev2_images():
results = \
results = (
self.admin_manager.congress_client.list_datasource_rows(
'glancev2', 'images')
'glancev2', 'images'))
for row in results['results']:
image_row = image_map[row['data'][0]]
for index in range(len(image_schema)):
@ -92,9 +92,9 @@ class TestGlanceV2Driver(manager_congress.ScenarioPolicyBase):
image_tag_map[image['id']] = image['tags']
def _check_data_table_glance_images():
results = \
results = (
self.admin_manager.congress_client.list_datasource_rows(
'glancev2', 'tags')
'glancev2', 'tags'))
for row in results['results']:
image_id, tag = row['data'][0], row['data'][1]
glance_image_tags = image_tag_map.get(image_id)

View File

@ -48,14 +48,14 @@ class TestKeystoneV2Driver(manager_congress.ScenarioPolicyBase):
for user in users:
user_map[user['id']] = user
user_schema = \
user_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
'keystone', 'users')['columns']
'keystone', 'users')['columns'])
def _check_data_table_keystone_users():
results = \
results = (
self.admin_manager.congress_client.list_datasource_rows(
'keystone', 'users')
'keystone', 'users'))
for row in results['results']:
user_row = user_map[row['data'][4]]
for index in range(len(user_schema)):
@ -76,14 +76,14 @@ class TestKeystoneV2Driver(manager_congress.ScenarioPolicyBase):
for role in roles:
roles_map[role['id']] = role
role_schema = \
role_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
'keystone', 'roles')['columns']
'keystone', 'roles')['columns'])
def _check_data_table_keystone_roles():
results = \
results = (
self.admin_manager.congress_client.list_datasource_rows(
'keystone', 'roles')
'keystone', 'roles'))
for row in results['results']:
role_row = roles_map[row['data'][0]]
for index in range(len(role_schema)):
@ -104,14 +104,14 @@ class TestKeystoneV2Driver(manager_congress.ScenarioPolicyBase):
for tenant in tenants:
tenants_map[tenant['id']] = tenant
tenant_schema = \
tenant_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
'keystone', 'tenants')['columns']
'keystone', 'tenants')['columns'])
def _check_data_table_keystone_tenants():
results = \
results = (
self.admin_manager.congress_client.list_datasource_rows(
'keystone', 'tenants')
'keystone', 'tenants'))
for row in results['results']:
tenant_row = tenants_map[row['data'][3]]
for index in range(len(tenant_schema)):

View File

@ -46,9 +46,9 @@ class TestNovaDriver(manager_congress.ScenarioPolicyBase):
self._setup_network_and_servers()
def _check_data_table_nova_servers():
results = \
results = (
self.admin_manager.congress_client.list_datasource_rows(
'nova', 'servers')
'nova', 'servers'))
keys = ['id', 'name', 'hostId', 'status', 'tenant_id',
'user_id', 'image', 'flavor']
for row in results['results']:
@ -80,9 +80,9 @@ class TestNovaDriver(manager_congress.ScenarioPolicyBase):
flavor_id_map[flavor['id']] = flavor
def _check_data_table_nova_flavors():
results = \
results = (
self.admin_manager.congress_client.list_datasource_rows(
'nova', 'flavors')
'nova', 'flavors'))
keys = ['id', 'name', 'vcpus', 'ram', 'disk',
'OS-FLV-EXT-DATA:ephemeral', 'rxtx_factor']
for row in results['results']:

View File

@ -42,8 +42,8 @@ class ScenarioPolicyBase(manager.NetworkScenarioTest):
cls.admin_credentials())
def _setup_network_and_servers(self):
self.security_group = \
self._create_security_group(tenant_id=self.tenant_id)
self.security_group = (self._create_security_group
(tenant_id=self.tenant_id))
self.network, self.subnet, self.router = self.create_networks()
self.check_networks()
@ -103,10 +103,9 @@ class ScenarioPolicyBase(manager.NetworkScenarioTest):
ssh_login = CONF.compute.image_ssh_user
for server in self.servers:
# call the common method in the parent class
super(ScenarioPolicyBase, self).\
_check_tenant_network_connectivity(
server, ssh_login, self._get_server_key(server),
servers_for_debug=self.servers)
super(ScenarioPolicyBase, self)._check_tenant_network_connectivity(
server, ssh_login, self._get_server_key(server),
servers_for_debug=self.servers)
def _create_and_associate_floating_ips(self, server):
public_network_id = CONF.network.public_network_id

View File

@ -55,9 +55,8 @@ class TestPolicyBasicOps(manager_congress.ScenarioPolicyBase):
"security_group_id), neutron:security_groups("
"tenant_id2, security_group_name, desc2, "
"security_group_id)"}
results = \
self.admin_manager.congress_client.create_policy_rule(
'classification', body)
results = self.admin_manager.congress_client.create_policy_rule(
'classification', body)
rule_id = results['id']
self.addCleanup(
self.admin_manager.congress_client.delete_policy_rule,
@ -67,9 +66,8 @@ class TestPolicyBasicOps(manager_congress.ScenarioPolicyBase):
ports = self._list_ports(device_id=self.servers[0]['id'])
def check_data():
results = \
self.admin_manager.congress_client.list_policy_rows(
'classification', 'port_security_group')
results = self.admin_manager.congress_client.list_policy_rows(
'classification', 'port_security_group')
for row in results['results']:
if (row['data'][0] == ports[0]['id'] and
row['data'][1] ==
@ -100,9 +98,9 @@ class TestCongressDataSources(manager_congress.ScenarioPolicyBase):
def _check_all_datasources_are_initialized():
for datasource in datasources['results']:
results = \
results = (
self.admin_manager.congress_client.list_datasource_status(
datasource['id'])
datasource['id']))
for result in results['results']:
if result['key'] == 'initialized':
if result['value'] != 'True':
@ -120,9 +118,9 @@ class TestCongressDataSources(manager_congress.ScenarioPolicyBase):
def check_data():
for datasource in datasources['results']:
results = \
results = (
self.admin_manager.congress_client.list_datasource_tables(
datasource['id'])
datasource['id']))
# NOTE(arosen): if there are no results here we return false as
# there is something wrong with a driver as it doesn't expose
# any tables.

View File

@ -36,11 +36,10 @@ commands = python setup.py testr --no-parallel --testr-args='test_benchmark {pos
[flake8]
# H237 module is removed in Python 3
# H405 multi line docstring summary not separated with an empty line
# H904 Wrap long lines in parentheses instead of a backslash
# H302 import only modules
show-source = True
ignore = H237,H405,H904,H302
ignore = H237,H405,H302
builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,*thirdparty/*,CongressLexer.py,CongressParser.py