Enable H904 style checking rule

Use parentheses instead of backslash for line continuation.

Plus, usage of escaped quotes in long strings is avoided where it makes
sense.

Change-Id: If2e78012b85a4430c6f03f65784cac2d032cf116
changes/58/144758/9
Pavlo Shchelokovskyy 8 years ago
parent 4279bd2923
commit cb8712281d
  1. 30
      contrib/extraroute/extraroute/tests/test_extraroute.py
  2. 4
      contrib/rackspace/rackspace/resources/cloud_loadbalancer.py
  3. 8
      contrib/rackspace/rackspace/resources/cloud_server.py
  4. 30
      contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py
  5. 12
      contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py
  6. 2
      heat/common/auth_password.py
  7. 8
      heat/common/config.py
  8. 7
      heat/common/environment_format.py
  9. 8
      heat/common/exception.py
  10. 2
      heat/common/heat_keystoneclient.py
  11. 10
      heat/common/wsgi.py
  12. 123
      heat/db/sqlalchemy/api.py
  13. 13
      heat/db/sqlalchemy/migrate_repo/versions/022_stack_event_soft_delete.py
  14. 4
      heat/db/sqlalchemy/migrate_repo/versions/035_event_uuid_to_id.py
  15. 4
      heat/db/sqlalchemy/migrate_repo/versions/045_stack_backup.py
  16. 12
      heat/engine/cfn/template.py
  17. 4
      heat/engine/environment.py
  18. 8
      heat/engine/event.py
  19. 11
      heat/engine/hot/template.py
  20. 4
      heat/engine/parameters.py
  21. 4
      heat/engine/resources/aws/autoscaling_group.py
  22. 6
      heat/engine/resources/ceilometer/alarm.py
  23. 7
      heat/engine/resources/eip.py
  24. 10
      heat/engine/resources/instance.py
  25. 4
      heat/engine/resources/loadbalancer.py
  26. 8
      heat/engine/resources/neutron/port.py
  27. 4
      heat/engine/resources/nova_floatingip.py
  28. 4
      heat/engine/resources/openstack/volume.py
  29. 10
      heat/engine/resources/openstack/wait_condition_handle.py
  30. 24
      heat/engine/resources/resource_group.py
  31. 8
      heat/engine/resources/server.py
  32. 4
      heat/engine/resources/software_config/software_component.py
  33. 4
      heat/engine/resources/software_config/software_deployment.py
  34. 2
      heat/engine/resources/swiftsignal.py
  35. 4
      heat/engine/rsrc_defn.py
  36. 4
      heat/engine/stack.py
  37. 4
      heat/engine/stack_lock.py
  38. 4
      heat/engine/stack_resource.py
  39. 4
      heat/tests/aws/test_waitcondition.py
  40. 4
      heat/tests/common.py
  41. 20
      heat/tests/fakes.py
  42. 4
      heat/tests/generic_resource.py
  43. 4
      heat/tests/openstack/test_waitcondition.py
  44. 10
      heat/tests/test_api_openstack_v1.py
  45. 12
      heat/tests/test_autoscaling_update_policy.py
  46. 4
      heat/tests/test_ceilometer_alarm.py
  47. 8
      heat/tests/test_cinder_client.py
  48. 10
      heat/tests/test_eip.py
  49. 41
      heat/tests/test_engine_service.py
  50. 8
      heat/tests/test_fault_middleware.py
  51. 12
      heat/tests/test_glance_client.py
  52. 10
      heat/tests/test_heatclient.py
  53. 4
      heat/tests/test_hot.py
  54. 4
      heat/tests/test_identifier.py
  55. 14
      heat/tests/test_instance.py
  56. 12
      heat/tests/test_instance_group_update_policy.py
  57. 4
      heat/tests/test_lifecycle_plugin_utils.py
  58. 4
      heat/tests/test_loadbalancer.py
  59. 106
      heat/tests/test_nested_stack.py
  60. 56
      heat/tests/test_neutron_autoscaling.py
  61. 4
      heat/tests/test_nokey.py
  62. 8
      heat/tests/test_notifications.py
  63. 4
      heat/tests/test_parameters.py
  64. 10
      heat/tests/test_parser.py
  65. 29
      heat/tests/test_properties.py
  66. 25
      heat/tests/test_provider_template.py
  67. 8
      heat/tests/test_remote_stack.py
  68. 2
      heat/tests/test_resource_group.py
  69. 112
      heat/tests/test_server.py
  70. 4
      heat/tests/test_software_component.py
  71. 4
      heat/tests/test_sqlalchemy_api.py
  72. 8
      heat/tests/test_stack_resource.py
  73. 8
      heat/tests/test_swiftsignal.py
  74. 490
      heat/tests/test_validate.py
  75. 9
      heat/tests/test_version_negotiation_middleware.py
  76. 4
      heat/tests/test_watch.py
  77. 6
      heat/tests/test_wsgi.py
  78. 3
      tox.ini

@@ -76,8 +76,8 @@ class NeutronExtraRouteTest(common.HeatTestCase):
def test_extraroute(self):
# add first route
neutronclient.Client.show_router(
'3e46229d-8fce-4733-819a-b5fe630550f8')\
.AndReturn({'router': {'routes': []}})
'3e46229d-8fce-4733-819a-b5fe630550f8'
).AndReturn({'router': {'routes': []}})
neutronclient.Client.update_router(
'3e46229d-8fce-4733-819a-b5fe630550f8',
{"router": {
@@ -87,9 +87,9 @@ class NeutronExtraRouteTest(common.HeatTestCase):
}}).AndReturn(None)
# add second route
neutronclient.Client.show_router(
'3e46229d-8fce-4733-819a-b5fe630550f8')\
.AndReturn({'router': {'routes': [{"destination": "192.168.0.0/24",
"nexthop": "1.1.1.1"}]}})
'3e46229d-8fce-4733-819a-b5fe630550f8'
).AndReturn({'router': {'routes': [{"destination": "192.168.0.0/24",
"nexthop": "1.1.1.1"}]}})
neutronclient.Client.update_router(
'3e46229d-8fce-4733-819a-b5fe630550f8',
{"router": {
@@ -100,12 +100,12 @@ class NeutronExtraRouteTest(common.HeatTestCase):
}}).AndReturn(None)
# first delete
neutronclient.Client.show_router(
'3e46229d-8fce-4733-819a-b5fe630550f8')\
.AndReturn({'router':
{'routes': [{"destination": "192.168.0.0/24",
"nexthop": "1.1.1.1"},
{"destination": "192.168.255.0/24",
"nexthop": "1.1.1.1"}]}})
'3e46229d-8fce-4733-819a-b5fe630550f8'
).AndReturn({'router':
{'routes': [{"destination": "192.168.0.0/24",
"nexthop": "1.1.1.1"},
{"destination": "192.168.255.0/24",
"nexthop": "1.1.1.1"}]}})
neutronclient.Client.update_router(
'3e46229d-8fce-4733-819a-b5fe630550f8',
{"router": {
@@ -115,10 +115,10 @@ class NeutronExtraRouteTest(common.HeatTestCase):
}}).AndReturn(None)
# second delete
neutronclient.Client.show_router(
'3e46229d-8fce-4733-819a-b5fe630550f8')\
.AndReturn({'router':
{'routes': [{"destination": "192.168.255.0/24",
"nexthop": "1.1.1.1"}]}})
'3e46229d-8fce-4733-819a-b5fe630550f8'
).AndReturn({'router':
{'routes': [{"destination": "192.168.255.0/24",
"nexthop": "1.1.1.1"}]}})
self.m.ReplayAll()
t = template_format.parse(neutron_template)
stack = utils.parse_stack(t)

@@ -541,8 +541,8 @@ class CloudLoadBalancer(resource.Resource):
virtual_ips = self._setup_properties(vips, self.clb.VirtualIP)
(session_persistence, connection_logging, metadata) = \
self._alter_properties_for_api()
(session_persistence, connection_logging, metadata
) = self._alter_properties_for_api()
lb_body = {
'port': self.properties[self.PORT],

@@ -208,12 +208,12 @@ class CloudServer(server.Server):
self.client_plugin().refresh_server(server)
if 'rack_connect' in self.context.roles and not \
self._check_rack_connect_complete(server):
if ('rack_connect' in self.context.roles and not
self._check_rack_connect_complete(server)):
return False
if 'rax_managed' in self.context.roles and not \
self._check_managed_cloud_complete(server):
if ('rax_managed' in self.context.roles and not
self._check_managed_cloud_complete(server)):
return False
return True

@@ -32,8 +32,7 @@ from ..resources import cloud_loadbalancer as lb # noqa
# The following fakes are for pyrax
cert = """\
-----BEGIN CERTIFICATE-----
cert = """-----BEGIN CERTIFICATE-----
MIIFBjCCAu4CCQDWdcR5LY/+/jANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB
VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
cyBQdHkgTHRkMB4XDTE0MTAxNjE3MDYxNVoXDTE1MTAxNjE3MDYxNVowRTELMAkG
@@ -63,8 +62,7 @@ eF5whPl36/GK8HUixCibkCyqEOBBuNqhOz7nVLM0eg5L+TE5coizEBagxVCovYSj
fQ9zkIgaC5oeH6L0C1FFG1vRNSWokheBk14ztVoJCJyFr6p0/6pD7SeR
-----END CERTIFICATE-----"""
private_key = """\
-----BEGIN PRIVATE KEY-----
private_key = """-----BEGIN PRIVATE KEY-----
MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDJuTXD9LTCh25U
+lHdZPE8Wff/Ljh8FDT27xbL0sgrqY9CdLxgk427gtiOU/wl0bZyxCLfxGq5TQKn
I2wwlrUshCrN8w5ppK3qCAxGvKcgENsnLAlxjMQzfexd/8JS2WoFDTNBcBhy2VgY
@@ -1032,8 +1030,8 @@ class LoadBalancerTest(common.HeatTestCase):
def test_update_session_persistence_delete(self):
template = copy.deepcopy(self.lb_template)
lb_name = template['Resources'].keys()[0]
template['Resources'][lb_name]['Properties']['sessionPersistence'] = \
"SOURCE_IP"
template['Resources'][lb_name]['Properties'][
'sessionPersistence'] = "SOURCE_IP"
expected_body = copy.deepcopy(self.expected_body)
expected_body['sessionPersistence'] = {'persistenceType': "SOURCE_IP"}
rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
@@ -1240,8 +1238,8 @@ class LoadBalancerTest(common.HeatTestCase):
def test_update_connection_logging_delete(self):
template = copy.deepcopy(self.lb_template)
lb_name = template['Resources'].keys()[0]
template['Resources'][lb_name]['Properties']['connectionLogging'] = \
True
template['Resources'][lb_name]['Properties'][
'connectionLogging'] = True
expected_body = copy.deepcopy(self.expected_body)
expected_body['connectionLogging'] = {'enabled': True}
rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
@@ -1267,8 +1265,8 @@ class LoadBalancerTest(common.HeatTestCase):
def test_update_connection_logging_disable(self):
template = copy.deepcopy(self.lb_template)
lb_name = template['Resources'].keys()[0]
template['Resources'][lb_name]['Properties']['connectionLogging'] = \
True
template['Resources'][lb_name]['Properties'][
'connectionLogging'] = True
expected_body = copy.deepcopy(self.expected_body)
expected_body['connectionLogging'] = {'enabled': True}
rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
@@ -1317,8 +1315,8 @@ class LoadBalancerTest(common.HeatTestCase):
def test_update_connection_throttle_delete(self):
template = copy.deepcopy(self.lb_template)
lb_name = template['Resources'].keys()[0]
template['Resources'][lb_name]['Properties']['connectionThrottle'] = \
{'maxConnections': 1000}
template['Resources'][lb_name]['Properties'][
'connectionThrottle'] = {'maxConnections': 1000}
expected_body = copy.deepcopy(self.expected_body)
expected_body['connectionThrottle'] = {
'maxConnections': 1000, 'maxConnectionRate': None,
@@ -1368,8 +1366,8 @@ class LoadBalancerTest(common.HeatTestCase):
def test_update_content_caching_deleted(self):
template = copy.deepcopy(self.lb_template)
lb_name = template['Resources'].keys()[0]
template['Resources'][lb_name]['Properties']['contentCaching'] = \
'ENABLED'
template['Resources'][lb_name]['Properties'][
'contentCaching'] = 'ENABLED'
# Enabling the content cache is done post-creation, so no need
# to modify self.expected_body
rsrc, fake_loadbalancer = self._mock_loadbalancer(template,
@@ -1396,8 +1394,8 @@ class LoadBalancerTest(common.HeatTestCase):
def test_update_content_caching_disable(self):
template = copy.deepcopy(self.lb_template)
lb_name = template['Resources'].keys()[0]
template['Resources'][lb_name]['Properties']['contentCaching'] = \
'ENABLED'
template['Resources'][lb_name]['Properties'][
'contentCaching'] = 'ENABLED'
# Enabling the content cache is done post-creation, so no need
# to modify self.expected_body
rsrc, fake_loadbalancer = self._mock_loadbalancer(template,

@@ -101,15 +101,15 @@ class CloudServersTest(common.HeatTestCase):
stack_name = '%s_s' % name
(tmpl, stack) = self._setup_test_stack(stack_name)
tmpl.t['Resources']['WebServer']['Properties']['image'] = \
image_id or 'CentOS 5.2'
tmpl.t['Resources']['WebServer']['Properties']['flavor'] = \
'256 MB Server'
tmpl.t['Resources']['WebServer']['Properties'][
'image'] = image_id or 'CentOS 5.2'
tmpl.t['Resources']['WebServer']['Properties'][
'flavor'] = '256 MB Server'
server_name = '%s' % name
if override_name:
tmpl.t['Resources']['WebServer']['Properties']['name'] = \
server_name
tmpl.t['Resources']['WebServer']['Properties'][
'name'] = server_name
resource_defns = tmpl.resource_definitions(stack)
server = cloud_server.CloudServer(server_name,

@@ -65,7 +65,7 @@ class KeystonePasswordAuthProtocol(object):
def _reject_request(self, env, start_response, auth_url):
"""Redirect client to auth server."""
headers = [('WWW-Authenticate', 'Keystone uri=\'%s\'' % auth_url)]
headers = [('WWW-Authenticate', "Keystone uri='%s'" % auth_url)]
resp = exc.HTTPUnauthorized('Authentication required', headers)
return resp(env, start_response)

@@ -124,10 +124,10 @@ engine_opts = [
'retries.')),
cfg.IntOpt('event_purge_batch_size',
default=10,
help=_('Controls how many events will be pruned whenever a '
' stack\'s events exceed max_events_per_stack. Set this'
' lower to keep more events at the expense of more'
' frequent purges.')),
help=_("Controls how many events will be pruned whenever a "
"stack's events exceed max_events_per_stack. Set this "
"lower to keep more events at the expense of more "
"frequent purges.")),
cfg.IntOpt('max_events_per_stack',
default=1000,
help=_('Maximum events that will be available per stack. Older'

@@ -15,8 +15,11 @@ from heat.common.i18n import _
from heat.common import template_format
SECTIONS = (PARAMETERS, RESOURCE_REGISTRY, PARAMETER_DEFAULTS) = \
('parameters', 'resource_registry', 'parameter_defaults')
SECTIONS = (
PARAMETERS, RESOURCE_REGISTRY, PARAMETER_DEFAULTS
) = (
'parameters', 'resource_registry', 'parameter_defaults'
)
def parse(env_str):

@@ -135,8 +135,8 @@ class MissingCredentialError(HeatException):
class BadAuthStrategy(HeatException):
msg_fmt = _("Incorrect auth strategy, expected \"%(expected)s\" but "
"received \"%(received)s\"")
msg_fmt = _('Incorrect auth strategy, expected "%(expected)s" but '
'received "%(received)s"')
class AuthBadRequest(HeatException):
@@ -216,8 +216,8 @@ class InvalidTemplateAttribute(HeatException):
class InvalidTemplateReference(HeatException):
msg_fmt = _("The specified reference \"%(resource)s\" (in %(key)s)"
" is incorrect.")
msg_fmt = _('The specified reference "%(resource)s" (in %(key)s)'
' is incorrect.')
class UserKeyPairMissing(HeatException):

@@ -326,7 +326,7 @@ class KeystoneClientV3(object):
if len(domains) == 1:
return domains[0].id
elif len(domains) == 0:
msg = _('Can\'t find domain id for %s!')
msg = _("Can't find domain id for %s!")
LOG.error(msg, domain_name)
raise exception.Error(msg % domain_name)
else:

@@ -543,8 +543,8 @@ def is_json_content_type(request):
# for back compatible for null or plain content type
if not content_type or content_type.startswith('text/plain'):
content_type = 'application/json'
if content_type in ('JSON', 'application/json')\
and request.body.startswith('{'):
if (content_type in ('JSON', 'application/json')
and request.body.startswith('{')):
return True
return False
@@ -565,9 +565,9 @@ class JSONRequestDeserializer(object):
try:
if len(datastring) > cfg.CONF.max_json_body_size:
msg = _('JSON body size (%(len)s bytes) exceeds maximum '
'allowed size (%(limit)s bytes).') % \
{'len': len(datastring),
'limit': cfg.CONF.max_json_body_size}
'allowed size (%(limit)s bytes).'
) % {'len': len(datastring),
'limit': cfg.CONF.max_json_body_size}
raise exception.RequestLimitExceeded(message=msg)
return json.loads(datastring)
except ValueError as ex:

@@ -123,10 +123,13 @@ def resource_get(context, resource_id):
def resource_get_by_name_and_stack(context, resource_name, stack_id):
result = model_query(context, models.Resource).\
filter_by(name=resource_name).\
filter_by(stack_id=stack_id).\
options(orm.joinedload("data")).first()
result = model_query(
context, models.Resource
).filter_by(
name=resource_name
).filter_by(
stack_id=stack_id
).options(orm.joinedload("data")).first()
return result
@@ -258,9 +261,11 @@ def resource_create(context, values):
def resource_get_all_by_stack(context, stack_id):
results = model_query(context, models.Resource).\
filter_by(stack_id=stack_id).\
options(orm.joinedload("data")).all()
results = model_query(
context, models.Resource
).filter_by(
stack_id=stack_id
).options(orm.joinedload("data")).all()
if not results:
raise exception.NotFound(_("no resources for stack_id %s were found")
@@ -269,23 +274,22 @@ def resource_get_all_by_stack(context, stack_id):
def stack_get_by_name_and_owner_id(context, stack_name, owner_id):
query = soft_delete_aware_query(context, models.Stack).\
filter(sqlalchemy.or_(
models.Stack.tenant == context.tenant_id,
models.Stack.stack_user_project_id == context.tenant_id
)).\
filter_by(name=stack_name).\
filter_by(owner_id=owner_id)
query = soft_delete_aware_query(
context, models.Stack
).filter(sqlalchemy.or_(
models.Stack.tenant == context.tenant_id,
models.Stack.stack_user_project_id == context.tenant_id)
).filter_by(name=stack_name).filter_by(owner_id=owner_id)
return query.first()
def stack_get_by_name(context, stack_name):
query = soft_delete_aware_query(context, models.Stack).\
filter(sqlalchemy.or_(
models.Stack.tenant == context.tenant_id,
models.Stack.stack_user_project_id == context.tenant_id
)).\
filter_by(name=stack_name)
query = soft_delete_aware_query(
context, models.Stack
).filter(sqlalchemy.or_(
models.Stack.tenant == context.tenant_id,
models.Stack.stack_user_project_id == context.tenant_id)
).filter_by(name=stack_name)
return query.first()
@@ -310,8 +314,8 @@ def stack_get(context, stack_id, show_deleted=False, tenant_safe=True,
def stack_get_all_by_owner_id(context, owner_id):
results = soft_delete_aware_query(context, models.Stack).\
filter_by(owner_id=owner_id).all()
results = soft_delete_aware_query(
context, models.Stack).filter_by(owner_id=owner_id).all()
return results
@@ -353,13 +357,13 @@ def _paginate_query(context, query, model, limit=None, sort_keys=None,
def _query_stack_get_all(context, tenant_safe=True, show_deleted=False,
show_nested=False):
if show_nested:
query = soft_delete_aware_query(context, models.Stack,
show_deleted=show_deleted).\
filter_by(backup=False)
query = soft_delete_aware_query(
context, models.Stack, show_deleted=show_deleted
).filter_by(backup=False)
else:
query = soft_delete_aware_query(context, models.Stack,
show_deleted=show_deleted).\
filter_by(owner_id=None)
query = soft_delete_aware_query(
context, models.Stack, show_deleted=show_deleted
).filter_by(owner_id=None)
if tenant_safe:
query = query.filter_by(tenant=context.tenant_id)
@@ -450,9 +454,10 @@ def stack_lock_steal(stack_id, old_engine_id, new_engine_id):
session = get_session()
with session.begin():
lock = session.query(models.StackLock).get(stack_id)
rows_affected = session.query(models.StackLock).\
filter_by(stack_id=stack_id, engine_id=old_engine_id).\
update({"engine_id": new_engine_id})
rows_affected = session.query(
models.StackLock
).filter_by(stack_id=stack_id, engine_id=old_engine_id
).update({"engine_id": new_engine_id})
if not rows_affected:
return lock.engine_id if lock is not None else True
@@ -460,9 +465,9 @@ def stack_lock_steal(stack_id, old_engine_id, new_engine_id):
def stack_lock_release(stack_id, engine_id):
session = get_session()
with session.begin():
rows_affected = session.query(models.StackLock).\
filter_by(stack_id=stack_id, engine_id=engine_id).\
delete()
rows_affected = session.query(
models.StackLock
).filter_by(stack_id=stack_id, engine_id=engine_id).delete()
if not rows_affected:
return True
@@ -523,8 +528,9 @@ def event_get(context, event_id):
def event_get_all(context):
stacks = soft_delete_aware_query(context, models.Stack)
stack_ids = [stack.id for stack in stacks]
results = model_query(context, models.Event).\
filter(models.Event.stack_id.in_(stack_ids)).all()
results = model_query(
context, models.Event
).filter(models.Event.stack_id.in_(stack_ids)).all()
return results
@@ -532,16 +538,16 @@ def event_get_all_by_tenant(context, limit=None, marker=None,
sort_keys=None, sort_dir=None, filters=None):
query = model_query(context, models.Event)
query = db_filters.exact_filter(query, models.Event, filters)
query = query.join(models.Event.stack).\
filter_by(tenant=context.tenant_id).filter_by(deleted_at=None)
query = query.join(
models.Event.stack
).filter_by(tenant=context.tenant_id).filter_by(deleted_at=None)
filters = None
return _events_filter_and_page_query(context, query, limit, marker,
sort_keys, sort_dir, filters).all()
def _query_all_by_stack(context, stack_id):
query = model_query(context, models.Event).\
filter_by(stack_id=stack_id)
query = model_query(context, models.Event).filter_by(stack_id=stack_id)
return query
@@ -568,8 +574,8 @@ def _events_paginate_query(context, query, model, limit=None, sort_keys=None,
if marker:
# not to use model_query(context, model).get(marker), because
# user can only see the ID(column 'uuid') and the ID as the marker
model_marker = model_query(context, model).filter_by(uuid=marker).\
first()
model_marker = model_query(
context, model).filter_by(uuid=marker).first()
try:
query = utils.paginate_query(query, model, limit, sort_keys,
model_marker, sort_dir)
@@ -634,8 +640,8 @@ def watch_rule_get(context, watch_rule_id):
def watch_rule_get_by_name(context, watch_rule_name):
result = model_query(context, models.WatchRule).\
filter_by(name=watch_rule_name).first()
result = model_query(
context, models.WatchRule).filter_by(name=watch_rule_name).first()
return result
@@ -645,8 +651,8 @@ def watch_rule_get_all(context):
def watch_rule_get_all_by_stack(context, stack_id):
results = model_query(context, models.WatchRule).\
filter_by(stack_id=stack_id).all()
results = model_query(
context, models.WatchRule).filter_by(stack_id=stack_id).all()
return results
@@ -745,12 +751,12 @@ def software_deployment_get(context, deployment_id):
def software_deployment_get_all(context, server_id=None):
sd = models.SoftwareDeployment
query = model_query(context, sd).\
filter(sqlalchemy.or_(
sd.tenant == context.tenant_id,
sd.stack_user_project_id == context.tenant_id
)).\
order_by(sd.created_at)
query = model_query(
context, sd
).filter(sqlalchemy.or_(
sd.tenant == context.tenant_id,
sd.stack_user_project_id == context.tenant_id)
).order_by(sd.created_at)
if server_id:
query = query.filter_by(server_id=server_id)
return query.all()
@@ -837,10 +843,11 @@ def purge_deleted(age, granularity='days'):
raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
user_creds = sqlalchemy.Table('user_creds', meta, autoload=True)
stmt = sqlalchemy.select([stack.c.id,
stack.c.raw_template_id,
stack.c.user_creds_id]).\
where(stack.c.deleted_at < time_line)
stmt = sqlalchemy.select(
[stack.c.id,
stack.c.raw_template_id,
stack.c.user_creds_id]
).where(stack.c.deleted_at < time_line)
deleted_stacks = engine.execute(stmt)
for s in deleted_stacks:
@@ -848,8 +855,8 @@ def purge_deleted(age, granularity='days'):
engine.execute(event_del)
stack_del = stack.delete().where(stack.c.id == s[0])
engine.execute(stack_del)
raw_template_del = raw_template.delete().\
where(raw_template.c.id == s[1])
raw_template_del = raw_template.delete().where(
raw_template.c.id == s[1])
engine.execute(raw_template_del)
user_creds_del = user_creds.delete().where(user_creds.c.id == s[2])
engine.execute(user_creds_del)

@@ -32,18 +32,19 @@ def downgrade(migrate_engine):
# Remove soft deleted data
not_deleted = None
stmt = sqlalchemy.select([stack.c.id,
stack.c.raw_template_id,
stack.c.user_creds_id]).\
where(stack.c.deleted_at != not_deleted)
stmt = sqlalchemy.select(
[stack.c.id,
stack.c.raw_template_id,
stack.c.user_creds_id]
).where(stack.c.deleted_at != not_deleted)
deleted_stacks = migrate_engine.execute(stmt)
for s in deleted_stacks:
event_del = event.delete().where(event.c.stack_id == s[0])
migrate_engine.execute(event_del)
stack_del = stack.delete().where(stack.c.id == s[0])
migrate_engine.execute(stack_del)
raw_template_del = raw_template.delete().\
where(raw_template.c.id == s[1])
raw_template_del = raw_template.delete(
).where(raw_template.c.id == s[1])
migrate_engine.execute(raw_template_del)
user_creds_del = user_creds.delete().where(user_creds.c.id == s[2])
migrate_engine.execute(user_creds_del)

@@ -88,8 +88,8 @@ def upgrade(migrate_engine):
# NOTE(chenxiao): For DB2, setting "ID" column "autoincrement=True"
# can't make sense after above "tmp_id=>id" transformation,
# so should work around it.
sql = "ALTER TABLE EVENT ALTER COLUMN ID SET GENERATED BY " \
"DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1)"
sql = ("ALTER TABLE EVENT ALTER COLUMN ID SET GENERATED BY "
"DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1)")
migrate_engine.execute(sql)
else:
event_table.c.tmp_id.alter(sqlalchemy.Integer, autoincrement=True)

@@ -24,8 +24,8 @@ def upgrade(migrate_engine):
# Set backup flag for backup stacks, which are the only ones named "foo*"
not_deleted = None
stmt = sqlalchemy.select([stack.c.id,
stack.c.name]).\
where(stack.c.deleted_at == not_deleted)
stack.c.name]
).where(stack.c.deleted_at == not_deleted)
stacks = migrate_engine.execute(stmt)
for s in stacks:
if s.name.endswith('*'):

@@ -35,11 +35,13 @@ _RESOURCE_KEYS = (
class CfnTemplate(template.Template):
'''A stack template.'''
SECTIONS = (VERSION, ALTERNATE_VERSION, DESCRIPTION, MAPPINGS,
PARAMETERS, RESOURCES, OUTPUTS) = \
('AWSTemplateFormatVersion', 'HeatTemplateFormatVersion',
'Description', 'Mappings', 'Parameters', 'Resources', 'Outputs'
)
SECTIONS = (
VERSION, ALTERNATE_VERSION,
DESCRIPTION, MAPPINGS, PARAMETERS, RESOURCES, OUTPUTS
) = (
'AWSTemplateFormatVersion', 'HeatTemplateFormatVersion',
'Description', 'Mappings', 'Parameters', 'Resources', 'Outputs'
)
SECTIONS_NO_DIRECT_ACCESS = set([PARAMETERS, VERSION, ALTERNATE_VERSION])

@@ -197,8 +197,8 @@ class ResourceRegistry(object):
if name.endswith('*'):
# delete all matching entries.
for res_name in registry.keys():
if isinstance(registry[res_name], ResourceInfo) and \
res_name.startswith(name[:-1]):
if (isinstance(registry[res_name], ResourceInfo) and
res_name.startswith(name[:-1])):
LOG.warn(_LW('Removing %(item)s from %(path)s'), {
'item': res_name,
'path': descriptive_path})

@@ -51,14 +51,14 @@ class Event(object):
'''Retrieve an Event from the database.'''
from heat.engine import stack as parser
ev = event if event is not None else\
db_api.event_get(context, event_id)
ev = (event if event is not None else
db_api.event_get(context, event_id))
if ev is None:
message = _('No event exists with id "%s"') % str(event_id)
raise exception.NotFound(message)
st = stack if stack is not None else\
parser.Stack.load(context, ev.stack_id)
st = (stack if stack is not None else
parser.Stack.load(context, ev.stack_id))
return cls(context, st, ev.resource_action, ev.resource_status,
ev.resource_status_reason, ev.physical_resource_id,

@@ -39,10 +39,13 @@ class HOTemplate20130523(template.Template):
A Heat Orchestration Template format stack template.
"""
SECTIONS = (VERSION, DESCRIPTION, PARAMETER_GROUPS, PARAMETERS,
RESOURCES, OUTPUTS, MAPPINGS) = \
('heat_template_version', 'description', 'parameter_groups',
'parameters', 'resources', 'outputs', '__undefined__')
SECTIONS = (
VERSION, DESCRIPTION, PARAMETER_GROUPS,
PARAMETERS, RESOURCES, OUTPUTS, MAPPINGS
) = (
'heat_template_version', 'description', 'parameter_groups',
'parameters', 'resources', 'outputs', '__undefined__'
)
SECTIONS_NO_DIRECT_ACCESS = set([PARAMETERS, VERSION])

@@ -522,8 +522,8 @@ class Parameters(collections.Mapping):
raise exception.InvalidTemplateParameter(key=name)
def _pseudo_parameters(self, stack_identifier):
stack_id = stack_identifier.arn() \
if stack_identifier is not None else 'None'
stack_id = (stack_identifier.arn()
if stack_identifier is not None else 'None')
stack_name = stack_identifier and stack_identifier.stack_name
yield Parameter(self.PARAM_STACK_ID,

@@ -339,8 +339,8 @@ class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
# availability zones, it will be possible to specify multiple subnets.
# For now, only one subnet can be specified. The bug #1096017 tracks
# this issue.
if self.properties.get(self.VPCZONE_IDENTIFIER) and \
len(self.properties[self.VPCZONE_IDENTIFIER]) != 1:
if (self.properties.get(self.VPCZONE_IDENTIFIER) and
len(self.properties[self.VPCZONE_IDENTIFIER]) != 1):
raise exception.NotSupported(feature=_("Anything other than one "
"VPCZoneIdentifier"))

@@ -63,9 +63,9 @@ common_properties_schema = {
),
REPEAT_ACTIONS: properties.Schema(
properties.Schema.BOOLEAN,
_('False to trigger actions when the threshold is reached AND '
'the alarm\'s state has changed. By default, actions are called '
'each time the threshold is reached.'),
_("False to trigger actions when the threshold is reached AND "
"the alarm's state has changed. By default, actions are called "
"each time the threshold is reached."),
default='true',
update_allowed=True
)

@@ -140,8 +140,8 @@ class ElasticIp(resource.Resource):
server.remove_floating_ip(self._ipaddress())
except Exception as e:
is_not_found = self.client_plugin('nova').is_not_found(e)
is_unprocessable_entity = self.client_plugin('nova').\
is_unprocessable_entity(e)
is_unprocessable_entity = self.client_plugin(
'nova').is_unprocessable_entity(e)
if (not is_not_found and not is_unprocessable_entity):
raise
@@ -259,8 +259,7 @@ class ElasticIpAssociation(resource.Resource):
router = vpc.VPC.router_for_vpc(self.neutron(), network_id)
if router is not None:
floatingip = self.neutron().show_floatingip(float_id)
floating_net_id = \
floatingip['floatingip']['floating_network_id']
floating_net_id = floatingip['floatingip']['floating_network_id']
self.neutron().add_gateway_router(
router['id'], {'network_id': floating_net_id})

@@ -510,9 +510,8 @@ class Instance(resource.Resource):
# if SubnetId property in Instance, ensure subnet exists
if subnet_id:
neutronclient = self.neutron()
network_id = \
self.client_plugin('neutron').network_id_from_subnet_id(
subnet_id)
network_id = self.client_plugin(
'neutron').network_id_from_subnet_id(subnet_id)
# if subnet verified, create a port to use this subnet
# if port is not created explicitly, nova will choose
# the first subnet in the given network.
@@ -525,9 +524,8 @@ class Instance(resource.Resource):
}
if security_groups:
props['security_groups'] = \
self.client_plugin('neutron').get_secgroup_uuids(
security_groups)
props['security_groups'] = self.client_plugin(
'neutron').get_secgroup_uuids(security_groups)
port = neutronclient.create_port({'port': props})['port']

@@ -526,8 +526,8 @@ backend servers
if res:
return res
if cfg.CONF.loadbalancer_template and \
not os.access(cfg.CONF.loadbalancer_template, os.R_OK):
if (cfg.CONF.loadbalancer_template and
not os.access(cfg.CONF.loadbalancer_template, os.R_OK)):
msg = _('Custom LoadBalancer template can not be found')
raise exception.StackValidationFailed(message=msg)

@@ -282,13 +282,13 @@ class Port(neutron.NeutronResource):
# 'default' securityGroup. If has the 'security_groups' and the
# value is [], which means to create the port without securityGroup.
if props.get(self.SECURITY_GROUPS) is not None:
props[self.SECURITY_GROUPS] = self.client_plugin().\
get_secgroup_uuids(props.get(self.SECURITY_GROUPS))
props[self.SECURITY_GROUPS] = self.client_plugin(
).get_secgroup_uuids(props.get(self.SECURITY_GROUPS))
else:
# And the update should has the same behavior.
if prepare_for_update:
props[self.SECURITY_GROUPS] = self.client_plugin().\
get_secgroup_uuids(['default'])
props[self.SECURITY_GROUPS] = self.client_plugin(
).get_secgroup_uuids(['default'])
if not props[self.FIXED_IPS]:
del(props[self.FIXED_IPS])

@@ -143,8 +143,8 @@ class NovaFloatingIpAssociation(resource.Resource):
try:
server = self.nova().servers.get(self.properties[self.SERVER])
if server:
fl_ip = self.nova().floating_ips.\
get(self.properties[self.FLOATING_IP])
fl_ip = self.nova().floating_ips.get(
self.properties[self.FLOATING_IP])
self.nova().servers.remove_floating_ip(server, fl_ip.ip)
except Exception as e:
self.client_plugin().ignore_not_found(e)

@@ -340,8 +340,8 @@ class CinderVolume(aws_vol.Volume):
return res
# Scheduler hints are only supported from Cinder API v2
if self.properties.get(self.CINDER_SCHEDULER_HINTS) \
and self.cinder().volume_api_version == 1:
if (self.properties.get(self.CINDER_SCHEDULER_HINTS)
and self.cinder().volume_api_version == 1):
raise exception.StackValidationFailed(
message=_('Scheduler hints are not supported by the current '
'volume API.'))

@ -101,11 +101,11 @@ class HeatWaitConditionHandle(wc_base.BaseWaitConditionHandle):
return self.data().get('endpoint')
elif key == self.CURL_CLI:
# Construct curl command for template-author convenience
return ('curl -i -X POST '
'-H \'X-Auth-Token: %(token)s\' '
'-H \'Content-Type: application/json\' '
'-H \'Accept: application/json\' '
'%(endpoint)s' %
return ("curl -i -X POST "
"-H 'X-Auth-Token: %(token)s' "
"-H 'Content-Type: application/json' "
"-H 'Accept: application/json' "
"%(endpoint)s" %
dict(token=self.data().get('token'),
endpoint=self.data().get('endpoint')))

@ -146,18 +146,18 @@ class ResourceGroup(stack_resource.StackResource):
schema={
REMOVAL_RSRC_LIST: properties.Schema(
properties.Schema.LIST,
_('List of resources to be removed '
'when doing an update which requires removal of '
'specific resources. '
'The resource may be specified several ways: '
'(1) The resource name, as in the nested stack, '
'(2) The resource reference returned from '
'get_resource in a template, as available via '
'the \'refs\' attribute '
'Note this is destructive on update when specified; '
'even if the count is not being reduced, and once '
'a resource name is removed, it\'s name is never '
'reused in subsequent updates'
_("List of resources to be removed "
"when doing an update which requires removal of "
"specific resources. "
"The resource may be specified several ways: "
"(1) The resource name, as in the nested stack, "
"(2) The resource reference returned from "
"get_resource in a template, as available via "
"the 'refs' attribute "
"Note this is destructive on update when specified; "
"even if the count is not being reduced, and once "
"a resource name is removed, it's name is never "
"reused in subsequent updates"
),
default=[]
),

@ -841,8 +841,8 @@ class Server(stack_user.StackUser):
else:
# remove not updated networks from old and new networks lists,
# also get list these networks
not_updated_networks = \
self._get_network_matches(old_networks, new_networks)
not_updated_networks = self._get_network_matches(
old_networks, new_networks)
self.update_networks_matching_iface_port(
old_networks + not_updated_networks, interfaces)
@ -986,8 +986,8 @@ class Server(stack_user.StackUser):
# record if any networks include explicit ports
networks_with_port = False
for network in networks:
networks_with_port = networks_with_port or \
network.get(self.NETWORK_PORT)
networks_with_port = (networks_with_port or
network.get(self.NETWORK_PORT))
if network.get(self.NETWORK_UUID) and network.get(self.NETWORK_ID):
msg = _('Properties "%(uuid)s" and "%(id)s" are both set '
'to the network "%(network)s" for the server '

@ -150,8 +150,8 @@ class SoftwareComponent(sc.SoftwareConfig):
actions = config.get(self.CONFIG_ACTIONS)
if any(action in config_actions for action in actions):
msg = _('Defining more than one configuration for the same '
'action in SoftwareComponent "%s" is not allowed.')\
% self.name
'action in SoftwareComponent "%s" is not allowed.'
) % self.name
raise exception.StackValidationFailed(message=msg)
config_actions.update(actions)

@ -215,8 +215,8 @@ class SoftwareDeployment(signal_responder.SignalResponder):
config = self.rpc_client().show_software_config(
self.context, config_id)
if action not in self.properties[self.DEPLOY_ACTIONS]\
and not config[rpc_api.SOFTWARE_CONFIG_GROUP] == 'component':
if (action not in self.properties[self.DEPLOY_ACTIONS]
and not config[rpc_api.SOFTWARE_CONFIG_GROUP] == 'component'):
return
props = self._build_properties(

@ -106,7 +106,7 @@ class SwiftSignalHandle(resource.Resource):
elif key == self.ENDPOINT:
return self.data().get(self.ENDPOINT)
elif key == self.CURL_CLI:
return ('curl -i -X PUT \'%s\'' %
return ("curl -i -X PUT '%s'" %
self.data().get(self.ENDPOINT))
def handle_delete(self):

@ -127,8 +127,8 @@ class ResourceDefinitionCore(object):
This returns a new resource definition, with all of the functions
parsed in the context of the specified stack and template.
"""
assert not getattr(self, '_frozen', False), \
"Cannot re-parse a frozen definition"
assert not getattr(self, '_frozen', False
), "Cannot re-parse a frozen definition"
def reparse_snippet(snippet):
return template.parse(stack, copy.deepcopy(snippet))

@ -901,8 +901,8 @@ class Stack(collections.Mapping):
# rights to delete the trust unless an admin
trustor_id = user_creds.get('trustor_user_id')
if self.context.user_id != trustor_id:
LOG.debug('Context user_id doesn\'t match '
'trustor, using stored context')
LOG.debug("Context user_id doesn't match "
"trustor, using stored context")
sc = self.stored_context()
sc.clients.client('keystone').delete_trust(
trust_id)

@ -73,8 +73,8 @@ class StackLock(object):
'stack': self.stack.id})
return
if lock_engine_id == self.engine_id or \
self.engine_alive(self.context, lock_engine_id):
if (lock_engine_id == self.engine_id or
self.engine_alive(self.context, lock_engine_id)):
LOG.debug("Lock on stack %(stack)s is owned by engine "
"%(engine)s" % {'stack': self.stack.id,
'engine': lock_engine_id})

@ -154,8 +154,8 @@ class StackResource(resource.Resource):
def _parse_nested_stack(self, stack_name, child_template, child_params,
timeout_mins=None, adopt_data=None):
if self.stack.nested_depth >= cfg.CONF.max_nested_stack_depth:
msg = _("Recursion depth exceeds %d.") % \
cfg.CONF.max_nested_stack_depth
msg = _("Recursion depth exceeds %d."
) % cfg.CONF.max_nested_stack_depth
raise exception.RequestLimitExceeded(message=msg)
parsed_template = self._parse_child_template(child_template)

@ -106,8 +106,8 @@ class WaitConditionTest(common.HeatTestCase):
id = identifier.ResourceIdentifier('test_tenant', stack.name,
stack.id, '', 'WaitHandle')
self.m.StubOutWithMock(aws_wch.WaitConditionHandle, 'identifier')
aws_wch.WaitConditionHandle.identifier().\
MultipleTimes().AndReturn(id)
aws_wch.WaitConditionHandle.identifier(
).MultipleTimes().AndReturn(id)
if stub_status:
self.m.StubOutWithMock(aws_wch.WaitConditionHandle,

@ -152,8 +152,8 @@ class HeatTestCase(testscenarios.WithScenarios,
self.m.StubOutWithMock(glance.ImageConstraint, 'validate')
if num is None:
glance.ImageConstraint.validate(
mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().\
AndReturn(True)
mox.IgnoreArg(), mox.IgnoreArg()
).MultipleTimes().AndReturn(True)
else:
for x in range(num):
glance.ImageConstraint.validate(

@ -33,11 +33,11 @@ class FakeClient(object):
expected = (method, url)
called = self.client.callstack[pos][0:2]
assert self.client.callstack, \
"Expected %s %s but no calls were made." % expected
assert self.client.callstack, ("Expected %s %s "
"but no calls were made." % expected)
assert expected == called, 'Expected %s %s; got %s %s' % \
(expected + called)
assert expected == called, 'Expected %s %s; got %s %s' % (
expected + called)
if body is not None:
assert self.client.callstack[pos][2] == body
@ -48,8 +48,8 @@ class FakeClient(object):
"""
expected = (method, url)
assert self.client.callstack, \
"Expected %s %s but no calls were made." % expected
assert self.client.callstack, (