Fix W504 errors

A few related errors found during earlier investigation, along the
lines of E305, may also have been fixed here.

Story: #2001985
Change-Id: Ifb2d3b481202fbd8cbb472e02de0f14f4d0809fd
Julia Kreger 2018-05-08 12:21:13 -07:00
parent 6ff9a6b14c
commit 530a3ed088
63 changed files with 410 additions and 410 deletions
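
For context, W504 is the pycodestyle check that flags a line break placed
after a binary operator; its counterpart W503 flags a break before the
operator, and the two checks conflict, so a project enables at most one of
them. The hunks below consistently move trailing and/or/comparison operators
to the start of the continuation line, which is the W504-clean form. E305,
mentioned above, is the check that requires two blank lines after a function
or class definition before module-level code resumes. A minimal sketch of the
before/after pattern (the function names here are hypothetical, not taken
from the diff):

# Flagged by W504: the binary operator "and" trails the first line.
def node_is_hidden(minor_version, provision_state):
    return (minor_version < 2 and
            provision_state == 'available')


# Style adopted in this change: the operator leads the continuation line.
def node_is_hidden_w504_clean(minor_version, provision_state):
    return (minor_version < 2
            and provision_state == 'available')

With this convention, leading-operator continuations rely on W503 remaining
disabled in the project's flake8 configuration.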


@ -31,7 +31,7 @@ app = {
'/v1',
# IPA ramdisk methods
'/v1/lookup',
'/v1/heartbeat/[a-z0-9\-]+',
'/v1/heartbeat/[a-z0-9\\-]+',
],
}


@ -34,8 +34,8 @@ class APIBase(wtypes.Base):
"""Render this object as a dict of its fields."""
return dict((k, getattr(self, k))
for k in self.fields
if hasattr(self, k) and
getattr(self, k) != wsme.Unset)
if hasattr(self, k)
and getattr(self, k) != wsme.Unset)
def unset_fields_except(self, except_list=None):
"""Unset fields so they don't appear in the message body.


@ -170,12 +170,12 @@ def update_state_in_older_versions(obj):
to be updated by this method.
"""
# if requested version is < 1.2, convert AVAILABLE to the old NOSTATE
if (pecan.request.version.minor < versions.MINOR_2_AVAILABLE_STATE and
obj.provision_state == ir_states.AVAILABLE):
if (pecan.request.version.minor < versions.MINOR_2_AVAILABLE_STATE
and obj.provision_state == ir_states.AVAILABLE):
obj.provision_state = ir_states.NOSTATE
# if requested version < 1.39, convert INSPECTWAIT to INSPECTING
if (not api_utils.allow_inspect_wait_state() and
obj.provision_state == ir_states.INSPECTWAIT):
if (not api_utils.allow_inspect_wait_state()
and obj.provision_state == ir_states.INSPECTWAIT):
obj.provision_state = ir_states.INSPECTING
@ -522,8 +522,8 @@ class NodeStatesController(rest.RestController):
rpc_node = api_utils.get_rpc_node(node_ident)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
if ((target in [ir_states.SOFT_REBOOT, ir_states.SOFT_POWER_OFF] or
timeout) and not api_utils.allow_soft_power_off()):
if ((target in [ir_states.SOFT_REBOOT, ir_states.SOFT_POWER_OFF]
or timeout) and not api_utils.allow_soft_power_off()):
raise exception.NotAcceptable()
# FIXME(naohirot): This check is workaround because
# wtypes.IntegerType(minimum=1) is not effective
@ -644,8 +644,8 @@ class NodeStatesController(rest.RestController):
raise wsme.exc.ClientSideError(
msg, status_code=http_client.BAD_REQUEST)
if (rescue_password is not None and
target != ir_states.VERBS['rescue']):
if (rescue_password is not None
and target != ir_states.VERBS['rescue']):
msg = (_('"rescue_password" is only valid when setting target '
'provision state to %s') % ir_states.VERBS['rescue'])
raise wsme.exc.ClientSideError(
@ -1473,10 +1473,10 @@ class NodesController(rest.RestController):
pecan.abort(http_client.BAD_REQUEST, e.args[0])
if not remainder:
return
if ((remainder[0] == 'portgroups' and
not api_utils.allow_portgroups_subcontrollers()) or
(remainder[0] == 'vifs' and
not api_utils.allow_vifs_subcontroller())):
if ((remainder[0] == 'portgroups'
and not api_utils.allow_portgroups_subcontrollers())
or (remainder[0] == 'vifs'
and not api_utils.allow_vifs_subcontroller())):
pecan.abort(http_client.NOT_FOUND)
if remainder[0] == 'traits' and not api_utils.allow_traits():
# NOTE(mgoddard): Returning here will ensure we exhibit the
@ -1764,8 +1764,8 @@ class NodesController(rest.RestController):
if node is not None:
# We're invoking this interface using positional notation, or
# explicitly using 'node'. Try and determine which one.
if (not api_utils.allow_node_logical_names() and
not uuidutils.is_uuid_like(node)):
if (not api_utils.allow_node_logical_names()
and not uuidutils.is_uuid_like(node)):
raise exception.NotAcceptable()
rpc_node = api_utils.get_rpc_node(node_uuid or node)
@ -1809,13 +1809,13 @@ class NodesController(rest.RestController):
if self.from_chassis:
raise exception.OperationNotPermitted()
if (not api_utils.allow_resource_class() and
node.resource_class is not wtypes.Unset):
if (not api_utils.allow_resource_class()
and node.resource_class is not wtypes.Unset):
raise exception.NotAcceptable()
n_interface = node.network_interface
if (not api_utils.allow_network_interface() and
n_interface is not wtypes.Unset):
if (not api_utils.allow_network_interface()
and n_interface is not wtypes.Unset):
raise exception.NotAcceptable()
if not api_utils.allow_dynamic_interfaces():
@ -1823,8 +1823,8 @@ class NodesController(rest.RestController):
if getattr(node, field) is not wsme.Unset:
raise exception.NotAcceptable()
if (not api_utils.allow_storage_interface() and
node.storage_interface is not wtypes.Unset):
if (not api_utils.allow_storage_interface()
and node.storage_interface is not wtypes.Unset):
raise exception.NotAcceptable()
if node.traits is not wtypes.Unset:
@ -1832,8 +1832,8 @@ class NodesController(rest.RestController):
"be set via the node traits API.")
raise exception.Invalid(msg)
if (not api_utils.allow_rescue_interface() and
node.rescue_interface is not wtypes.Unset):
if (not api_utils.allow_rescue_interface()
and node.rescue_interface is not wtypes.Unset):
raise exception.NotAcceptable()
# NOTE(deva): get_topic_for checks if node.driver is in the hash ring
@ -1932,8 +1932,8 @@ class NodesController(rest.RestController):
"is in progress.")
raise wsme.exc.ClientSideError(
msg % node_ident, status_code=http_client.CONFLICT)
elif (rpc_node.provision_state == ir_states.INSPECTING and
api_utils.allow_inspect_wait_state()):
elif (rpc_node.provision_state == ir_states.INSPECTING
and api_utils.allow_inspect_wait_state()):
msg = _('Cannot update node "%(node)s" while it is in state '
'"%(state)s".') % {'node': rpc_node.uuid,
'state': ir_states.INSPECTING}


@ -394,14 +394,14 @@ class PortsController(rest.RestController):
"""
if fields is None:
return
if (not api_utils.allow_port_advanced_net_fields() and
set(fields).intersection(self.advanced_net_fields)):
if (not api_utils.allow_port_advanced_net_fields()
and set(fields).intersection(self.advanced_net_fields)):
raise exception.NotAcceptable()
if ('portgroup_uuid' in fields and not
api_utils.allow_portgroups_subcontrollers()):
if ('portgroup_uuid' in fields
and not api_utils.allow_portgroups_subcontrollers()):
raise exception.NotAcceptable()
if ('physical_network' in fields and not
api_utils.allow_port_physical_network()):
if ('physical_network' in fields
and not api_utils.allow_port_physical_network()):
raise exception.NotAcceptable()
@METRICS.timer('PortsController.get_all')
@ -451,8 +451,8 @@ class PortsController(rest.RestController):
# We're invoking this interface using positional notation, or
# explicitly using 'node'. Try and determine which one.
# Make sure only one interface, node or node_uuid is used
if (not api_utils.allow_node_logical_names() and
not uuidutils.is_uuid_like(node)):
if (not api_utils.allow_node_logical_names()
and not uuidutils.is_uuid_like(node)):
raise exception.NotAcceptable()
return self._get_ports_collection(node_uuid or node, address,
@ -498,8 +498,8 @@ class PortsController(rest.RestController):
# We're invoking this interface using positional notation, or
# explicitly using 'node'. Try and determine which one.
# Make sure only one interface, node or node_uuid is used
if (not api_utils.allow_node_logical_names() and
not uuidutils.is_uuid_like(node)):
if (not api_utils.allow_node_logical_names()
and not uuidutils.is_uuid_like(node)):
raise exception.NotAcceptable()
# NOTE(lucasagomes): /detail should only work against collections
@ -565,8 +565,8 @@ class PortsController(rest.RestController):
vif = api_utils.handle_post_port_like_extra_vif(pdict)
if (pdict.get('portgroup_uuid') and
(pdict.get('pxe_enabled') or vif)):
if (pdict.get('portgroup_uuid')
and (pdict.get('pxe_enabled') or vif)):
rpc_pg = objects.Portgroup.get_by_uuid(context,
pdict['portgroup_uuid'])
if not rpc_pg.standalone_ports_supported:
@ -626,11 +626,11 @@ class PortsController(rest.RestController):
raise exception.OperationNotPermitted()
fields_to_check = set()
for field in (self.advanced_net_fields +
['portgroup_uuid', 'physical_network']):
for field in (self.advanced_net_fields
+ ['portgroup_uuid', 'physical_network']):
field_path = '/%s' % field
if (api_utils.get_patch_values(patch, field_path) or
api_utils.is_path_removed(patch, field_path)):
if (api_utils.get_patch_values(patch, field_path)
or api_utils.is_path_removed(patch, field_path)):
fields_to_check.add(field)
self._check_allowed_port_fields(fields_to_check)
@ -669,8 +669,8 @@ class PortsController(rest.RestController):
rpc_port[field] = patch_val
rpc_node = objects.Node.get_by_id(context, rpc_port.node_id)
if (rpc_node.provision_state == ir_states.INSPECTING and
api_utils.allow_inspect_wait_state()):
if (rpc_node.provision_state == ir_states.INSPECTING
and api_utils.allow_inspect_wait_state()):
msg = _('Cannot update port "%(port)s" on "%(node)s" while it is '
'in state "%(state)s".') % {'port': rpc_port.uuid,
'node': rpc_node.uuid,


@ -443,13 +443,13 @@ class PortgroupsController(pecan.rest.RestController):
if self.parent_node_ident:
raise exception.OperationNotPermitted()
if (not api_utils.allow_portgroup_mode_properties() and
(portgroup.mode is not wtypes.Unset or
portgroup.properties is not wtypes.Unset)):
if (not api_utils.allow_portgroup_mode_properties()
and (portgroup.mode is not wtypes.Unset
or portgroup.properties is not wtypes.Unset)):
raise exception.NotAcceptable()
if (portgroup.name and
not api_utils.is_valid_logical_name(portgroup.name)):
if (portgroup.name
and not api_utils.is_valid_logical_name(portgroup.name)):
error_msg = _("Cannot create portgroup with invalid name "
"'%(name)s'") % {'name': portgroup.name}
raise wsme.exc.ClientSideError(
@ -497,9 +497,9 @@ class PortgroupsController(pecan.rest.RestController):
if self.parent_node_ident:
raise exception.OperationNotPermitted()
if (not api_utils.allow_portgroup_mode_properties() and
(api_utils.is_path_updated(patch, '/mode') or
api_utils.is_path_updated(patch, '/properties'))):
if (not api_utils.allow_portgroup_mode_properties()
and (api_utils.is_path_updated(patch, '/mode')
or api_utils.is_path_updated(patch, '/properties'))):
raise exception.NotAcceptable()
rpc_portgroup = api_utils.get_rpc_portgroup_with_suffix(
@ -507,8 +507,7 @@ class PortgroupsController(pecan.rest.RestController):
names = api_utils.get_patch_values(patch, '/name')
for name in names:
if (name and
not api_utils.is_valid_logical_name(name)):
if (name and not api_utils.is_valid_logical_name(name)):
error_msg = _("Portgroup %(portgroup)s: Cannot change name to"
" invalid name '%(name)s'") % {'portgroup':
portgroup_ident,
@ -544,8 +543,8 @@ class PortgroupsController(pecan.rest.RestController):
rpc_portgroup[field] = patch_val
rpc_node = objects.Node.get_by_id(context, rpc_portgroup.node_id)
if (rpc_node.provision_state == ir_states.INSPECTING and
api_utils.allow_inspect_wait_state()):
if (rpc_node.provision_state == ir_states.INSPECTING
and api_utils.allow_inspect_wait_state()):
msg = _('Cannot update portgroup "%(portgroup)s" on node '
'"%(node)s" while it is in state "%(state)s".') % {
'portgroup': rpc_portgroup.uuid, 'node': rpc_node.uuid,


@ -142,8 +142,8 @@ class LookupController(rest.RestController):
# at all and nodes in a wrong state by different error messages.
raise exception.NotFound()
if (CONF.api.restrict_lookup and
node.provision_state not in _LOOKUP_ALLOWED_STATES):
if (CONF.api.restrict_lookup
and node.provision_state not in _LOOKUP_ALLOWED_STATES):
raise exception.NotFound()
return LookupResult.convert_with_links(node)


@ -142,8 +142,8 @@ def is_path_removed(patch, path):
"""
path = path.rstrip('/')
for p in patch:
if ((p['path'] == path or p['path'].startswith(path + '/')) and
p['op'] == 'remove'):
if ((p['path'] == path or p['path'].startswith(path + '/'))
and p['op'] == 'remove'):
return True
@ -349,8 +349,8 @@ def check_allow_specify_fields(fields):
attributes, this method checks if the required version is being
requested.
"""
if (fields is not None and pecan.request.version.minor <
versions.MINOR_8_FETCHING_SUBSET_OF_FIELDS):
if (fields is not None and pecan.request.version.minor
< versions.MINOR_8_FETCHING_SUBSET_OF_FIELDS):
raise exception.NotAcceptable()
@ -385,8 +385,8 @@ def check_allowed_portgroup_fields(fields):
"""
if fields is None:
return
if (('mode' in fields or 'properties' in fields) and
not allow_portgroup_mode_properties()):
if (('mode' in fields or 'properties' in fields)
and not allow_portgroup_mode_properties()):
raise exception.NotAcceptable()
@ -402,8 +402,8 @@ def check_for_invalid_state_and_allow_filter(provision_state):
Version 1.9 of the API allows filter nodes by provision state.
"""
if provision_state is not None:
if (pecan.request.version.minor <
versions.MINOR_9_PROVISION_STATE_FILTER):
if (pecan.request.version.minor
< versions.MINOR_9_PROVISION_STATE_FILTER):
raise exception.NotAcceptable()
valid_states = states.machine.states
if provision_state not in valid_states:
@ -416,8 +416,8 @@ def check_allow_specify_driver(driver):
Version 1.16 of the API allows filter nodes by driver.
"""
if (driver is not None and pecan.request.version.minor <
versions.MINOR_16_DRIVER_FILTER):
if (driver is not None and pecan.request.version.minor
< versions.MINOR_16_DRIVER_FILTER):
raise exception.NotAcceptable(_(
"Request not acceptable. The minimal required API version "
"should be %(base)s.%(opr)s") %
@ -430,8 +430,8 @@ def check_allow_specify_resource_class(resource_class):
Version 1.21 of the API allows filtering nodes by resource_class.
"""
if (resource_class is not None and pecan.request.version.minor <
versions.MINOR_21_RESOURCE_CLASS):
if (resource_class is not None and pecan.request.version.minor
< versions.MINOR_21_RESOURCE_CLASS):
raise exception.NotAcceptable(_(
"Request not acceptable. The minimal required API version "
"should be %(base)s.%(opr)s") %
@ -519,8 +519,8 @@ def allow_links_node_states_and_driver_properties():
Version 1.14 of the API allows the display of links to node states
and driver properties.
"""
return (pecan.request.version.minor >=
versions.MINOR_14_LINKS_NODESTATES_DRIVERPROPERTIES)
return (pecan.request.version.minor
>= versions.MINOR_14_LINKS_NODESTATES_DRIVERPROPERTIES)
def allow_port_internal_info():
@ -528,8 +528,8 @@ def allow_port_internal_info():
Version 1.18 of the API exposes internal_info readonly field for the port.
"""
return (pecan.request.version.minor >=
versions.MINOR_18_PORT_INTERNAL_INFO)
return (pecan.request.version.minor
>= versions.MINOR_18_PORT_INTERNAL_INFO)
def allow_port_advanced_net_fields():
@ -537,8 +537,8 @@ def allow_port_advanced_net_fields():
Version 1.19 of the API added support for these new fields in port object.
"""
return (pecan.request.version.minor >=
versions.MINOR_19_PORT_ADVANCED_NET_FIELDS)
return (pecan.request.version.minor
>= versions.MINOR_19_PORT_ADVANCED_NET_FIELDS)
def allow_network_interface():
@ -546,8 +546,8 @@ def allow_network_interface():
Version 1.20 of the API added support for network interfaces.
"""
return (pecan.request.version.minor >=
versions.MINOR_20_NETWORK_INTERFACE)
return (pecan.request.version.minor
>= versions.MINOR_20_NETWORK_INTERFACE)
def allow_resource_class():
@ -555,8 +555,8 @@ def allow_resource_class():
Version 1.21 of the API added support for resource_class.
"""
return (pecan.request.version.minor >=
versions.MINOR_21_RESOURCE_CLASS)
return (pecan.request.version.minor
>= versions.MINOR_21_RESOURCE_CLASS)
def allow_ramdisk_endpoints():
@ -572,8 +572,8 @@ def allow_portgroups():
Version 1.23 of the API added support for PortGroups.
"""
return (pecan.request.version.minor >=
versions.MINOR_23_PORTGROUPS)
return (pecan.request.version.minor
>= versions.MINOR_23_PORTGROUPS)
def allow_portgroups_subcontrollers():
@ -582,8 +582,8 @@ def allow_portgroups_subcontrollers():
Version 1.24 of the API added support for Portgroups as
subcontrollers
"""
return (pecan.request.version.minor >=
versions.MINOR_24_PORTGROUPS_SUBCONTROLLERS)
return (pecan.request.version.minor
>= versions.MINOR_24_PORTGROUPS_SUBCONTROLLERS)
def allow_remove_chassis_uuid():
@ -592,8 +592,8 @@ def allow_remove_chassis_uuid():
Version 1.25 of the API added support for chassis_uuid
removal
"""
return (pecan.request.version.minor >=
versions.MINOR_25_UNSET_CHASSIS_UUID)
return (pecan.request.version.minor
>= versions.MINOR_25_UNSET_CHASSIS_UUID)
def allow_portgroup_mode_properties():
@ -602,8 +602,8 @@ def allow_portgroup_mode_properties():
Version 1.26 of the API added mode and properties fields to portgroup
object.
"""
return (pecan.request.version.minor >=
versions.MINOR_26_PORTGROUP_MODE_PROPERTIES)
return (pecan.request.version.minor
>= versions.MINOR_26_PORTGROUP_MODE_PROPERTIES)
def allow_vifs_subcontroller():
@ -612,8 +612,8 @@ def allow_vifs_subcontroller():
Version 1.28 of the API added support for VIFs to be
attached to Nodes.
"""
return (pecan.request.version.minor >=
versions.MINOR_28_VIFS_SUBCONTROLLER)
return (pecan.request.version.minor
>= versions.MINOR_28_VIFS_SUBCONTROLLER)
def allow_dynamic_drivers():
@ -622,8 +622,8 @@ def allow_dynamic_drivers():
Version 1.30 of the API added support for all of the driver
composition related calls in the /v1/drivers API.
"""
return (pecan.request.version.minor >=
versions.MINOR_30_DYNAMIC_DRIVERS)
return (pecan.request.version.minor
>= versions.MINOR_30_DYNAMIC_DRIVERS)
def allow_dynamic_interfaces():
@ -632,8 +632,8 @@ def allow_dynamic_interfaces():
Version 1.31 of the API added support for viewing and setting the fields
in ``V31_FIELDS`` on the node object.
"""
return (pecan.request.version.minor >=
versions.MINOR_31_DYNAMIC_INTERFACES)
return (pecan.request.version.minor
>= versions.MINOR_31_DYNAMIC_INTERFACES)
def allow_volume():
@ -649,8 +649,8 @@ def allow_storage_interface():
Version 1.33 of the API added support for storage interfaces.
"""
return (pecan.request.version.minor >=
versions.MINOR_33_STORAGE_INTERFACE)
return (pecan.request.version.minor
>= versions.MINOR_33_STORAGE_INTERFACE)
def allow_port_physical_network():
@ -661,9 +661,9 @@ def allow_port_physical_network():
supports the physical_network field as this may not be the case during a
rolling upgrade.
"""
return ((pecan.request.version.minor >=
versions.MINOR_34_PORT_PHYSICAL_NETWORK) and
objects.Port.supports_physical_network())
return ((pecan.request.version.minor
>= versions.MINOR_34_PORT_PHYSICAL_NETWORK)
and objects.Port.supports_physical_network())
def allow_node_rebuild_with_configdrive():
@ -671,8 +671,8 @@ def allow_node_rebuild_with_configdrive():
Version 1.35 of the API added support for node rebuild with configdrive.
"""
return (pecan.request.version.minor >=
versions.MINOR_35_REBUILD_CONFIG_DRIVE)
return (pecan.request.version.minor
>= versions.MINOR_35_REBUILD_CONFIG_DRIVE)
def allow_agent_version_in_heartbeat():
@ -681,8 +681,8 @@ def allow_agent_version_in_heartbeat():
Version 1.36 of the API added the ability for agents to pass their version
information to Ironic on heartbeat.
"""
return (pecan.request.version.minor >=
versions.MINOR_36_AGENT_VERSION_HEARTBEAT)
return (pecan.request.version.minor
>= versions.MINOR_36_AGENT_VERSION_HEARTBEAT)
def allow_rescue_interface():


@ -145,8 +145,8 @@ class NoExceptionTracebackHook(hooks.PecanHook):
# Do nothing if there is no error.
# Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not
# an error.
if (http_client.OK <= state.response.status_int <
http_client.BAD_REQUEST):
if (http_client.OK <= state.response.status_int
< http_client.BAD_REQUEST):
return
json_body = state.response.json
@ -175,5 +175,5 @@ class PublicUrlHook(hooks.PecanHook):
"""
def before(self, state):
state.request.public_url = (cfg.CONF.api.public_endpoint or
state.request.host_url)
state.request.public_url = (cfg.CONF.api.public_endpoint
or state.request.host_url)


@ -83,9 +83,9 @@ def is_volume_available(volume):
:returns: Boolean if volume is available.
"""
return (volume.status == AVAILABLE or
(volume.status == IN_USE and
volume.multiattach))
return (volume.status == AVAILABLE
or (volume.status == IN_USE
and volume.multiattach))
def is_volume_attached(node, volume):


@ -180,8 +180,8 @@ def default_interface(driver_or_hw_type, interface_type,
# For non hardware types we need to set a fallback for the network
# interface however hardware_types specify their own defaults if not in
# the config file.
if (CONF.dhcp.dhcp_provider == 'neutron' and
'flat' in CONF.enabled_network_interfaces):
if (CONF.dhcp.dhcp_provider == 'neutron'
and 'flat' in CONF.enabled_network_interfaces):
additional_defaults['network'] = 'flat'
elif 'noop' in CONF.enabled_network_interfaces:
additional_defaults['network'] = 'noop'


@ -99,8 +99,8 @@ class FSM(machines.FiniteMachine):
def _post_process_event(self, event, result):
# Clear '_target_state' if we've reached it
if (self._target_state is not None and
self._target_state == self._current.name):
if (self._target_state is not None
and self._target_state == self._current.name):
self._target_state = None
# If new state has a different target, update the '_target_state'
if self._states[self._current.name]['target'] is not None:
@ -136,8 +136,8 @@ class FSM(machines.FiniteMachine):
super(FSM, self).initialize(start_state=start_state)
current_state = self._current.name
self._validate_target_state(target_state)
self._target_state = (target_state or
self._states[current_state]['target'])
self._target_state = (target_state
or self._states[current_state]['target'])
@_translate_excp
def process_event(self, event, target_state=None):


@ -83,8 +83,8 @@ def check_image_service(func):
# TODO(pas-ha) remove in Rocky
# NOTE(pas-ha) new option must win if configured
if (CONF.glance.glance_api_servers and
not CONF.glance.endpoint_override):
if (CONF.glance.glance_api_servers
and not CONF.glance.endpoint_override):
# NOTE(pas-ha) all the 2 methods have image_href as the first
# positional arg, but check in kwargs too
image_href = args[0] if args else kwargs.get('image_href')
@ -211,8 +211,8 @@ class BaseImageService(object):
"""
image_id = service_utils.parse_image_id(image_href)
if (self.version == 2 and
'file' in CONF.glance.allowed_direct_url_schemes):
if (self.version == 2
and 'file' in CONF.glance.allowed_direct_url_schemes):
location = self._get_location(image_id)
url = urlparse.urlparse(location)


@ -153,8 +153,8 @@ def is_image_available(context, image):
if hasattr(context, 'auth_token') and context.auth_token:
return True
if ((getattr(image, 'is_public', None) or
getattr(image, 'visibility', None) == 'public') or context.is_admin):
if ((getattr(image, 'is_public', None)
or getattr(image, 'visibility', None) == 'public') or context.is_admin):
return True
properties = image.properties
if context.project_id and ('owner_id' in properties):
@ -183,8 +183,8 @@ def is_image_active(image):
def is_glance_image(image_href):
if not isinstance(image_href, six.string_types):
return False
return (image_href.startswith('glance://') or
uuidutils.is_uuid_like(image_href))
return (image_href.startswith('glance://')
or uuidutils.is_uuid_like(image_href))
def is_image_href_ordinary_file_name(image_href):
@ -197,6 +197,6 @@ def is_image_href_ordinary_file_name(image_href):
:returns: True if image_href is ordinary file name, False otherwise.
"""
return not (is_glance_image(image_href) or
urlparse.urlparse(image_href).scheme.lower() in
return not (is_glance_image(image_href)
or urlparse.urlparse(image_href).scheme.lower() in
image_service.protocol_mapping)


@ -197,13 +197,13 @@ class GlanceImageService(base_image_service.BaseImageService,
def _validate_temp_url_config(self):
"""Validate the required settings for a temporary URL."""
if (not CONF.glance.swift_temp_url_key and
CONF.deploy.object_store_endpoint_type != 'swift'):
if (not CONF.glance.swift_temp_url_key
and CONF.deploy.object_store_endpoint_type != 'swift'):
raise exc.MissingParameterValue(_(
'Swift temporary URLs require a shared secret to be created. '
'You must provide "swift_temp_url_key" as a config option.'))
if (CONF.glance.swift_temp_url_duration <
CONF.glance.swift_temp_url_expected_download_start_delay):
if (CONF.glance.swift_temp_url_duration
< CONF.glance.swift_temp_url_expected_download_start_delay):
raise exc.InvalidParameterValue(_(
'"swift_temp_url_duration" must be greater than or equal to '
'"[glance]swift_temp_url_expected_download_start_delay" '
@ -244,8 +244,8 @@ class GlanceImageService(base_image_service.BaseImageService,
num_dashes = image_id[:seed_num_chars].count('-')
num_chars = seed_num_chars + num_dashes
name_suffix = image_id[:num_chars]
new_container_name = (CONF.glance.swift_container +
'_' + name_suffix)
new_container_name = (CONF.glance.swift_container
+ '_' + name_suffix)
return new_container_name
else:
return CONF.glance.swift_container
@ -270,8 +270,8 @@ class GlanceImageService(base_image_service.BaseImageService,
usage time.
"""
max_valid_time = (
int(time.time()) +
CONF.glance.swift_temp_url_expected_download_start_delay)
int(time.time())
+ CONF.glance.swift_temp_url_expected_download_start_delay)
keys_to_remove = [
k for k, v in self._cache.items()
if (v.url_expires_at < max_valid_time)]


@ -214,8 +214,8 @@ class FileImageService(BaseImageService):
try:
# We should have read and write access to source file to create
# hard link to it.
if (local_device == os.stat(source_image_path).st_dev and
os.access(source_image_path, os.R_OK | os.W_OK)):
if (local_device == os.stat(source_image_path).st_dev
and os.access(source_image_path, os.R_OK | os.W_OK)):
image_file.close()
os.remove(dest_image_path)
os.link(source_image_path, dest_image_path)


@ -480,12 +480,12 @@ def is_whole_disk_image(ctx, instance_info):
iproperties = get_image_properties(ctx, image_source)
except Exception:
return
is_whole_disk_image = (not iproperties.get('kernel_id') and
not iproperties.get('ramdisk_id'))
is_whole_disk_image = (not iproperties.get('kernel_id')
and not iproperties.get('ramdisk_id'))
else:
# Non glance image ref
if (not instance_info.get('kernel') and
not instance_info.get('ramdisk')):
if (not instance_info.get('kernel')
and not instance_info.get('ramdisk')):
is_whole_disk_image = True
return is_whole_disk_image


@ -63,12 +63,12 @@ def get_client(token=None, context=None):
# 'noauth' then would correspond to 'auth_type=none' and
# 'endpoint_override'
adapter_params = {}
if (CONF.neutron.auth_strategy == 'noauth' and
CONF.neutron.auth_type is None):
if (CONF.neutron.auth_strategy == 'noauth'
and CONF.neutron.auth_type is None):
CONF.set_override('auth_type', 'none', group='neutron')
if not CONF.neutron.endpoint_override:
adapter_params['endpoint_override'] = (CONF.neutron.url or
DEFAULT_NEUTRON_URL)
adapter_params['endpoint_override'] = (CONF.neutron.url
or DEFAULT_NEUTRON_URL)
else:
if CONF.keystone.region_name and not CONF.neutron.region_name:
adapter_params['region_name'] = CONF.keystone.region_name
@ -464,8 +464,8 @@ def validate_port_info(node, port):
# Subnet Manager.
if port.extra.get('client-id'):
return True
if (node.network_interface == 'neutron' and
not port.local_link_connection):
if (node.network_interface == 'neutron'
and not port.local_link_connection):
LOG.warning("The local_link_connection is required for "
"'neutron' network interface and is not present "
"in the nodes %(node)s port %(port)s",
@ -577,8 +577,8 @@ class NeutronNetworkInterfaceMixin(object):
def get_cleaning_network_uuid(self, task):
cleaning_network = (
task.node.driver_info.get('cleaning_network') or
CONF.neutron.cleaning_network
task.node.driver_info.get('cleaning_network')
or CONF.neutron.cleaning_network
)
return validate_network(
cleaning_network, _('cleaning network'),
@ -586,8 +586,8 @@ class NeutronNetworkInterfaceMixin(object):
def get_provisioning_network_uuid(self, task):
provisioning_network = (
task.node.driver_info.get('provisioning_network') or
CONF.neutron.provisioning_network
task.node.driver_info.get('provisioning_network')
or CONF.neutron.provisioning_network
)
return validate_network(
provisioning_network, _('provisioning network'),
@ -597,8 +597,8 @@ class NeutronNetworkInterfaceMixin(object):
# FlatNetwork uses tenant network for rescue operation.
def get_rescuing_network_uuid(self, task):
rescuing_network = (
task.node.driver_info.get('rescuing_network') or
CONF.neutron.rescuing_network
task.node.driver_info.get('rescuing_network')
or CONF.neutron.rescuing_network
)
return validate_network(
rescuing_network, _('rescuing network'),


@ -227,8 +227,8 @@ def create_pxe_config(task, pxe_options, template=None):
_ensure_config_dirs_exist(task.node.uuid)
pxe_config_file_path = get_pxe_config_file_path(task.node.uuid)
is_uefi_boot_mode = (deploy_utils.get_boot_mode_for_deploy(task.node) ==
'uefi')
is_uefi_boot_mode = (deploy_utils.get_boot_mode_for_deploy(task.node)
== 'uefi')
# grub bootloader panics with '{}' around any of its tags in its
# config file. To overcome that 'ROOT' and 'DISK_IDENTIFIER' are enclosed
@ -274,8 +274,8 @@ def create_ipxe_boot_script():
# NOTE(pas-ha) to prevent unneeded writes,
# only write to file if its content is different from required,
# which should be rather rare
if (not os.path.isfile(bootfile_path) or
not utils.file_has_content(bootfile_path, boot_script)):
if (not os.path.isfile(bootfile_path)
or not utils.file_has_content(bootfile_path, boot_script)):
utils.write_to_file(bootfile_path, boot_script)
@ -287,8 +287,8 @@ def clean_up_pxe_config(task):
"""
LOG.debug("Cleaning up PXE config for node %s", task.node.uuid)
is_uefi_boot_mode = (deploy_utils.get_boot_mode_for_deploy(task.node) ==
'uefi')
is_uefi_boot_mode = (deploy_utils.get_boot_mode_for_deploy(task.node)
== 'uefi')
if is_uefi_boot_mode and not CONF.pxe.ipxe_enabled:
api = dhcp_factory.DHCPFactory().provider
ip_addresses = api.get_ip_addresses(task)


@ -90,8 +90,8 @@ def is_valid_datapath_id(datapath_id):
"""
m = "^[0-9a-f]{16}$"
return (isinstance(datapath_id, six.string_types) and
re.match(m, datapath_id.lower()))
return (isinstance(datapath_id, six.string_types)
and re.match(m, datapath_id.lower()))
_is_valid_logical_name_re = re.compile(r'^[A-Z0-9-._~]+$', re.I)


@ -32,8 +32,8 @@ class WSGIService(service.ServiceBase):
"""
self.name = name
self.app = app.VersionSelectorApplication()
self.workers = (CONF.api.api_workers or
processutils.get_worker_count())
self.workers = (CONF.api.api_workers
or processutils.get_worker_count())
if self.workers and self.workers < 1:
raise exception.ConfigInvalid(
_("api_workers value of %d is invalid, "


@ -153,8 +153,8 @@ class BaseConductorManager(object):
# Check for required config options if object_store_endpoint_type is
# radosgw
if (CONF.deploy.configdrive_use_object_store and
CONF.deploy.object_store_endpoint_type == "radosgw"):
if (CONF.deploy.configdrive_use_object_store
and CONF.deploy.object_store_endpoint_type == "radosgw"):
if (None in (CONF.swift.auth_url, CONF.swift.username,
CONF.swift.password)):
msg = _("Parameters missing to make a connection with "
@ -489,8 +489,8 @@ class BaseConductorManager(object):
try:
with task_manager.acquire(context, node_uuid,
purpose='node state check') as task:
if (task.node.maintenance or
task.node.provision_state not in provision_state):
if (task.node.maintenance
or task.node.provision_state not in provision_state):
continue
target_state = (None if not keep_target_state else


@ -181,8 +181,8 @@ class ConductorManager(base_manager.BaseConductorManager):
if interface_field not in delta:
continue
if not (node_obj.provision_state in allowed_update_states or
node_obj.maintenance):
if not (node_obj.provision_state in allowed_update_states
or node_obj.maintenance):
raise exception.InvalidState(
action % {'node': node_obj.uuid,
'allowed': ', '.join(allowed_update_states),
@ -193,8 +193,8 @@ class ConductorManager(base_manager.BaseConductorManager):
with task_manager.acquire(context, node_id, shared=False,
purpose='node update') as task:
# Prevent instance_uuid overwriting
if ('instance_uuid' in delta and node_obj.instance_uuid and
task.node.instance_uuid):
if ('instance_uuid' in delta and node_obj.instance_uuid
and task.node.instance_uuid):
raise exception.NodeAssociated(
node=node_id, instance=task.node.instance_uuid)
@ -202,8 +202,8 @@ class ConductorManager(base_manager.BaseConductorManager):
# instance, nova will not update its internal record. That will
# result in the new resource class exposed on the node as available
# for consumption, and nova may try to schedule on this node again.
if ('resource_class' in delta and task.node.resource_class and
task.node.provision_state not in allowed_update_states):
if ('resource_class' in delta and task.node.resource_class
and task.node.provision_state not in allowed_update_states):
raise exception.InvalidState(
action % {'node': node_obj.uuid,
'allowed': ', '.join(allowed_update_states),
@ -257,8 +257,8 @@ class ConductorManager(base_manager.BaseConductorManager):
{'driver': task.node.driver, 'state': new_state})
if new_state in (states.SOFT_REBOOT, states.SOFT_POWER_OFF):
power_timeout = (timeout or
CONF.conductor.soft_power_off_timeout)
power_timeout = (timeout
or CONF.conductor.soft_power_off_timeout)
else:
power_timeout = timeout
@ -1390,8 +1390,8 @@ class ConductorManager(base_manager.BaseConductorManager):
purpose='provision action %s'
% action) as task:
node = task.node
if (action == states.VERBS['provide'] and
node.provision_state == states.MANAGEABLE):
if (action == states.VERBS['provide']
and node.provision_state == states.MANAGEABLE):
task.process_event(
'provide',
callback=self._spawn_worker,
@ -1399,8 +1399,8 @@ class ConductorManager(base_manager.BaseConductorManager):
err_handler=utils.provisioning_error_handler)
return
if (action == states.VERBS['manage'] and
node.provision_state == states.ENROLL):
if (action == states.VERBS['manage']
and node.provision_state == states.ENROLL):
task.process_event(
'manage',
callback=self._spawn_worker,
@ -1408,8 +1408,8 @@ class ConductorManager(base_manager.BaseConductorManager):
err_handler=utils.provisioning_error_handler)
return
if (action == states.VERBS['adopt'] and
node.provision_state in (states.MANAGEABLE,
if (action == states.VERBS['adopt']
and node.provision_state in (states.MANAGEABLE,
states.ADOPTFAIL)):
task.process_event(
'adopt',
@ -1418,14 +1418,14 @@ class ConductorManager(base_manager.BaseConductorManager):
err_handler=utils.provisioning_error_handler)
return
if (action == states.VERBS['abort'] and
node.provision_state == states.CLEANWAIT):
if (action == states.VERBS['abort']
and node.provision_state == states.CLEANWAIT):
# Check if the clean step is abortable; if so abort it.
# Otherwise, indicate in that clean step, that cleaning
# should be aborted after that step is done.
if (node.clean_step and not
node.clean_step.get('abortable')):
if (node.clean_step
and not node.clean_step.get('abortable')):
LOG.info('The current clean step "%(clean_step)s" for '
'node %(node)s is not abortable. Adding a '
'flag to abort the cleaning after the clean '
@ -1456,8 +1456,8 @@ class ConductorManager(base_manager.BaseConductorManager):
target_state=target_state)
return
if (action == states.VERBS['abort'] and
node.provision_state == states.RESCUEWAIT):
if (action == states.VERBS['abort']
and node.provision_state == states.RESCUEWAIT):
utils.remove_node_rescue_password(node, save=True)
task.process_event(
'abort',
@ -1519,10 +1519,10 @@ class ConductorManager(base_manager.BaseConductorManager):
# at the same time.
# NOTE(dtantsur): it's also pointless (and dangerous) to
# sync power state when a power action is in progress
if (task.node.provision_state in SYNC_EXCLUDED_STATES or
task.node.maintenance or
task.node.target_power_state or
task.node.reservation):
if (task.node.provision_state in SYNC_EXCLUDED_STATES
or task.node.maintenance
or task.node.target_power_state
or task.node.reservation):
continue
count = do_sync_power_state(
task, self.power_state_sync_count[node_uuid])
@ -1628,8 +1628,8 @@ class ConductorManager(base_manager.BaseConductorManager):
# TODO(dtantsur): clean up all states that are not stable and
# are not one of WAIT states.
if not maintenance and (provision_state in (states.DEPLOYING,
states.CLEANING) or
target_power_state is not None):
states.CLEANING)
or target_power_state is not None):
LOG.debug('Node %(node)s taken over from conductor %(old)s '
'requires state clean up: provision state is '
'%(state)s, target power state is %(pstate)s',
@ -1826,9 +1826,9 @@ class ConductorManager(base_manager.BaseConductorManager):
# NOTE(deva): now that we have the lock, check again to
# avoid racing with deletes and other state changes
node = task.node
if (node.maintenance or
node.conductor_affinity == self.conductor.id or
node.provision_state != states.ACTIVE):
if (node.maintenance
or node.conductor_affinity == self.conductor.id
or node.provision_state != states.ACTIVE):
continue
task.spawn_after(self._spawn_worker,
@ -1937,8 +1937,8 @@ class ConductorManager(base_manager.BaseConductorManager):
# CLEANFAIL -> MANAGEABLE
# INSPECTIONFAIL -> MANAGEABLE
# DEPLOYFAIL -> DELETING
if (not node.maintenance and
node.provision_state not in states.DELETE_ALLOWED_STATES):
if (not node.maintenance
and node.provision_state not in states.DELETE_ALLOWED_STATES):
msg = (_('Can not delete node "%(node)s" while it is in '
'provision state "%(state)s". Valid provision states '
'to perform deletion are: "%(valid_states)s", '
@ -2253,8 +2253,8 @@ class ConductorManager(base_manager.BaseConductorManager):
# Only allow updating MAC addresses for active nodes if maintenance
# mode is on.
if ((node.provision_state == states.ACTIVE or node.instance_uuid)
and 'address' in port_obj.obj_what_changed() and
not node.maintenance):
and 'address' in port_obj.obj_what_changed()
and not node.maintenance):
action = _("Cannot update hardware address for port "
"%(port)s as node %(node)s is active or has "
"instance UUID assigned")
@ -3083,8 +3083,8 @@ class ConductorManager(base_manager.BaseConductorManager):
if not objinst.obj_attr_is_set(name):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(name) or
getattr(oldobj, name) != getattr(objinst, name)):
if (not oldobj.obj_attr_is_set(name)
or getattr(oldobj, name) != getattr(objinst, name)):
updates[name] = field.to_primitive(objinst, name,
getattr(objinst, name))
# This is safe since a field named this would conflict with the


@ -221,8 +221,8 @@ def node_power_action(task, new_state, timeout=None):
# take power action
try:
if (target_state == states.POWER_ON and
node.provision_state == states.ACTIVE):
if (target_state == states.POWER_ON
and node.provision_state == states.ACTIVE):
task.driver.storage.attach_volumes(task)
if new_state != states.REBOOT:
@ -260,8 +260,8 @@ def node_power_action(task, new_state, timeout=None):
'new_state': new_state})
# NOTE(TheJulia): Similarly to power-on, when we power-off
# a node, we should detach any volume attachments.
if (target_state == states.POWER_OFF and
node.provision_state == states.ACTIVE):
if (target_state == states.POWER_OFF
and node.provision_state == states.ACTIVE):
try:
task.driver.storage.detach_volumes(task)
except exception.StorageError as e:


@ -35,9 +35,9 @@ node = table('nodes',
def upgrade():
network_iface = (CONF.default_network_interface or
('flat' if CONF.dhcp.dhcp_provider == 'neutron'
else 'noop'))
network_iface = (CONF.default_network_interface
or ('flat' if CONF.dhcp.dhcp_provider == 'neutron'
else 'noop'))
op.execute(
node.update().where(
node.c.network_interface == null()).values(


@ -253,12 +253,12 @@ class Connection(api.Connection):
query = query.filter(models.Node.reservation.in_(
filters['reserved_by_any_of']))
if 'provisioned_before' in filters:
limit = (timeutils.utcnow() -
datetime.timedelta(seconds=filters['provisioned_before']))
limit = (timeutils.utcnow()
- datetime.timedelta(seconds=filters['provisioned_before']))
query = query.filter(models.Node.provision_updated_at < limit)
if 'inspection_started_before' in filters:
limit = ((timeutils.utcnow()) -
(datetime.timedelta(
limit = ((timeutils.utcnow())
- (datetime.timedelta(
seconds=filters['inspection_started_before'])))
query = query.filter(models.Node.inspection_started_at < limit)
@ -489,12 +489,12 @@ class Connection(api.Connection):
if values['provision_state'] == states.INSPECTING:
values['inspection_started_at'] = timeutils.utcnow()
values['inspection_finished_at'] = None
elif (ref.provision_state == states.INSPECTING and
values['provision_state'] == states.MANAGEABLE):
elif (ref.provision_state == states.INSPECTING
and values['provision_state'] == states.MANAGEABLE):
values['inspection_finished_at'] = timeutils.utcnow()
values['inspection_started_at'] = None
elif (ref.provision_state == states.INSPECTING and
values['provision_state'] == states.INSPECTFAIL):
elif (ref.provision_state == states.INSPECTING
and values['provision_state'] == states.INSPECTFAIL):
values['inspection_started_at'] = None
ref.update(values)


@ -129,8 +129,8 @@ class BaseDriver(object):
@property
def all_interfaces(self):
return (list(self.core_interfaces + self.standard_interfaces) +
['vendor'])
return (list(self.core_interfaces + self.standard_interfaces)
+ ['vendor'])
@property
def non_vendor_interfaces(self):


@ -191,8 +191,8 @@ def _get_completed_cleaning_command(task, commands):
LOG.debug('Clean step still running for node %(node)s: %(step)s',
{'step': last_step, 'node': task.node.uuid})
return
elif (last_command['command_status'] == 'SUCCEEDED' and
last_step != task.node.clean_step):
elif (last_command['command_status'] == 'SUCCEEDED'
and last_step != task.node.clean_step):
# A previous clean_step was running, the new command has not yet
# started.
LOG.debug('Clean step not yet started for node %(node)s: %(step)s',
@ -307,16 +307,16 @@ class HeartbeatMixin(object):
LOG.debug('Heartbeat from node %(node)s in maintenance mode; '
'not taking any action.', {'node': node.uuid})
return
elif (node.provision_state == states.DEPLOYWAIT and
not self.deploy_has_started(task)):
elif (node.provision_state == states.DEPLOYWAIT
and not self.deploy_has_started(task)):
msg = _('Node failed to deploy.')
self.continue_deploy(task)
elif (node.provision_state == states.DEPLOYWAIT and
self.deploy_is_done(task)):
elif (node.provision_state == states.DEPLOYWAIT
and self.deploy_is_done(task)):
msg = _('Node failed to move to active state.')
self.reboot_to_instance(task)
elif (node.provision_state == states.DEPLOYWAIT and
self.deploy_has_started(task)):
elif (node.provision_state == states.DEPLOYWAIT
and self.deploy_has_started(task)):
node.touch_provisioning()
elif node.provision_state == states.CLEANWAIT:
node.touch_provisioning()
@ -364,8 +364,8 @@ class HeartbeatMixin(object):
raise exception.InstanceRescueFailure(node=node.uuid,
instance=node.instance_uuid,
reason=e)
if ((not result.get('command_status')) or
result.get('command_status') != 'SUCCEEDED'):
if ((not result.get('command_status'))
or result.get('command_status') != 'SUCCEEDED'):
# NOTE(mariojv) Caller will clean up failed rescue in exception
# handler.
fail_reason = (_('Agent returned bad result for command '


@ -199,8 +199,8 @@ def start_shellinabox_console(node_uuid, port, console_cmd):
# if it is, then the shellinaboxd is invoked successfully as a daemon.
# otherwise check the error.
if locals['returncode'] is not None:
if (locals['returncode'] == 0 and os.path.exists(pid_file) and
psutil.pid_exists(_get_console_pid(node_uuid))):
if (locals['returncode'] == 0 and os.path.exists(pid_file)
and psutil.pid_exists(_get_console_pid(node_uuid))):
raise loopingcall.LoopingCallDone()
else:
(stdout, stderr) = popen_obj.communicate()
@ -324,8 +324,8 @@ def start_socat_console(node_uuid, port, console_cmd):
if wait_state['returncode'] is None:
# If the pid file is created and the process is running,
# we stop checking it periodically.
if (os.path.exists(pid_file) and
psutil.pid_exists(_get_console_pid(node_uuid))):
if (os.path.exists(pid_file)
and psutil.pid_exists(_get_console_pid(node_uuid))):
raise loopingcall.LoopingCallDone()
else:
# socat returned, it failed to start.


@ -520,8 +520,8 @@ def set_failed_state(task, msg, collect_logs=True):
"""
node = task.node
if (collect_logs and
CONF.agent.deploy_logs_collect in ('on_failure', 'always')):
if (collect_logs
and CONF.agent.deploy_logs_collect in ('on_failure', 'always')):
driver_utils.collect_ramdisk_logs(node)
try:
@ -1079,8 +1079,8 @@ def parse_instance_info(node):
i_info['image_source'] = info.get('image_source')
iwdi = node.driver_internal_info.get('is_whole_disk_image')
if not iwdi:
if (i_info['image_source'] and
not service_utils.is_glance_image(
if (i_info['image_source']
and not service_utils.is_glance_image(
i_info['image_source'])):
i_info['kernel'] = info.get('kernel')
i_info['ramdisk'] = info.get('ramdisk')
@ -1292,8 +1292,8 @@ def populate_storage_driver_internal_info(task):
driver_internal_info['boot_from_volume'] = vol_uuid
# NOTE(TheJulia): This would be a convenient place to check
# if we need to know about deploying the volume.
if (check_interface_capability(task.driver.deploy, deploy_capability) and
task.driver.storage.should_write_image(task)):
if (check_interface_capability(task.driver.deploy, deploy_capability)
and task.driver.storage.should_write_image(task)):
driver_internal_info['boot_from_volume_deploy'] = vol_uuid
# NOTE(TheJulia): This is also a useful place to include a
# root device hint since we should/might/be able to obtain


@ -364,8 +364,8 @@ def _max_volume_size_mb(raid_level, physical_disks, free_space_mb,
disks_count = len(physical_disks)
overhead_disks_count = _raid_level_overhead(raid_level, spans_count)
return int(stripes_per_disk * stripe_size_kb *
(disks_count - overhead_disks_count) / units.Ki)
return int(stripes_per_disk * stripe_size_kb
* (disks_count - overhead_disks_count) / units.Ki)
def _volume_usage_per_disk_mb(logical_disk, physical_disks, spans_count=1,
@ -441,8 +441,8 @@ def _find_configuration(logical_disks, physical_disks):
# step 1 - process volumes with predefined disks and exact size
for volume in [volume for volume in logical_disks
if ('physical_disks' in volume and
volume['size_mb'] != 'MAX')]:
if ('physical_disks' in volume
and volume['size_mb'] != 'MAX')]:
_calculate_volume_props(volume, physical_disks, free_space_mb)
processed_volumes.append(volume)
@ -476,8 +476,8 @@ def _find_configuration(logical_disks, physical_disks):
# step 3 - process volumes with predefined disks and size_mb == 'MAX'
for volume in [volume for volume in logical_disks
if ('physical_disks' in volume and
volume['size_mb'] == 'MAX')]:
if ('physical_disks' in volume
and volume['size_mb'] == 'MAX')]:
_calculate_volume_props(volume, physical_disks, free_space_mb)
processed_volumes.append(volume)
@ -541,12 +541,12 @@ def _assign_disks_to_volume(logical_disks, physical_disks_by_type,
for (controller, disk_type,
interface_type, size_mb), disks in physical_disks_by_type.items():
if ('disk_type' in logical_disk and
logical_disk['disk_type'] != disk_type):
if ('disk_type' in logical_disk
and logical_disk['disk_type'] != disk_type):
continue
if ('interface_type' in logical_disk and
logical_disk['interface_type'] != interface_type):
if ('interface_type' in logical_disk
and logical_disk['interface_type'] != interface_type):
continue
# filter out disks without free disk space
@ -573,8 +573,8 @@ def _assign_disks_to_volume(logical_disks, physical_disks_by_type,
candidate_max_disks = min([max_disks, len(disks)])
for disks_count in range(min_disks, candidate_max_disks + 1):
if ('number_of_physical_disks' in logical_disk and
logical_disk['number_of_physical_disks'] != disks_count):
if ('number_of_physical_disks' in logical_disk
and logical_disk['number_of_physical_disks'] != disks_count):
continue
# skip invalid disks_count


@ -160,10 +160,10 @@ def _get_boot_iso(task, root_uuid):
task.context, image_href, ['boot_iso', 'kernel_id', 'ramdisk_id']))
boot_iso_uuid = image_properties.get('boot_iso')
kernel_href = (task.node.instance_info.get('kernel') or
image_properties.get('kernel_id'))
ramdisk_href = (task.node.instance_info.get('ramdisk') or
image_properties.get('ramdisk_id'))
kernel_href = (task.node.instance_info.get('kernel')
or image_properties.get('kernel_id'))
ramdisk_href = (task.node.instance_info.get('ramdisk')
or image_properties.get('ramdisk_id'))
if boot_iso_uuid:
LOG.debug("Found boot_iso %s in Glance", boot_iso_uuid)
@ -558,8 +558,8 @@ class IloVirtualMediaBoot(base.BootInterface):
disable_secure_boot_if_supported(task)
driver_internal_info = task.node.driver_internal_info
if (deploy_utils.is_iscsi_boot(task) and
task.node.driver_internal_info.get('ilo_uefi_iscsi_boot')):
if (deploy_utils.is_iscsi_boot(task)
and task.node.driver_internal_info.get('ilo_uefi_iscsi_boot')):
# It will clear iSCSI info from iLO
task.driver.management.clear_iscsi_boot_target(task)
driver_internal_info.pop('ilo_uefi_iscsi_boot', None)
@ -706,8 +706,8 @@ class IloPXEBoot(pxe.PXEBoot):
disable_secure_boot_if_supported(task)
driver_internal_info = task.node.driver_internal_info
if (deploy_utils.is_iscsi_boot(task) and
task.node.driver_internal_info.get('ilo_uefi_iscsi_boot')):
if (deploy_utils.is_iscsi_boot(task)
and task.node.driver_internal_info.get('ilo_uefi_iscsi_boot')):
# It will clear iSCSI info from iLO in case of booting from
# volume in UEFI boot mode
task.driver.management.clear_iscsi_boot_target(task)


@ -92,9 +92,9 @@ def _execute_ilo_clean_step(node, step, *args, **kwargs):
def _should_collect_logs(command):
"""Returns boolean to check whether logs need to collected or not."""
return ((CONF.agent.deploy_logs_collect == 'on_failure' and
command['command_status'] == 'FAILED') or
CONF.agent.deploy_logs_collect == 'always')
return ((CONF.agent.deploy_logs_collect == 'on_failure'
and command['command_status'] == 'FAILED')
or CONF.agent.deploy_logs_collect == 'always')
class IloManagement(base.ManagementInterface):


@ -58,8 +58,8 @@ class VendorPassthru(base.VendorInterface):
def _validate_boot_into_iso(self, task, kwargs):
"""Validates if attach_iso can be called and if inputs are proper."""
if not (task.node.provision_state == states.MANAGEABLE or
task.node.maintenance is True):
if not (task.node.provision_state == states.MANAGEABLE
or task.node.maintenance is True):
msg = (_("The requested action 'boot_into_iso' can be performed "
"only when node %(node_uuid)s is in %(state)s state or "
"in 'maintenance' mode") %


@ -248,8 +248,8 @@ class ImageCache(object):
for f in os.listdir(self.master_dir))
total_size = sum(os.path.getsize(f)
for f in total_listing)
while listing and (total_size > self._cache_size or
(amount is not None and amount > 0)):
while listing and (total_size > self._cache_size
or (amount is not None and amount > 0)):
file_name, last_used, stat = listing.pop()
try:
os.unlink(file_name)
@ -420,8 +420,8 @@ def _delete_dest_path_if_stale(master_path, dest_path):
# Image not cached, re-download
return False
master_path_exists = os.path.exists(master_path)
if (not master_path_exists or
os.stat(master_path).st_ino != os.stat(dest_path).st_ino):
if (not master_path_exists
or os.stat(master_path).st_ino != os.stat(dest_path).st_ino):
# Image exists in cache, but dest_path out of date
os.unlink(dest_path)
return False


@ -439,9 +439,9 @@ def _exec_ipmitool(driver_info, command, check_exit_code=None):
with excutils.save_and_reraise_exception() as ctxt:
err_list = [x for x in IPMITOOL_RETRYABLE_FAILURES
if x in six.text_type(e)]
if ((time.time() > end_time) or
(num_tries == 0) or
not err_list):
if ((time.time() > end_time)
or (num_tries == 0)
or not err_list):
LOG.error('IPMI Error while attempting "%(cmd)s" '
'for node %(node)s. Error: %(error)s',
{'node': driver_info['uuid'],
@ -983,9 +983,9 @@ class IPMIManagement(base.ManagementInterface):
driver_info = task.node.driver_info
driver_internal_info = task.node.driver_internal_info
if (driver_info.get('ipmi_force_boot_device', False) and
driver_internal_info.get('persistent_boot_device') and
driver_internal_info.get('is_next_boot_persistent', True)):
if (driver_info.get('ipmi_force_boot_device', False)
and driver_internal_info.get('persistent_boot_device')
and driver_internal_info.get('is_next_boot_persistent', True)):
return {
'boot_device': driver_internal_info['persistent_boot_device'],
'persistent': True


@ -281,10 +281,10 @@ def _prepare_boot_iso(task, root_uuid):
image_props = ['kernel_id', 'ramdisk_id']
image_properties = images.get_image_properties(
task.context, image_href, image_props)
kernel_href = (task.node.instance_info.get('kernel') or
image_properties['kernel_id'])
ramdisk_href = (task.node.instance_info.get('ramdisk') or
image_properties['ramdisk_id'])
kernel_href = (task.node.instance_info.get('kernel')
or image_properties['kernel_id'])
ramdisk_href = (task.node.instance_info.get('ramdisk')
or image_properties['ramdisk_id'])
deploy_iso_filename = _get_iso_name(task.node, label='deploy')
deploy_iso = ('file://' + os.path.join(


@ -111,9 +111,9 @@ def parse_driver_info(node):
error_msgs.append(
_("Value '%s' is not an integer for 'irmc_snmp_port'") %
d_info['irmc_snmp_port'])
if (d_info['irmc_snmp_version'].lower() in ('v1', 'v2c') and
d_info['irmc_snmp_community'] and
not isinstance(d_info['irmc_snmp_community'], six.string_types)):
if (d_info['irmc_snmp_version'].lower() in ('v1', 'v2c')
and d_info['irmc_snmp_community']
and not isinstance(d_info['irmc_snmp_community'], six.string_types)):
error_msgs.append(
_("Value '%s' is not a string for 'irmc_snmp_community'") %
d_info['irmc_snmp_community'])


@ -73,12 +73,12 @@ def _is_expected_power_state(target_state, boot_status_value):
:param boot_status_value: SNMP BOOT_STATUS_VALUE.
:returns: True if expected power state, otherwise Flase.
"""
if (target_state == states.SOFT_POWER_OFF and
boot_status_value in (BOOT_STATUS_VALUE['unknown'],
BOOT_STATUS_VALUE['off'])):
if (target_state == states.SOFT_POWER_OFF
and boot_status_value in (BOOT_STATUS_VALUE['unknown'],
BOOT_STATUS_VALUE['off'])):
return True
elif (target_state == states.SOFT_REBOOT and
boot_status_value == BOOT_STATUS_VALUE['os-running']):
elif (target_state == states.SOFT_REBOOT
and boot_status_value == BOOT_STATUS_VALUE['os-running']):
return True
return False


@ -78,9 +78,9 @@ def _is_port_physnet_allowed(port, physnets):
ignored.
:returns: True if the port's physical network is allowed, False otherwise.
"""
return (not physnets or
port.physical_network is None or
port.physical_network in physnets)
return (not physnets
or port.physical_network is None
or port.physical_network in physnets)
def _get_free_portgroups_and_ports(task, vif_id, physnets):
@ -218,8 +218,8 @@ def plug_port_to_tenant_network(task, port_like_obj, client=None):
client_id_opt = None
vif_id = (
port_like_obj.internal_info.get(TENANT_VIF_KEY) or
port_like_obj.extra.get('vif_port_id'))
port_like_obj.internal_info.get(TENANT_VIF_KEY)
or port_like_obj.extra.get('vif_port_id'))
if not vif_id:
obj_name = port_like_obj.__class__.__name__.lower()
@ -340,8 +340,8 @@ class VIFPortIDMixin(object):
"""
# FIXME(sambetts) Remove this when we no longer support a nova
# driver that uses port.extra
return (port_like_obj.internal_info.get(TENANT_VIF_KEY) or
port_like_obj.extra.get('vif_port_id'))
return (port_like_obj.internal_info.get(TENANT_VIF_KEY)
or port_like_obj.extra.get('vif_port_id'))
def vif_list(self, task):
"""List attached VIF IDs for a node
@ -371,10 +371,10 @@ class VIFPortIDMixin(object):
:returns: VIF ID associated with p_obj or None.
"""
return (p_obj.internal_info.get('cleaning_vif_port_id') or
p_obj.internal_info.get('provisioning_vif_port_id') or
p_obj.internal_info.get('rescuing_vif_port_id') or
self._get_vif_id_by_port_like_obj(p_obj) or None)
return (p_obj.internal_info.get('cleaning_vif_port_id')
or p_obj.internal_info.get('provisioning_vif_port_id')
or p_obj.internal_info.get('rescuing_vif_port_id')
or self._get_vif_id_by_port_like_obj(p_obj) or None)
class NeutronVIFPortIDMixin(VIFPortIDMixin):
@ -410,8 +410,8 @@ class NeutronVIFPortIDMixin(VIFPortIDMixin):
original_port = objects.Port.get_by_id(context, port_obj.id)
updated_client_id = port_obj.extra.get('client-id')
if (original_port.extra.get('client-id') !=
updated_client_id):
if (original_port.extra.get('client-id')
!= updated_client_id):
# DHCP Option with opt_value=None will remove it
# from the neutron port
if vif:
@ -431,8 +431,8 @@ class NeutronVIFPortIDMixin(VIFPortIDMixin):
{'port': port_uuid,
'instance': node.instance_uuid})
if portgroup_obj and ((set(port_obj.obj_what_changed()) &
{'pxe_enabled', 'portgroup_id'}) or vif):
if portgroup_obj and ((set(port_obj.obj_what_changed())
& {'pxe_enabled', 'portgroup_id'}) or vif):
if not portgroup_obj.standalone_ports_supported:
reason = []
if port_obj.pxe_enabled:
@ -461,8 +461,8 @@ class NeutronVIFPortIDMixin(VIFPortIDMixin):
portgroup_uuid = portgroup_obj.uuid
# NOTE(vsaienko) address is not mandatory field in portgroup.
# Do not touch neutron port if we removed address on portgroup.
if ('address' in portgroup_obj.obj_what_changed() and
portgroup_obj.address):
if ('address' in portgroup_obj.obj_what_changed()
and portgroup_obj.address):
pg_vif = self._get_vif_id_by_port_like_obj(portgroup_obj)
if pg_vif:
neutron.update_port_address(pg_vif, portgroup_obj.address,

View File

@ -63,8 +63,8 @@ class FlatNetwork(common.NeutronVIFPortIDMixin,
client = neutron.get_client(context=task.context)
for port_like_obj in task.ports + task.portgroups:
vif_port_id = (
port_like_obj.internal_info.get(common.TENANT_VIF_KEY) or
port_like_obj.extra.get('vif_port_id')
port_like_obj.internal_info.get(common.TENANT_VIF_KEY)
or port_like_obj.extra.get('vif_port_id')
)
if not vif_port_id:
continue

View File

@ -62,9 +62,9 @@ class NeutronNetwork(common.NeutronVIFPortIDMixin,
self.get_cleaning_network_uuid(task)
self.get_provisioning_network_uuid(task)
node = task.node
if (node.provision_state == states.DEPLOYING and
node.driver_internal_info.get('is_whole_disk_image') and
deploy_utils.get_boot_option(node) == 'netboot'):
if (node.provision_state == states.DEPLOYING
and node.driver_internal_info.get('is_whole_disk_image')
and deploy_utils.get_boot_option(node) == 'netboot'):
error_msg = (_('The node %s cannot perform "local" boot for '
'whole disk image when node is using "neutron" '
'network and is configured with "netboot" boot '
@ -254,8 +254,8 @@ class NeutronNetwork(common.NeutronVIFPortIDMixin,
portgroups = task.portgroups
for port_like_obj in ports + portgroups:
vif_port_id = (
port_like_obj.internal_info.get(common.TENANT_VIF_KEY) or
port_like_obj.extra.get('vif_port_id'))
port_like_obj.internal_info.get(common.TENANT_VIF_KEY)
or port_like_obj.extra.get('vif_port_id'))
if not vif_port_id:
continue
neutron.unbind_neutron_port(vif_port_id, context=task.context)

View File

@ -167,8 +167,8 @@ def _is_node_in_use(server_hardware, applied_sp_uri, by_oneview=False):
"""
operation = operator.ne if by_oneview else operator.eq
server_profile_uri = server_hardware.get('serverProfileUri')
return (server_profile_uri and
operation(applied_sp_uri, server_profile_uri))
return (server_profile_uri
and operation(applied_sp_uri, server_profile_uri))
def is_node_in_use_by_oneview(node):

View File

@ -169,8 +169,8 @@ class OneViewPower(base.PowerInterface):
oneview_client.server_hardware.update_power_state(
SET_POWER_STATE_MAP.get(power_state),
server_hardware, timeout=timeout)
elif (power_state == states.REBOOT or
power_state == states.SOFT_REBOOT):
elif (power_state == states.REBOOT
or power_state == states.SOFT_REBOOT):
power_off_mode = (states.POWER_OFF
if power_state == states.REBOOT
else states.SOFT_POWER_OFF)

View File

@ -109,8 +109,8 @@ def _get_instance_image_info(node, ctx):
image_info = {}
# NOTE(pas-ha) do not report image kernel and ramdisk for
# local boot or whole disk images so that they are not cached
if (node.driver_internal_info.get('is_whole_disk_image') or
deploy_utils.get_boot_option(node) == 'local'):
if (node.driver_internal_info.get('is_whole_disk_image')
or deploy_utils.get_boot_option(node) == 'local'):
return image_info
root_dir = pxe_utils.get_root_dir()
@ -168,8 +168,8 @@ def _build_deploy_pxe_options(task, pxe_info, mode='deploy'):
(ramdisk_label, 'deployment_ari_path')):
if CONF.pxe.ipxe_enabled:
image_href = pxe_info[label][0]
if (CONF.pxe.ipxe_use_swift and
service_utils.is_glance_image(image_href)):
if (CONF.pxe.ipxe_use_swift
and service_utils.is_glance_image(image_href)):
pxe_opts[option] = images.get_temp_url_for_glance_image(
task.context, image_href)
else:
@ -246,8 +246,8 @@ def _build_pxe_config_options(task, pxe_info, service=False):
mode = deploy_utils.rescue_or_deploy_mode(node)
if service:
pxe_options = {}
elif (node.driver_internal_info.get('boot_from_volume') and
CONF.pxe.ipxe_enabled):
elif (node.driver_internal_info.get('boot_from_volume')
and CONF.pxe.ipxe_enabled):
pxe_options = _get_volume_pxe_options(task)
else:
pxe_options = _build_deploy_pxe_options(task, pxe_info, mode=mode)
@ -271,8 +271,8 @@ def _build_service_pxe_config(task, instance_image_info,
# NOTE(pas-ha) if it is takeover of ACTIVE node or node performing
# unrescue operation, first ensure that basic PXE configs and links
# are in place before switching pxe config
if (node.provision_state in [states.ACTIVE, states.UNRESCUING] and
not os.path.isfile(pxe_config_path)):
if (node.provision_state in [states.ACTIVE, states.UNRESCUING]
and not os.path.isfile(pxe_config_path)):
pxe_options = _build_pxe_config_options(task, instance_image_info,
service=True)
pxe_config_template = deploy_utils.get_pxe_config_template(node)
@ -353,9 +353,9 @@ def validate_boot_parameters_for_trusted_boot(node):
is_whole_disk_image = node.driver_internal_info.get('is_whole_disk_image')
# 'is_whole_disk_image' is not supported by trusted boot, because there is
# no Kernel/Ramdisk to measure at all.
if (boot_mode != 'bios' or
is_whole_disk_image or
boot_option != 'netboot'):
if (boot_mode != 'bios'
or is_whole_disk_image
or boot_option != 'netboot'):
msg = (_("Trusted boot is only supported in BIOS boot mode with "
"netboot and without whole_disk_image, but Node "
"%(node_uuid)s was configured with boot_mode: %(boot_mode)s, "
@ -448,8 +448,8 @@ class PXEBoot(base.BootInterface):
% node.uuid)
if CONF.pxe.ipxe_enabled:
if (not CONF.deploy.http_url or
not CONF.deploy.http_root):
if (not CONF.deploy.http_url
or not CONF.deploy.http_root):
raise exception.MissingParameterValue(_(
"iPXE boot is enabled but no HTTP URL or HTTP "
"root was specified."))
@ -468,8 +468,8 @@ class PXEBoot(base.BootInterface):
return
d_info = deploy_utils.get_image_instance_info(node)
if (node.driver_internal_info.get('is_whole_disk_image') or
deploy_utils.get_boot_option(node) == 'local'):
if (node.driver_internal_info.get('is_whole_disk_image')
or deploy_utils.get_boot_option(node) == 'local'):
props = []
elif service_utils.is_glance_image(d_info['image_source']):
props = ['kernel_id', 'ramdisk_id']

View File

@ -79,8 +79,8 @@ class CinderStorage(base.StorageInterface):
ipxe_enabled = CONF.pxe.ipxe_enabled
for connector in task.volume_connectors:
if (connector.type in VALID_ISCSI_TYPES and
connector.connector_id is not None):
if (connector.type in VALID_ISCSI_TYPES
and connector.connector_id is not None):
iscsi_uuids_found.append(connector.uuid)
if not ipxe_enabled:
msg = _("The [pxe]/ipxe_enabled option must "
@ -88,8 +88,8 @@ class CinderStorage(base.StorageInterface):
"booting to an iSCSI volume.")
self._fail_validation(task, msg)
if (connector.type in VALID_FC_TYPES and
connector.connector_id is not None):
if (connector.type in VALID_FC_TYPES
and connector.connector_id is not None):
# NOTE(TheJulia): Unlike iSCSI with cinder, we have no need
# to warn about multiple IQN entries, since we are able to
# submit multiple fibre channel WWPN entries.
@ -313,8 +313,9 @@ class CinderStorage(base.StorageInterface):
# NOTE(TheJulia): If the node is in ACTIVE state, we can
# tolerate failures detaching as the node is likely being
# powered down to cause a detachment event.
allow_errors = (task.node.provision_state == states.ACTIVE or
aborting_attach and outer_args['attempt'] > 0)
allow_errors = (task.node.provision_state == states.ACTIVE
or aborting_attach and outer_args['attempt']
> 0)
cinder.detach_volumes(task, targets, connector,
allow_errors=allow_errors)
except exception.StorageError as e:

View File

@ -88,8 +88,8 @@ class IronicObject(object_base.VersionedObject):
object.
"""
for field in self.fields:
if (self.obj_attr_is_set(field) and
self[field] != loaded_object[field]):
if (self.obj_attr_is_set(field)
and self[field] != loaded_object[field]):
self[field] = loaded_object[field]
def _convert_to_version(self, target_version,
@ -167,8 +167,8 @@ class IronicObject(object_base.VersionedObject):
# is supported by this service. self.VERSION is the version of
# this object instance -- it may get set via e.g. the
# serialization or deserialization process, or here.
if (self.__class__.VERSION != target_version or
self.VERSION != self.__class__.VERSION):
if (self.__class__.VERSION != target_version
or self.VERSION != self.__class__.VERSION):
self.VERSION = target_version
@classmethod

View File

@ -56,8 +56,8 @@ class StringFieldThatAcceptsCallable(object_fields.StringField):
def __repr__(self):
default = self._default
if (self._default != object_fields.UnspecifiedDefault and
callable(self._default)):
if (self._default != object_fields.UnspecifiedDefault
and callable(self._default)):
default = "%s-%s" % (
self._default.__name__,
hashlib.md5(inspect.getsource(

View File

@ -94,8 +94,8 @@ class NotificationBase(base.IronicObject):
"""
if CONF.notification_level is None:
return False
return (NOTIFY_LEVELS[self.level] >=
NOTIFY_LEVELS[CONF.notification_level])
return (NOTIFY_LEVELS[self.level]
>= NOTIFY_LEVELS[CONF.notification_level])
def emit(self, context):
"""Send the notification.

View File

@ -51,8 +51,8 @@ class TestV1Root(base.BaseApiTest):
# Check if all known resources are present and there are no extra ones.
not_resources = ('id', 'links', 'media_types')
actual_resources = tuple(set(data) - set(not_resources))
expected_resources = (['chassis', 'drivers', 'nodes', 'ports'] +
additional_expected_resources)
expected_resources = (['chassis', 'drivers', 'nodes', 'ports']
+ additional_expected_resources)
self.assertEqual(sorted(expected_resources), sorted(actual_resources))
self.assertIn({'type': 'application/vnd.openstack.ironic.v1+json',
'base': 'application/json'}, data['media_types'])

View File

@ -119,8 +119,8 @@ class DriverLoadTestCase(db_base.DbTestCase):
@mock.patch.object(driver_factory.LOG, 'warning', autospec=True)
def test_build_driver_for_task_incorrect(self, mock_warn, mock_attach):
# Cannot set these node interfaces for classic driver
no_set_interfaces = (drivers_base.ALL_INTERFACES -
set(['network', 'storage']))
no_set_interfaces = (drivers_base.ALL_INTERFACES
- set(['network', 'storage']))
for iface in no_set_interfaces:
iface_name = '%s_interface' % iface
node_kwargs = {'uuid': uuidutils.generate_uuid(),
@ -305,8 +305,8 @@ class CheckAndUpdateNodeInterfacesTestCase(db_base.DbTestCase):
def test_create_node_classic_driver_not_allowed_interfaces_set(self):
# Cannot set these node interfaces for classic driver
no_set_interfaces = (drivers_base.ALL_INTERFACES -
set(['network', 'storage']))
no_set_interfaces = (drivers_base.ALL_INTERFACES
- set(['network', 'storage']))
for iface in no_set_interfaces:
iface_name = '%s_interface' % iface
node_kwargs = {'uuid': uuidutils.generate_uuid(),
@ -319,8 +319,8 @@ class CheckAndUpdateNodeInterfacesTestCase(db_base.DbTestCase):
driver_factory.check_and_update_node_interfaces, node)
def test_create_node_classic_driver_no_interfaces_set(self):
no_set_interfaces = (drivers_base.ALL_INTERFACES -
set(['network', 'storage']))
no_set_interfaces = (drivers_base.ALL_INTERFACES
- set(['network', 'storage']))
node_kwargs = {'uuid': uuidutils.generate_uuid()}
node = obj_utils.get_test_node(self.context, driver='fake',
**node_kwargs)
@ -368,8 +368,8 @@ class CheckAndUpdateNodeInterfacesTestCase(db_base.DbTestCase):
def test_update_node_set_classic_driver_and_not_allowed_interfaces(self):
"""Update driver to classic and interfaces specified"""
not_allowed_interfaces = (drivers_base.ALL_INTERFACES -
set(['network', 'storage']))
not_allowed_interfaces = (drivers_base.ALL_INTERFACES
- set(['network', 'storage']))
self.config(enabled_drivers=['fake', 'fake_agent'])
for iface in not_allowed_interfaces:
iface_name = '%s_interface' % iface
@ -400,8 +400,8 @@ class CheckAndUpdateNodeInterfacesTestCase(db_base.DbTestCase):
def test_update_node_set_classic_driver_unset_interfaces(self):
"""Update driver to classic and set interfaces to None"""
no_set_interfaces = (drivers_base.ALL_INTERFACES -
set(['network', 'storage']))
no_set_interfaces = (drivers_base.ALL_INTERFACES
- set(['network', 'storage']))
self.config(enabled_drivers=['fake', 'fake_agent'])
for iface in no_set_interfaces:
iface_name = '%s_interface' % iface
@ -416,8 +416,8 @@ class CheckAndUpdateNodeInterfacesTestCase(db_base.DbTestCase):
def test_update_node_classic_driver_unset_interfaces(self):
"""Update interfaces to None for node with classic driver"""
no_set_interfaces = (drivers_base.ALL_INTERFACES -
set(['network', 'storage']))
no_set_interfaces = (drivers_base.ALL_INTERFACES
- set(['network', 'storage']))
self.config(enabled_drivers=['fake', 'fake_agent'])
for iface in no_set_interfaces:
iface_name = '%s_interface' % iface
@ -431,8 +431,8 @@ class CheckAndUpdateNodeInterfacesTestCase(db_base.DbTestCase):
def test_update_node_set_classic_driver_no_interfaces(self):
"""Update driver to classic no interfaces specified"""
self._set_config_interface_options_hardware_type()
no_set_interfaces = (drivers_base.ALL_INTERFACES -
set(['network', 'storage']))
no_set_interfaces = (drivers_base.ALL_INTERFACES
- set(['network', 'storage']))
for iface in no_set_interfaces:
iface_name = '%s_interface' % iface
node_kwargs = {'uuid': uuidutils.generate_uuid()}
@ -618,8 +618,8 @@ class TestFakeHardware(hardware_type.AbstractHardwareType):
return [fake.FakeVendorB, fake.FakeVendorA]
OPTIONAL_INTERFACES = (set(drivers_base.BareDriver().standard_interfaces) -
{'management', 'boot'}) | {'vendor'}
OPTIONAL_INTERFACES = (set(drivers_base.BareDriver().standard_interfaces)
- {'management', 'boot'}) | {'vendor'}
class HardwareTypeLoadTestCase(db_base.DbTestCase):

View File

@ -700,8 +700,8 @@ class TestGlanceSwiftTempURL(base.TestCase):
temp_url = self.service.swift_temp_url(image_info=self.fake_image)
self.assertEqual(
(urlparse.urljoin(CONF.glance.swift_endpoint_url, 'swift') +
tempurl_mock.return_value),
(urlparse.urljoin(CONF.glance.swift_endpoint_url, 'swift')
+ tempurl_mock.return_value),
temp_url)
tempurl_mock.assert_called_with(
path=path,
@ -865,8 +865,8 @@ class TestSwiftTempUrlCache(base.TestCase):
temp_url = self.glance_service.swift_temp_url(
image_info=fake_image)
self.assertEqual(CONF.glance.swift_endpoint_url +
tempurl_mock.return_value,
self.assertEqual(CONF.glance.swift_endpoint_url
+ tempurl_mock.return_value,
temp_url)
cleanup_mock.assert_called_once_with()
tempurl_mock.assert_called_with(
@ -921,8 +921,8 @@ class TestSwiftTempUrlCache(base.TestCase):
query = '?temp_url_sig=hmacsig&temp_url_expires=%s'
self.glance_service._cache[fake_image['id']] = (
glance_v2.TempUrlCacheElement(
url=(CONF.glance.swift_endpoint_url + path +
query % old_exp_time),
url=(CONF.glance.swift_endpoint_url + path
+ query % old_exp_time),
url_expires_at=old_exp_time)
)
@ -935,8 +935,8 @@ class TestSwiftTempUrlCache(base.TestCase):
fresh_temp_url = self.glance_service.swift_temp_url(
image_info=fake_image)
self.assertEqual(CONF.glance.swift_endpoint_url +
tempurl_mock.return_value,
self.assertEqual(CONF.glance.swift_endpoint_url
+ tempurl_mock.return_value,
fresh_temp_url)
tempurl_mock.assert_called_with(
path=path,
@ -994,8 +994,8 @@ class TestSwiftTempUrlCache(base.TestCase):
image_id=fake_image['id']
)
self.assertEqual(CONF.glance.swift_endpoint_url +
tempurl_mock.return_value,
self.assertEqual(CONF.glance.swift_endpoint_url
+ tempurl_mock.return_value,
temp_url)
tempurl_mock.assert_called_with(
path=path,

View File

@ -316,8 +316,8 @@ class GenericUtilsTestCase(base.TestCase):
valid_no_proxy = [
('a' * 63 + '.' + '0' * 63 + '.c.' + 'd' * 61 + '.' + 'e' * 61),
('A' * 63 + '.' + '0' * 63 + '.C.' + 'D' * 61 + '.' + 'E' * 61),
('.' + 'a' * 62 + '.' + '0' * 62 + '.c.' + 'd' * 61 + '.' +
'e' * 61),
('.' + 'a' * 62 + '.' + '0' * 62 + '.c.' + 'd' * 61 + '.'
+ 'e' * 61),
',,example.com:3128,',
'192.168.1.1', # IP should be valid
]
@ -336,14 +336,14 @@ class GenericUtilsTestCase(base.TestCase):
# Invalid values for 'no_proxy'
invalid_no_proxy = [
('A' * 64 + '.' + '0' * 63 + '.C.' + 'D' * 61 + '.' +
'E' * 61), # too long (> 253)
('A' * 64 + '.' + '0' * 63 + '.C.' + 'D' * 61 + '.'
+ 'E' * 61), # too long (> 253)
('a' * 100),
'a..com',
('.' + 'a' * 63 + '.' + '0' * 62 + '.c.' + 'd' * 61 + '.' +
'e' * 61), # too long (> 251 after deleting .)
('*.' + 'a' * 60 + '.' + '0' * 60 + '.c.' + 'd' * 61 + '.' +
'e' * 61), # starts with *.
('.' + 'a' * 63 + '.' + '0' * 62 + '.c.' + 'd' * 61 + '.'
+ 'e' * 61), # too long (> 251 after deleting .)
('*.' + 'a' * 60 + '.' + '0' * 60 + '.c.' + 'd' * 61 + '.'
+ 'e' * 61), # starts with *.
'c.-a.com',
'c.a-.com',
]

View File

@ -244,8 +244,8 @@ class MigrationCheckersMixin(object):
data_conductor = {'hostname': 'test_host'}
conductors.insert().execute(data_conductor)
conductor = conductors.select(
conductors.c.hostname ==
data_conductor['hostname']).execute().first()
conductors.c.hostname
== data_conductor['hostname']).execute().first()
data_node = {'uuid': uuidutils.generate_uuid(),
'conductor_affinity': conductor['id']}

View File

@ -504,8 +504,8 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
def test_get_properties(self):
self.assertEqual(
set(list(ansible_deploy.COMMON_PROPERTIES) +
['deploy_forces_oob_reboot']),
set(list(ansible_deploy.COMMON_PROPERTIES)
+ ['deploy_forces_oob_reboot']),
set(self.driver.get_properties()))
@mock.patch.object(deploy_utils, 'check_for_missing_params',

View File

@ -903,8 +903,8 @@ class DracRaidInterfaceTestCase(db_base.DbTestCase):
'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
'is_root_volume': True
}
self.logical_disks = ([self.root_logical_disk] +
self.nonroot_logical_disks)
self.logical_disks = ([self.root_logical_disk]
+ self.nonroot_logical_disks)
self.target_raid_configuration = {'logical_disks': self.logical_disks}
self.node.target_raid_config = self.target_raid_configuration
self.node.save()

View File

@ -494,10 +494,10 @@ class IRMCDeployPrivateMethodsTestCase(db_base.DbTestCase):
task.context, 'image-uuid', ['kernel_id', 'ramdisk_id'])
create_boot_iso_mock.assert_called_once_with(
task.context,
'/remote_image_share_root/' +
'/remote_image_share_root/'
"boot-%s.iso" % self.node.uuid,
'kernel_uuid', 'ramdisk_uuid',
'file:///remote_image_share_root/' +
'file:///remote_image_share_root/'
"deploy-%s.iso" % self.node.uuid,
'root-uuid', 'kernel-params', 'uefi')
task.node.refresh()
@ -746,7 +746,7 @@ class IRMCDeployPrivateMethodsTestCase(db_base.DbTestCase):
task.node,
'iso_filename')
get_irmc_client_mock.assert_called_once_with(task.node)
self.assertEqual("iRMC Inserting virtual cdrom failed. " +
self.assertEqual("iRMC Inserting virtual cdrom failed. "
"Reason: fake error", str(e))
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
@ -772,7 +772,7 @@ class IRMCDeployPrivateMethodsTestCase(db_base.DbTestCase):
e = self.assertRaises(exception.IRMCOperationError,
irmc_boot._detach_virtual_cd,
task.node)
self.assertEqual("iRMC Ejecting virtual cdrom failed. " +
self.assertEqual("iRMC Ejecting virtual cdrom failed. "
"Reason: fake error", str(e))
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
@ -825,7 +825,7 @@ class IRMCDeployPrivateMethodsTestCase(db_base.DbTestCase):
task.node,
'iso_filename')
get_irmc_client_mock.assert_called_once_with(task.node)
self.assertEqual("iRMC Inserting virtual floppy failed. " +
self.assertEqual("iRMC Inserting virtual floppy failed. "
"Reason: fake error", str(e))
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,

View File

@ -471,7 +471,7 @@ class IRMCManagementTestCase(db_base.DbTestCase):
e = self.assertRaises(exception.FailedToGetSensorData,
self.driver.management.get_sensors_data,
task)
self.assertEqual("Failed to get sensor data for node 1be26c0b-" +
self.assertEqual("Failed to get sensor data for node 1be26c0b-"
"03f2-4d2e-ae87-c02d7f33c123. Error: Fake Error",
str(e))

View File

@ -80,7 +80,7 @@ class OneViewCommonTestCase(db_base.DbTestCase):
mock_oneview.assert_called_once_with(credentials)
def test_get_ilo_access(self):
url = ("hplocons://addr=1.2.3.4&sessionkey" +
url = ("hplocons://addr=1.2.3.4&sessionkey"
"=a79659e3b3b7c8209c901ac3509a6719")
remote_console = {'remoteConsoleUrl': url}
host_ip, token = common._get_ilo_access(remote_console)

View File

@ -224,8 +224,8 @@ class PXEPrivateMethodsTestCase(db_base.DbTestCase):
ramdisk_label))
}
if (whle_dsk_img or
deploy_utils.get_boot_option(self.node) == 'local'):
if (whle_dsk_img
or deploy_utils.get_boot_option(self.node) == 'local'):
ramdisk = 'no_ramdisk'
kernel = 'no_kernel'
else:
@ -385,8 +385,8 @@ class PXEPrivateMethodsTestCase(db_base.DbTestCase):
kernel = os.path.join(http_url, self.node.uuid, 'kernel')
ramdisk = os.path.join(http_url, self.node.uuid, 'ramdisk')
if (whle_dsk_img or
deploy_utils.get_boot_option(self.node) == 'local'):
if (whle_dsk_img
or deploy_utils.get_boot_option(self.node) == 'local'):
ramdisk = 'no_ramdisk'
kernel = 'no_kernel'
else:

View File

@ -741,8 +741,8 @@ class SNMPDeviceDriverTestCase(db_base.DbTestCase):
mock_client = mock_get_client.return_value
driver = snmp._get_driver(self.node)
attempts = CONF.snmp.power_timeout // driver.retry_interval
mock_client.get.side_effect = ([driver.value_power_off] +
[42] * attempts)
mock_client.get.side_effect = ([driver.value_power_off]
+ [42] * attempts)
pstate = driver.power_reset()
calls = [mock.call(driver._snmp_oid(), driver.value_power_off),
mock.call(driver._snmp_oid(), driver.value_power_on)]
@ -773,8 +773,8 @@ class SNMPDeviceDriverTestCase(db_base.DbTestCase):
mock_client = mock_get_client.return_value
driver = snmp._get_driver(self.node)
attempts = CONF.snmp.power_timeout // driver.retry_interval
mock_client.get.side_effect = ([driver.value_power_off] *
(1 + attempts))
mock_client.get.side_effect = ([driver.value_power_off]
* (1 + attempts))
pstate = driver.power_reset()
calls = [mock.call(driver._snmp_oid(), driver.value_power_off),
mock.call(driver._snmp_oid(), driver.value_power_on)]

View File

@ -227,16 +227,16 @@ class UtilsTestCase(db_base.DbTestCase):
driver_utils.capabilities_to_dict,
capabilities_already_dict
)
self.assertEqual("Value of 'capabilities' must be string. Got " +
str(dict), str(exc))
self.assertEqual("Value of 'capabilities' must be string. Got "
+ str(dict), str(exc))
exc = self.assertRaises(
exception.InvalidParameterValue,
driver_utils.capabilities_to_dict,
capabilities_something_else
)
self.assertEqual("Value of 'capabilities' must be string. Got " +
str(int), str(exc))
self.assertEqual("Value of 'capabilities' must be string. Got "
+ str(int), str(exc))
def test_normalize_mac_string(self):
mac_raw = "0A:1B-2C-3D:4F"
@ -268,8 +268,8 @@ class UtilsRamdiskLogsTestCase(tests_base.TestCase):
node2 = obj_utils.get_test_node(
self.context, instance_uuid=instance_uuid)
name = driver_utils.get_ramdisk_logs_file_name(node2)
expected_name = ('1be26c0b-03f2-4d2e-ae87-c02d7f33c123_' +
instance_uuid + '_2000-01-01-00-00-00.tar.gz')
expected_name = ('1be26c0b-03f2-4d2e-ae87-c02d7f33c123_'
+ instance_uuid + '_2000-01-01-00-00-00.tar.gz')
self.assertEqual(expected_name, name)
@mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True)

View File

@ -250,8 +250,8 @@ def get_payloads_with_schemas(from_module):
# First class is this payload class, parent class is the 2nd
# one in the tuple
parent = base_classes[1]
if (not hasattr(parent, 'SCHEMA') or
parent.SCHEMA != payload.SCHEMA):
if (not hasattr(parent, 'SCHEMA')
or parent.SCHEMA != payload.SCHEMA):
payloads.append(payload)
return payloads

View File

@ -106,9 +106,9 @@ commands = {posargs}
[flake8]
# [W503] Line break before binary operator.
# NOTE(TheJulia): Adding E305,W504,W605,W606,E501,F405 to the ignore list
# NOTE(TheJulia): Adding E305,W605,W606,E501,F405 to the ignore list
# until we are able to clean them up in the code base.
ignore = E129,W503,E305,W504,W605,W606,E501,F405
ignore = E129,W503,E305,W605,W606,E501,F405
filename = *.py,app.wsgi
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build
import-order-style = pep8
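
For context on the wrapping style the hunks above adopt: pycodestyle's W504 flags a line break placed after a binary operator, while W503 flags a break placed before one. With W503 kept in the ignore list and W504 now enforced, continuation lines lead with the operator. The snippet below is a minimal, self-contained sketch of the two layouts using made-up variables (not code from this tree):

# Runnable sketch with hypothetical values; only the wrapping differs.
maintenance = True
power_state = None

# Old layout, now flagged by W504: the 'and' ends the first line.
legacy = (maintenance and
          power_state is None)

# Layout adopted by this change, permitted because W503 stays ignored:
adopted = (maintenance
           and power_state is None)

assert legacy == adopted  # identical semantics, different line breaks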