Merge "Fix all pep8 E129 errors"
commit 805359d9a2
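
For context: pycodestyle's E129 ("visually indented line with same indent as next logical line") fires when the continuation of a multi-line condition lines up exactly with the body that follows it, so the condition and the body read as one block. The hunks below clear the warning either by re-indenting the continuation lines (the hunks whose text looks identical presumably differ only in whitespace) or by restructuring the condition. A minimal sketch of the pattern, with made-up names that do not appear in this change:

    # Flagged by E129: the continuation sits at the same indent as the body.
    if (port_is_bound and
        port_is_active):
        update_status(port)

    # Fix 1: give the continuation an extra level of indentation.
    if (port_is_bound and
            port_is_active):
        update_status(port)

    # Fix 2: collapse the condition onto one line when it fits.
    if port_is_bound and port_is_active:
        update_status(port)
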
@@ -162,8 +162,8 @@ class DhcpAgent(manager.Manager):
# will automatically be retried on the notification
self.schedule_resync(e, network.id)
if (isinstance(e, oslo_messaging.RemoteError) and
e.exc_type == 'NetworkNotFound' or
isinstance(e, exceptions.NetworkNotFound)):
LOG.debug("Network %s has been deleted.", network.id)
else:
LOG.exception('Unable to %(action)s dhcp for %(net_id)s.',

@@ -521,7 +521,7 @@ class DhcpAgent(manager.Manager):

def disable_isolated_metadata_proxy(self, network):
if (self.conf.enable_metadata_network and
network.id in self._metadata_routers):
uuid = self._metadata_routers[network.id]
is_router_id = True
else:

@@ -456,7 +456,7 @@ class L3NATAgent(ha.AgentMixin,

def _process_router_if_compatible(self, router):
if (self.conf.external_network_bridge and
not ip_lib.device_exists(self.conf.external_network_bridge)):
LOG.error("The external network bridge '%s' does not exist",
self.conf.external_network_bridge)
return

@@ -309,8 +309,8 @@ class FipNamespace(namespaces.Namespace):

def _update_gateway_port(self, agent_gateway_port, interface_name):
if (self.agent_gateway_port and
not self._check_for_gateway_ip_change(agent_gateway_port)):
return
# Caller already holding lock
self._update_gateway_route(
agent_gateway_port, interface_name, tbl_index=None)

@@ -625,8 +625,8 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
if int_port_addr_value is None:
return False
if ((key != lib_constants.IP_VERSION_6) and
int_port_addr_scopes.get(str(key)) in
ext_port_addr_scopes.values()):
return True
return False

@@ -813,7 +813,7 @@ class Dnsmasq(DhcpLocalProcess):
(iaid, ip, client_id) = parts[1], parts[2], parts[4]
ip = ip.strip('[]')
if (ip_version and
netaddr.IPAddress(ip).version != ip_version):
continue
leases[ip] = {'iaid': iaid,
'client_id': client_id,

@@ -1122,7 +1122,8 @@ class Dnsmasq(DhcpLocalProcess):
continue
for alloc in port.fixed_ips:
if (alloc.subnet_id in subnets and
- subnets[alloc.subnet_id].gateway_ip == alloc.ip_address):
+ subnets[alloc.subnet_id].gateway_ip ==
+ alloc.ip_address):
isolated_subnets[alloc.subnet_id] = False

return isolated_subnets

@@ -1165,7 +1166,7 @@ class Dnsmasq(DhcpLocalProcess):
return False

if (conf.enable_metadata_network and
cls.has_metadata_subnet(all_subnets)):
return True

isolated_subnets = cls.get_isolated_subnets(network)

@@ -499,7 +499,7 @@ class IptablesManager(object):

err = self._do_run_restore(args, commands)
if (isinstance(err, linux_utils.ProcessExecutionError) and
err.returncode == XTABLES_RESOURCE_PROBLEM_CODE):
# maybe we run on a platform that includes iptables commit
# 999eaa241212d3952ddff39a99d0d55a74e3639e (for example, latest
# RHEL) and failed because of xlock acquired by another

@@ -364,7 +364,7 @@ class ConjIPFlowManager(object):
for sg_conj_id_map in vlan_conj_id_map.values():
for remote_sg_id, unused in unused_dict.items():
if (remote_sg_id in sg_conj_id_map and
sg_conj_id_map[remote_sg_id] & unused):
sg_conj_id_map[remote_sg_id] -= unused
if not sg_conj_id_map[remote_sg_id]:
del sg_conj_id_map[remote_sg_id]

@@ -142,7 +142,7 @@ def merge_port_ranges(rule_conj_list):
cur_conj.remove(conj_id)

if (len(result) == 1 and result[0][0]['port_range_min'] == 1 and
result[0][0]['port_range_max'] == 65535):
del result[0][0]['port_range_min']
del result[0][0]['port_range_max']
return result

@@ -62,7 +62,7 @@ class XenAPIClient(object):
# [XENAPI_PLUGIN_FAILURE_ID, methodname, except_class_name, message]
# We can distinguish the error type by checking the message string.
if (len(failure_details) == 4 and
XENAPI_PLUGIN_FAILURE_ID == failure_details[0]):
if (MSG_UNAUTHORIZED == failure_details[3]):
return oslo_rootwrap_cmd.RC_UNAUTHORIZED
elif (MSG_NOT_FOUND == failure_details[3]):

@@ -113,7 +113,7 @@ class SecurityGroupAgentRpc(object):
@functools.wraps(func)
def decorated_function(self, *args, **kwargs):
if (isinstance(self.firewall, firewall.NoopFirewallDriver) or
not is_firewall_enabled()):
LOG.info("Skipping method %s as firewall is disabled "
"or configured as NoopFirewallDriver.",
func.__name__)

@@ -147,7 +147,7 @@ def get_limit_and_marker(request):
def _get_pagination_max_limit():
max_limit = -1
if (cfg.CONF.pagination_max_limit.lower() !=
constants.PAGINATION_INFINITE):
try:
max_limit = int(cfg.CONF.pagination_max_limit)
if max_limit == 0:

@@ -51,7 +51,7 @@ def pull(resource_type, resource_id, **kwargs):
obj = callback(resource_type, resource_id, **kwargs)
if obj:
if (not isinstance(obj, base.NeutronObject) or
resource_type != obj.obj_name()):
raise exceptions.CallbackWrongResourceType(
resource_type=resource_type)
return obj

@@ -169,11 +169,12 @@ class Controller(object):
attr_data = self._attr_info.get(attr_name)
if attr_data and attr_data['is_visible']:
if policy.check(
context,
- '%s:%s' % (self._plugin_handlers[self.SHOW], attr_name),
+ '%s:%s' % (self._plugin_handlers[self.SHOW],
+ attr_name),
data,
might_not_exist=True,
pluralized=self._collection):
# this attribute is visible, check next one
continue
# if the code reaches this point then either the policy check

@@ -375,7 +375,7 @@ def enable_tests_from_config():
if cfg.CONF.SECURITYGROUP.enable_security_group:
cfg.CONF.set_default('ip6tables_installed', True)
if ('sriovnicswitch' in cfg.CONF.ml2.mechanism_drivers and
'qos' in cfg.CONF.ml2.extension_drivers):
cfg.CONF.set_default('vf_extended_management', True)
if cfg.CONF.SECURITYGROUP.firewall_driver in (
'iptables',

@@ -73,8 +73,9 @@ class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
agent_data = agent['agent']
agent_notifier = self.agent_notifiers.get(original_agent['agent_type'])
if (agent_notifier and
'admin_state_up' in agent_data and
- original_agent['admin_state_up'] != agent_data['admin_state_up']):
+ original_agent['admin_state_up'] !=
+ agent_data['admin_state_up']):
agent_notifier.agent_updated(context,
agent_data['admin_state_up'],
original_agent['host'])

@@ -278,7 +278,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
# raise if multiple tenants found or if the only tenant found
# is not the owner of the network
if (len(tenant_ids) > 1 or len(tenant_ids) == 1 and
original.tenant_id not in tenant_ids):
self._validate_projects_have_access_to_network(
original, tenant_ids)

@@ -570,7 +570,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
error_message = _("Subnet has a prefix length that is "
"incompatible with DHCP service enabled")
if ((ip_ver == 4 and subnet_prefixlen > 30) or
(ip_ver == 6 and subnet_prefixlen > 126)):
raise exc.InvalidInput(error_message=error_message)

net = netaddr.IPNetwork(s['cidr'])

@@ -937,7 +937,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
for ip in port['fixed_ips']:
if ip['subnet_id'] == result['id']:
if (port['device_owner'] in
constants.ROUTER_INTERFACE_OWNERS):
routers.append(port['device_id'])
ip['ip_address'] = result['gateway_ip']
else:

@@ -1244,7 +1244,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,

def _check_mac_addr_update(self, context, port, new_mac, device_owner):
if (device_owner and
- device_owner.startswith(constants.DEVICE_OWNER_NETWORK_PREFIX)):
+ device_owner.startswith(
+ constants.DEVICE_OWNER_NETWORK_PREFIX)):
raise n_exc.UnsupportedPortDeviceOwner(
op=_("mac address update"), port_id=id,
device_owner=device_owner)

@@ -134,7 +134,7 @@ class DNSDbMixin(object):
context, floatingip_data))
if dns_data_db:
if (dns_data_db['published_dns_name'] != current_dns_name or
dns_data_db['published_dns_domain'] != current_dns_domain):
dns_actions_data = DNSActionsData(
previous_dns_name=dns_data_db['published_dns_name'],
previous_dns_domain=dns_data_db['published_dns_domain'])

@@ -238,7 +238,7 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
subnet_list = self._get_subnets(context)
for subnet in subnet_list:
if ((netaddr.IPSet([subnet.cidr]) & new_subnet_ipset) and
str(subnet.cidr) != const.PROVISIONAL_IPV6_PD_PREFIX):
# don't give out details of the overlapping subnet
err_msg = ("Requested subnet with cidr: %(cidr)s for "
"network: %(network_id)s overlaps with another "

@@ -99,7 +99,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
raise l3agentscheduler.DVRL3CannotAssignToDvrAgent()

if (agent_mode == constants.L3_AGENT_MODE_LEGACY and
router.get('distributed')):
raise l3agentscheduler.RouterL3AgentMismatch(
router_id=router['id'], agent_id=agent['id'])

@@ -333,7 +333,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
if (not extensions.is_extension_supported(
l3_plugin,
constants.L3_AGENT_SCHEDULER_EXT_ALIAS) or
l3_plugin.router_scheduler is None):
# that might mean that we are dealing with non-agent-based
# implementation of l3 services
return

@@ -801,7 +801,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
'subnet_id': subnet['id']}

if (subnet['ip_version'] == 6 and not
ipv6_utils.is_ipv6_pd_enabled(subnet)):
# Add new prefix to an existing ipv6 port with the same network id
# if one exists
port = self._find_ipv6_router_port_by_network(context, router,

@@ -361,7 +361,7 @@ class DVRResourceOperationHandler(object):
# dvr service ports except for the compute port and
# dhcp port.
if (port['device_owner'] == "" or
port['device_owner'] in allowed_device_owners):
addr_pair_active_service_port_list = (
self._get_ports_for_allowed_address_pair_ip(
admin_ctx, port['network_id'],

@@ -458,8 +458,8 @@ class DVRResourceOperationHandler(object):
for port in router.attached_ports:
p = port['port']
if (p['network_id'] == net_id and
p['device_owner'] == device_owner and
self.l3plugin._port_has_ipv6_address(p)):
return self.l3plugin._core_plugin._make_port_dict(p)

def _check_for_multiprefix_csnat_port_and_update(

@@ -678,7 +678,7 @@ class _DVRAgentInterfaceMixin(object):
# Skip if it is bound, but not to the given host
fip_dest_host = floating_ip.get('dest_host')
if (fip_host != l3_const.FLOATING_IP_HOST_NEEDS_BINDING and
fip_host != host and fip_dest_host is None):
return True

# not being skipped, log host

@@ -721,8 +721,7 @@ class _DVRAgentInterfaceMixin(object):
# All unbound ports with floatingip irrespective of
# the device owner should be included as valid ports
# and updated.
- if (port_in_migration or
- self._is_unbound_port(port)):
+ if port_in_migration or self._is_unbound_port(port):
port_dict.update({port['id']: port})
continue
port_host = port[portbindings.HOST_ID]

@@ -63,8 +63,8 @@ def _notify_l3_agent_ha_port_update(resource, event, trigger, **kwargs):
if new_port and original_port and host:
new_device_owner = new_port.get('device_owner', '')
if (new_device_owner == constants.DEVICE_OWNER_ROUTER_HA_INTF and
new_port['status'] == constants.PORT_STATUS_ACTIVE and
original_port['status'] != new_port['status']):
l3plugin = directory.get_plugin(plugin_constants.L3)
l3plugin.l3_rpc_notifier.routers_updated_on_host(
context, [new_port['device_id']], host)

@@ -122,7 +122,7 @@ def do_upgrade(config, cmd):
branch = None

if ((CONF.command.revision or CONF.command.delta) and
(CONF.command.expand or CONF.command.contract)):
raise SystemExit(_(
'Phase upgrade options do not accept revision specification'))

@@ -440,8 +440,7 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):

def _validate_port_range(self, rule):
"""Check that port_range is valid."""
- if (rule['port_range_min'] is None and
- rule['port_range_max'] is None):
+ if rule['port_range_min'] is None and rule['port_range_max'] is None:
return
if not rule['protocol']:
raise ext_sg.SecurityGroupProtocolRequiredWithPorts()

@@ -474,8 +473,8 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
# Only the protocols above support port ranges, raise otherwise.
# When min/max are the same it is just a single port.
if (rule['port_range_min'] is not None and
rule['port_range_max'] is not None and
rule['port_range_min'] != rule['port_range_max']):
raise ext_sg.SecurityGroupInvalidProtocolForPortRange(
protocol=ip_proto)

@@ -117,7 +117,7 @@ class SecurityGroupServerNotifierRpcMixin(sg_db.SecurityGroupDbMixin):
# since it includes the legacy router interface device owners
# and DVR router interface device owners.
if (port['device_owner'] not in
[const.DEVICE_OWNER_DHCP, const.ROUTER_INTERFACE_OWNERS]):
sec_groups |= set(port.get(ext_sg.SECURITYGROUPS))

if sec_groups:

@@ -172,7 +172,7 @@ class SecurityGroupInfoAPIMixin(object):
security_group_id = rule_in_db.get('security_group_id')
ethertype = rule_in_db['ethertype']
if ('security_group_source_groups'
not in sg_info['devices'][port_id]):
sg_info['devices'][port_id][
'security_group_source_groups'] = []

@@ -79,7 +79,7 @@ def validate_post_plugin_load():
message. If all is OK then it will return None.
"""
if ('dhcp_agents_per_network' in cfg.CONF and
cfg.CONF.dhcp_agents_per_network <= 0):
msg = _("dhcp_agents_per_network must be >= 1. '%s' "
"is invalid.") % cfg.CONF.dhcp_agents_per_network
return msg

@@ -134,7 +134,7 @@ class QosPolicy(rbac_db.NeutronRbacObject):
policy_obj = super(QosPolicy, cls).get_object(admin_context,
**kwargs)
if (not policy_obj or
not cls.is_accessible(context, policy_obj)):
return

policy_obj.obj_load_attr('rules')

@@ -234,13 +234,13 @@ class PolicyHook(hooks.PecanHook):
attr_data = controller.resource_info.get(attr_name)
if attr_data and attr_data['is_visible']:
if policy.check(
context,
# NOTE(kevinbenton): this used to reference a
# _plugin_handlers dict, why?
'get_%s:%s' % (resource, attr_name),
data,
might_not_exist=True,
pluralized=collection):
# this attribute is visible, check next one
continue
# if the code reaches this point then either the policy check

@@ -141,7 +141,7 @@ class L2populationMechanismDriver(api.MechanismDriver):
orig = context.original

if (orig['mac_address'] != port['mac_address'] and
context.status == const.PORT_STATUS_ACTIVE):
msg = _("unable to modify mac_address of ACTIVE port "
"%s") % port['id']
raise exceptions.InvalidInput(error_message=msg)

@@ -196,7 +196,7 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
# prefix = mix_interface.1 (backward compatible)
# prefix = mix_iHASHED.1111
if (len(physical_interface) + len(vlan_postfix) >
constants.DEVICE_NAME_MAX_LEN):
physical_interface = plugin_utils.get_interface_name(
physical_interface, max_len=(constants.DEVICE_NAME_MAX_LEN -
MAX_VLAN_POSTFIX_LEN))

@@ -82,7 +82,7 @@ class SriovNicSwitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
if profile:
capabilities = profile.get('capabilities', [])
if (vnic_type == portbindings.VNIC_DIRECT and
'switchdev' in capabilities):
LOG.debug("Refusing to bind due to unsupported vnic_type: %s "
"with switchdev capability", portbindings.VNIC_DIRECT)
return

@@ -1425,7 +1425,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type):
try:
if (netaddr.IPAddress(self.local_ip).version !=
netaddr.IPAddress(remote_ip).version):
LOG.error("IP version mismatch, cannot create tunnel: "
"local_ip=%(lip)s remote_ip=%(rip)s",
{'lip': self.local_ip, 'rip': remote_ip})

@@ -2065,8 +2065,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
# Secure and wire/unwire VIFs and update their status
# on Neutron server
if (self._port_info_has_changes(port_info) or
self.sg_agent.firewall_refresh_needed() or
ovs_restarted):
LOG.debug("Starting to process devices in:%s",
port_info)
failed_devices = self.process_network_ports(

@@ -2153,7 +2153,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,

def _check_agent_configurations(self):
if (self.enable_distributed_routing and self.enable_tunneling and
not self.l2_pop):

raise ValueError(_("DVR deployments for VXLAN/GRE/Geneve "
"underlays require L2-pop to be enabled, "

@@ -85,7 +85,7 @@ class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
if profile:
capabilities = profile.get('capabilities', [])
if (vnic_type == portbindings.VNIC_DIRECT and
'switchdev' not in capabilities):
LOG.debug("Refusing to bind due to unsupported vnic_type: %s with "
"no switchdev capability", portbindings.VNIC_DIRECT)
return

@@ -96,8 +96,8 @@ class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
if (any(x in caps.get('iface_types', []) for x
in [a_const.OVS_DPDK_VHOST_USER,
a_const.OVS_DPDK_VHOST_USER_CLIENT]) and
agent['configurations'].get('datapath_type') ==
a_const.OVS_DATAPATH_NETDEV):
return portbindings.VIF_TYPE_VHOST_USER
return self.vif_type

@@ -143,7 +143,7 @@ class DNSExtensionDriver(api.ExtensionDriver):
is_dns_domain_changed = (dns_domain is not None and
dns_data_db[dns_apidef.DNSDOMAIN] != dns_domain)
if (is_dns_name_changed or is_dns_domain_changed or
(has_fixed_ips and dns_data_db['current_dns_name'])):
dns_data_db = self._populate_previous_external_dns_data(
dns_data_db)
dns_data_db = self._populate_current_external_dns_data(

@@ -329,14 +329,13 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
host = attrs.get(portbindings.HOST_ID) or ''

original_host = binding.host
- if (validators.is_attr_set(host) and
- original_host != host):
+ if validators.is_attr_set(host) and original_host != host:
binding.host = host
changes = True

vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE)
if (validators.is_attr_set(vnic_type) and
binding.vnic_type != vnic_type):
binding.vnic_type = vnic_type
changes = True

@@ -564,7 +563,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,

# Update the port status if requested by the bound driver.
if (bind_context._binding_levels and
bind_context._new_port_status):
port_db.status = bind_context._new_port_status
port['status'] = bind_context._new_port_status

@@ -886,13 +885,12 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
# relationship can be updated.
context.session.expire(db_network)

- if (
- mtuw_apidef.MTU in net_data or
+ if (mtuw_apidef.MTU in net_data or
# NOTE(ihrachys) mtu may be null for existing networks,
# calculate and update it as needed; the conditional can be
# removed in Queens when we populate all mtu attributes and
# enforce it's not nullable on database level
db_network.mtu is None):
db_network.mtu = self._get_network_mtu(db_network,
validate=False)
# agents should now update all ports to reflect new MTU

@@ -1747,7 +1745,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
port_id = port.id
if ((port.status != status and
port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE) or
port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
attr = {
'id': port.id,
portbindings.HOST_ID: host,

@@ -1759,7 +1757,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
with db_api.context_manager.writer.using(context):
context.session.add(port)  # bring port into writer session
if (port.status != status and
port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE):
original_port = self._make_port_dict(port)
port.status = status
# explicit flush before _make_port_dict to ensure extensions

@@ -1783,7 +1781,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
updated = True

if (updated and
port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
with db_api.context_manager.writer.using(context):
port = db.get_port(context, port_id)
if not port:

@@ -1915,7 +1913,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
events.AFTER_DELETE))
def _handle_segment_change(self, rtype, event, trigger, context, segment):
if (event == events.PRECOMMIT_CREATE and
not isinstance(trigger, segments_plugin.Plugin)):
# TODO(xiaohhui): Now, when create network, ml2 will reserve
# segment and trigger this event handler. This event handler
# will reserve segment again, which will lead to error as the

@@ -1957,7 +1955,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
self, context, network_with_segments,
original_network=network_with_segments)
if (event == events.PRECOMMIT_CREATE or
event == events.PRECOMMIT_DELETE):
self.mechanism_manager.update_network_precommit(mech_context)
elif event == events.AFTER_CREATE or event == events.AFTER_DELETE:
self.mechanism_manager.update_network_postcommit(mech_context)

@@ -264,7 +264,7 @@ class TrackedResource(BaseResource):
# or if data migrations do not take care of usage counters, the
# assumption will not hold anymore
if (tenant_id in self._dirty_tenants or
not usage_info or usage_info.dirty):
LOG.debug(("Usage tracker for resource:%(resource)s and tenant:"
"%(tenant_id)s is out of sync, need to count used "
"quota"), {'resource': self.name,

@@ -171,7 +171,7 @@ class ResourceRegistry(object):
"""

if (not cfg.CONF.QUOTAS.track_quota_usage or
resource_name not in self._tracked_resource_mappings):
LOG.info("Creating instance of CountableResource for "
"resource:%s", resource_name)
return resource.CountableResource(

@@ -85,7 +85,7 @@ class AutoScheduler(object):
az_hints = (net.get(az_def.AZ_HINTS) or
cfg.CONF.default_availability_zones)
if (az_hints and
dhcp_agent['availability_zone'] not in az_hints):
continue
bindings_to_add.append((dhcp_agent, net_id))
# do it outside transaction so particular scheduling results don't

@@ -106,7 +106,7 @@ class L3Scheduler(object):

for router, count in plugin.get_routers_l3_agents_count(context):
if (count < 1 or
router.get('ha', False) and count < max_agents_for_ha):
# Either the router was un-scheduled (scheduled to 0 agents),
# or it's an HA router and it was under-scheduled (scheduled to
# less than max_agents_for_ha). Either way, it should be added

@@ -69,16 +69,16 @@ class Designate(driver.ExternalDNSService):
ipv6_ptr_zone_size = CONF.designate.ipv6_ptr_zone_prefix_size

if (ipv4_ptr_zone_size < IPV4_PTR_ZONE_PREFIX_MIN_SIZE or
ipv4_ptr_zone_size > IPV4_PTR_ZONE_PREFIX_MAX_SIZE or
(ipv4_ptr_zone_size % 8) != 0):
raise dns_exc.InvalidPTRZoneConfiguration(
parameter='ipv4_ptr_zone_size', number='8',
maximum=str(IPV4_PTR_ZONE_PREFIX_MAX_SIZE),
minimum=str(IPV4_PTR_ZONE_PREFIX_MIN_SIZE))

if (ipv6_ptr_zone_size < IPV6_PTR_ZONE_PREFIX_MIN_SIZE or
ipv6_ptr_zone_size > IPV6_PTR_ZONE_PREFIX_MAX_SIZE or
(ipv6_ptr_zone_size % 4) != 0):
raise dns_exc.InvalidPTRZoneConfiguration(
parameter='ipv6_ptr_zone_size', number='4',
maximum=str(IPV6_PTR_ZONE_PREFIX_MAX_SIZE),

@@ -407,7 +407,7 @@ class NovaSegmentNotifier(object):
def _does_port_require_nova_inventory_update(self, port):
device_owner = port.get('device_owner')
if (device_owner.startswith(constants.DEVICE_OWNER_COMPUTE_PREFIX) or
device_owner == constants.DEVICE_OWNER_DHCP):
return False
return True

@@ -58,7 +58,7 @@ def _update_timestamp(session, context, instances):
while objs_list:
obj = objs_list.pop()
if (isinstance(obj, standard_attr.HasStandardAttributes) and
obj.standard_attr_id):
obj.updated_at = timeutils.utcnow()

@@ -302,7 +302,8 @@ def get_unassigned_pd_interfaces(router):
for intf in router[lib_constants.INTERFACE_KEY]:
for subnet in intf['subnets']:
if (ipv6_utils.is_ipv6_pd_enabled(subnet) and
- subnet['cidr'] == lib_constants.PROVISIONAL_IPV6_PD_PREFIX):
+ subnet['cidr'] ==
+ lib_constants.PROVISIONAL_IPV6_PD_PREFIX):
pd_intfs.append(intf)
return pd_intfs

@@ -312,7 +313,8 @@ def assign_prefix_for_pd_interfaces(router):
for ifno, intf in enumerate(router[lib_constants.INTERFACE_KEY]):
for subnet in intf['subnets']:
if (ipv6_utils.is_ipv6_pd_enabled(subnet) and
- subnet['cidr'] == lib_constants.PROVISIONAL_IPV6_PD_PREFIX):
+ subnet['cidr'] ==
+ lib_constants.PROVISIONAL_IPV6_PD_PREFIX):
subnet['cidr'] = "2001:db8:%d::/64" % ifno
pd_intfs.append(intf)
return pd_intfs

@ -595,7 +595,7 @@ class VethFixture(fixtures.Fixture):
|
|||||||
for port in self.ports:
|
for port in self.ports:
|
||||||
ip_wrapper = ip_lib.IPWrapper(port.namespace)
|
ip_wrapper = ip_lib.IPWrapper(port.namespace)
|
||||||
if (ip_wrapper.netns.exists(port.namespace) or
|
if (ip_wrapper.netns.exists(port.namespace) or
|
||||||
port.namespace is None):
|
port.namespace is None):
|
||||||
try:
|
try:
|
||||||
ip_wrapper.del_veth(port.name)
|
ip_wrapper.del_veth(port.name)
|
||||||
break
|
break
|
||||||
@ -666,7 +666,7 @@ class MacvtapFixture(fixtures.Fixture):
|
|||||||
|
|
||||||
def destroy(self):
|
def destroy(self):
|
||||||
if (ip_lib.network_namespace_exists(self.ip_dev.namespace) or
|
if (ip_lib.network_namespace_exists(self.ip_dev.namespace) or
|
||||||
self.ip_dev.namespace is None):
|
self.ip_dev.namespace is None):
|
||||||
try:
|
try:
|
||||||
self.ip_dev.link.delete()
|
self.ip_dev.link.delete()
|
||||||
except RuntimeError:
|
except RuntimeError:
|
||||||
|
@ -109,7 +109,7 @@ class FakeFullstackMachine(machine_fixtures.FakeMachineBase):
|
|||||||
subnet_id = fixed_ip['subnet_id']
|
subnet_id = fixed_ip['subnet_id']
|
||||||
subnet = self.safe_client.client.show_subnet(subnet_id)
|
subnet = self.safe_client.client.show_subnet(subnet_id)
|
||||||
if (netaddr.IPAddress(fixed_ip['ip_address']).version ==
|
if (netaddr.IPAddress(fixed_ip['ip_address']).version ==
|
||||||
constants.IP_VERSION_6):
|
constants.IP_VERSION_6):
|
||||||
# v6Address/default_route is auto-configured.
|
# v6Address/default_route is auto-configured.
|
||||||
self._ipv6 = fixed_ip['ip_address']
|
self._ipv6 = fixed_ip['ip_address']
|
||||||
self.gateway_ipv6 = subnet['subnet']['gateway_ip']
|
self.gateway_ipv6 = subnet['subnet']['gateway_ip']
|
||||||
|
@ -55,7 +55,7 @@ class BaseSecurityGroupsSameNetworkTest(base.BaseFullStackTestCase):
|
|||||||
super(BaseSecurityGroupsSameNetworkTest, self).setUp(env)
|
super(BaseSecurityGroupsSameNetworkTest, self).setUp(env)
|
||||||
|
|
||||||
if (self.firewall_driver == 'openvswitch' and
|
if (self.firewall_driver == 'openvswitch' and
|
||||||
not OVSVersionChecker.supports_ovsfirewall()):
|
not OVSVersionChecker.supports_ovsfirewall()):
|
||||||
self.skipTest("Open vSwitch firewall_driver doesn't work "
|
self.skipTest("Open vSwitch firewall_driver doesn't work "
|
||||||
"with this version of ovs.")
|
"with this version of ovs.")
|
||||||
|
|
||||||
|
@ -1220,7 +1220,7 @@ class TestDnsmasq(TestBase):
|
|||||||
possible_leases = 0
|
possible_leases = 0
|
||||||
for i, s in enumerate(network.subnets):
|
for i, s in enumerate(network.subnets):
|
||||||
if (s.ip_version != 6 or
|
if (s.ip_version != 6 or
|
||||||
s.ipv6_address_mode == constants.DHCPV6_STATEFUL):
|
s.ipv6_address_mode == constants.DHCPV6_STATEFUL):
|
||||||
if s.ip_version == 4:
|
if s.ip_version == 4:
|
||||||
expected.extend([prefix % (
|
expected.extend([prefix % (
|
||||||
i, s.cidr.split('/')[0],
|
i, s.cidr.split('/')[0],
|
||||||
|
@ -275,7 +275,7 @@ class SGServerRpcCallBackTestCase(test_sg.SecurityGroupDBTestCase):
|
|||||||
def _port_with_addr_pairs_and_security_group(self):
|
def _port_with_addr_pairs_and_security_group(self):
|
||||||
plugin_obj = directory.get_plugin()
|
plugin_obj = directory.get_plugin()
|
||||||
if ('allowed-address-pairs'
|
if ('allowed-address-pairs'
|
||||||
not in plugin_obj.supported_extension_aliases):
|
not in plugin_obj.supported_extension_aliases):
|
||||||
self.skipTest("Test depends on allowed-address-pairs extension")
|
self.skipTest("Test depends on allowed-address-pairs extension")
|
||||||
fake_prefix = FAKE_PREFIX['IPv4']
|
fake_prefix = FAKE_PREFIX['IPv4']
|
||||||
with self.network() as n,\
|
with self.network() as n,\
|
||||||
|
@ -215,7 +215,7 @@ class AgentSchedulerTestMixIn(object):
|
|||||||
agents = self._list_agents()
|
agents = self._list_agents()
|
||||||
for agent_data in agents['agents']:
|
for agent_data in agents['agents']:
|
||||||
if (agent_data['agent_type'] == agent_type and
|
if (agent_data['agent_type'] == agent_type and
|
||||||
agent_data['host'] == host):
|
agent_data['host'] == host):
|
||||||
return agent_data['id']
|
return agent_data['id']
|
||||||
|
|
||||||
|
|
||||||
@ -1139,7 +1139,7 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
|
|||||||
default = l3agents['agents'][0]['id']
|
default = l3agents['agents'][0]['id']
|
||||||
for com in agents['agents']:
|
for com in agents['agents']:
|
||||||
if (com['id'] != default and
|
if (com['id'] != default and
|
||||||
com['agent_type'] == constants.AGENT_TYPE_L3):
|
com['agent_type'] == constants.AGENT_TYPE_L3):
|
||||||
another_l3_agent_id = com['id']
|
another_l3_agent_id = com['id']
|
||||||
another_l3_agent_host = com['host']
|
another_l3_agent_host = com['host']
|
||||||
break
|
break
|
||||||
|
@ -341,7 +341,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
|
|||||||
data['subnet'][arg] = kwargs[arg]
|
data['subnet'][arg] = kwargs[arg]
|
||||||
|
|
||||||
if ('gateway_ip' in kwargs and
|
if ('gateway_ip' in kwargs and
|
||||||
kwargs['gateway_ip'] is not constants.ATTR_NOT_SPECIFIED):
|
kwargs['gateway_ip'] is not constants.ATTR_NOT_SPECIFIED):
|
||||||
data['subnet']['gateway_ip'] = kwargs['gateway_ip']
|
data['subnet']['gateway_ip'] = kwargs['gateway_ip']
|
||||||
|
|
||||||
subnet_req = self.new_create_request('subnets', data, fmt)
|
subnet_req = self.new_create_request('subnets', data, fmt)
|
||||||
@ -401,9 +401,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
|
|||||||
data['port'][arg] = kwargs[arg]
|
data['port'][arg] = kwargs[arg]
|
||||||
# create a dhcp port device id if one hasn't been supplied
|
# create a dhcp port device id if one hasn't been supplied
|
||||||
if ('device_owner' in kwargs and
|
if ('device_owner' in kwargs and
|
||||||
kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
|
kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
|
||||||
'host' in kwargs and
|
'host' in kwargs and
|
||||||
'device_id' not in kwargs):
|
'device_id' not in kwargs):
|
||||||
device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
|
device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
|
||||||
data['port']['device_id'] = device_id
|
data['port']['device_id'] = device_id
|
||||||
port_req = self.new_create_request('ports', data, fmt)
|
port_req = self.new_create_request('ports', data, fmt)
|
||||||
@ -4351,8 +4351,8 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
|
|||||||
ipv6_ra_mode=addr_mode,
|
ipv6_ra_mode=addr_mode,
|
||||||
ipv6_address_mode=addr_mode)
|
ipv6_address_mode=addr_mode)
|
||||||
if (insert_db_reference_error or insert_address_allocated or
|
if (insert_db_reference_error or insert_address_allocated or
|
||||||
device_owner == constants.DEVICE_OWNER_ROUTER_SNAT or
|
device_owner == constants.DEVICE_OWNER_ROUTER_SNAT or
|
||||||
device_owner in constants.ROUTER_INTERFACE_OWNERS):
|
device_owner in constants.ROUTER_INTERFACE_OWNERS):
|
||||||
# DVR SNAT, router interfaces and DHCP ports should not have
|
# DVR SNAT, router interfaces and DHCP ports should not have
|
||||||
# been updated with addresses from the new auto-address subnet
|
# been updated with addresses from the new auto-address subnet
|
||||||
self.assertEqual(1, len(port['port']['fixed_ips']))
|
self.assertEqual(1, len(port['port']['fixed_ips']))
|
||||||
|
@ -91,9 +91,9 @@ class DnsExtensionTestCase(test_plugin.Ml2PluginV2TestCase):
|
|||||||
data['port'][arg] = kwargs[arg]
|
data['port'][arg] = kwargs[arg]
|
||||||
# create a dhcp port device id if one hasn't been supplied
|
# create a dhcp port device id if one hasn't been supplied
|
||||||
if ('device_owner' in kwargs and
|
if ('device_owner' in kwargs and
|
||||||
kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
|
kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
|
||||||
'host' in kwargs and
|
'host' in kwargs and
|
||||||
'device_id' not in kwargs):
|
'device_id' not in kwargs):
|
||||||
device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
|
device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
|
||||||
data['port']['device_id'] = device_id
|
data['port']['device_id'] = device_id
|
||||||
port_req = self.new_create_request('ports', data, fmt)
|
port_req = self.new_create_request('ports', data, fmt)
|
||||||
|
@ -2114,7 +2114,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
|
|||||||
self.assertEqual(400, res.status_int)
|
self.assertEqual(400, res.status_int)
|
||||||
for p in self._list('ports')['ports']:
|
for p in self._list('ports')['ports']:
|
||||||
if (p['device_owner'] ==
|
if (p['device_owner'] ==
|
||||||
lib_constants.DEVICE_OWNER_FLOATINGIP):
|
lib_constants.DEVICE_OWNER_FLOATINGIP):
|
||||||
self.fail('garbage port is not deleted')
|
self.fail('garbage port is not deleted')
|
||||||
|
|
||||||
def test_floatingip_with_assoc_fails(self):
|
def test_floatingip_with_assoc_fails(self):
|
||||||
|
@@ -105,7 +105,7 @@ class PortSecurityTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
             self._process_port_port_security_create(context, p, neutron_db)

             if (validators.is_attr_set(p.get(ext_sg.SECURITYGROUPS)) and
-                not (port_security and has_ip)):
+                    not (port_security and has_ip)):
                 raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups()

             # Port requires ip and port_security enabled for security group
@@ -90,9 +90,9 @@ class QosPolicyObjectTestCase(test_base.BaseObjectIfaceTestCase):
                 return_value=[self.db_objs[0]]) as get_objects_mock:

             with mock.patch.object(
-                self.context,
-                'elevated',
-                return_value=admin_context) as context_mock:
+                    self.context,
+                    'elevated',
+                    return_value=admin_context) as context_mock:

                 objs = self._test_class.get_objects(
                     self.context,
@@ -1152,10 +1152,10 @@ class BaseObjectIfaceTestCase(_BaseObjectTestCase, test_base.BaseTestCase):
                        return_value={'a': 'a', 'b': 'b', 'c': 'c'})
     def test_update_changes_forbidden(self, *mocks):
         with mock.patch.object(
-            self._test_class,
-            'fields_no_update',
-            new_callable=mock.PropertyMock(return_value=['a', 'c']),
-            create=True):
+                self._test_class,
+                'fields_no_update',
+                new_callable=mock.PropertyMock(return_value=['a', 'c']),
+                create=True):
             obj = self._test_class(self.context, **self.obj_fields[0])
             self.assertRaises(o_exc.NeutronObjectUpdateForbidden, obj.update)

@@ -92,7 +92,7 @@ class TestMechanismDriver(api.MechanismDriver):
         if context.vif_type in (portbindings.VIF_TYPE_UNBOUND,
                                 portbindings.VIF_TYPE_BINDING_FAILED):
             if (context.segments_to_bind and
-                context.segments_to_bind[0][api.NETWORK_TYPE] == 'vlan'):
+                    context.segments_to_bind[0][api.NETWORK_TYPE] == 'vlan'):
                 # Partially bound.
                 self._check_bound(context.binding_levels,
                                   context.top_bound_segment,
@@ -188,7 +188,7 @@ class TestMechanismDriver(api.MechanismDriver):
     def update_port_precommit(self, context):
         if ((context.original_top_bound_segment and
-             not context.top_bound_segment) or
-            (context.host == "host-fail")):
+                not context.top_bound_segment) or
+                (context.host == "host-fail")):
             self.bound_ports.remove((context.original['id'],
                                      context.original_host))
             self._check_port_context(context, True)
@@ -282,7 +282,7 @@ class TestAutoScheduleNetworks(TestDhcpSchedulerBaseTestCase):
         expected_hosted_agents = (1 if expected_result and
                                   self.valid_host else 0)
         if (self.az_hints and
-            agents[0]['availability_zone'] not in self.az_hints):
+                agents[0]['availability_zone'] not in self.az_hints):
             expected_hosted_agents = 0
         host = "host-a" if self.valid_host else "host-b"
         observed_ret_value = scheduler.auto_schedule_networks(
@@ -1243,8 +1243,8 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase):
         }

         with mock.patch(
-            'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports',
-            return_value=[dvr_port]):
+                'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports',
+                return_value=[dvr_port]):
             sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
                                                         r1['id'])
             self.assertEqual(sub_ids.pop(),
@@ -524,8 +524,8 @@ class TestQosPlugin(base.BaseQosTestCase):
         setattr(_policy, "rules", [self.rule])
         with mock.patch('neutron.objects.qos.rule.get_rules',
                         return_value=[self.rule]), mock.patch(
-            'neutron.objects.qos.policy.QosPolicy.get_object',
-            return_value=_policy):
+                'neutron.objects.qos.policy.QosPolicy.get_object',
+                return_value=_policy):
             self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1
             self.qos_plugin.update_policy_bandwidth_limit_rule(
                 self.ctxt, self.rule.id, self.policy.id, self.rule_data)
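
The multi-line mock.patch calls in the test hunks above get the same E129 treatment as the if statements: the continuation arguments are pushed one level past the body of the with block. A minimal, self-contained sketch of that formatting, using a standard-library patch target rather than a Neutron one:

from unittest import mock
import os.path

# The continuation arguments sit deeper than the with body, so the call and
# the block it guards stay visually distinct.
with mock.patch(
        'os.path.exists',
        return_value=True):
    print(os.path.exists('/no/such/path'))  # True while the patch is active
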

tox.ini (3 lines changed)
@@ -137,7 +137,6 @@ commands = sphinx-build -W -b linkcheck doc/source doc/build/linkcheck
 # E125 continuation line does not distinguish itself from next logical line
 # E126 continuation line over-indented for hanging indent
 # E128 continuation line under-indented for visual indent
-# E129 visually indented line with same indent as next logical line
 # H404 multi line docstring should start with a summary
 # H405 multi line docstring summary not separated with an empty line
 # N530 direct neutron imports not allowed
@@ -145,7 +144,7 @@ commands = sphinx-build -W -b linkcheck doc/source doc/build/linkcheck
 # N534 Untranslated exception message
 # TODO(amotoki) check the following new rules should be fixed or ignored
 # E731 do not assign a lambda expression, use a def
-ignore = E125,E126,E128,E129,E731,H404,H405,N530,N534
+ignore = E125,E126,E128,E731,H404,H405,N530,N534
 # H106: Don't put vim configuration in source files
 # H203: Use assertIs(Not)None to check for None
 # H204: Use assert(Not)Equal to check for equality
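
With E129 removed from the ignore list, the flake8 run configured by this tox.ini now reports it alongside the other enabled checks. A hedged sketch of reproducing that locally through pycodestyle's Python API; plain pycodestyle does not load the hacking (H*) or neutron (N*) plugins, so only the E-series codes are passed, and the file path is just an example:

import pycodestyle

# Mirror the remaining E-code exclusions from tox.ini; E129 is intentionally
# absent, so violations of it are counted.
style = pycodestyle.StyleGuide(ignore=['E125', 'E126', 'E128', 'E731'])
report = style.check_files(['neutron/agent/dhcp/agent.py'])
print('reported violations (E129 now included):', report.total_errors)
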