Fix pep8 E128 warnings in non-test code
Reduces E128 warnings by ~260, down to ~900; there is no way we're getting rid of all of them at once (or ever). Files under neutron/tests still have a ton of E128 warnings.

Change-Id: I9137150ccf129bf443e33428267cd4bc9c323b54
Co-Authored-By: Akihiro Motoki <amotoki@gmail.com>
parent 773489af62
commit eaf990b2bc
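pep8's E128 check ("continuation line under-indented for visual indent") fires when a wrapped line neither lines up with the delimiter that opened it nor falls back to a clean hanging indent. The hunks below apply one of those two fixes over and over; a minimal sketch with illustrative names (not taken from the diff):

    # E128: continuation line under-indented for visual indent
    result = some_call(first_arg,
        second_arg)

    # Fix 1: visual indent, aligned with the opening parenthesis
    result = some_call(first_arg,
                       second_arg)

    # Fix 2: hanging indent, nothing after the opening parenthesis
    result = some_call(
        first_arg, second_arg)

To reproduce the warning count, an invocation along the lines of "flake8 --select=E128 neutron/ | wc -l" should work in a checkout with the project's pep8 tooling installed (the exact command depends on the repo's tox/flake8 configuration).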
@@ -468,7 +468,7 @@ class OVSBridge(BaseOVS):
         if "cookie" in kwargs:
             kwargs["cookie"] = check_cookie_mask(str(kwargs["cookie"]))
         flow_str = ",".join("=".join([key, str(val)])
-                        for key, val in kwargs.items())
+                            for key, val in kwargs.items())
 
         flows = self.run_ofctl("dump-flows", [flow_str])
         if flows:
@@ -32,8 +32,8 @@ class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
     """
 
     def __init__(self, conf):
-        super(L2AgentExtensionsManager, self).__init__(conf,
-            L2_AGENT_EXT_MANAGER_NAMESPACE)
+        super(L2AgentExtensionsManager,
+              self).__init__(conf, L2_AGENT_EXT_MANAGER_NAMESPACE)
 
     def handle_port(self, context, data):
         """Notify all agent extensions to handle port."""
@@ -82,9 +82,9 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
         else:
             preserve_ips = self._list_centralized_floating_ip_cidrs()
         self._external_gateway_added(ex_gw_port,
-                                    interface_name,
-                                    self.snat_namespace.name,
-                                    preserve_ips)
+                                     interface_name,
+                                     self.snat_namespace.name,
+                                     preserve_ips)
 
     def _external_gateway_removed(self, ex_gw_port, interface_name):
         super(DvrEdgeRouter, self).external_gateway_removed(ex_gw_port,
@@ -330,7 +330,7 @@ class FipNamespace(namespaces.Namespace):
                               is_ipv6=False)
 
     def _update_gateway_route(self, agent_gateway_port,
-                             interface_name, tbl_index):
+                              interface_name, tbl_index):
         ns_name = self.get_name()
         ipd = ip_lib.IPDevice(interface_name, namespace=ns_name)
         # If the 'fg-' device doesn't exist in the namespace then trying
@@ -321,8 +321,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
     def _delete_gateway_device_if_exists(self, ns_ip_device, gw_ip_addr,
                                          snat_idx):
         try:
-            ns_ip_device.route.delete_gateway(gw_ip_addr,
-                                            table=snat_idx)
+            ns_ip_device.route.delete_gateway(gw_ip_addr, table=snat_idx)
         except exceptions.DeviceNotFoundError:
             pass
 
@@ -32,8 +32,8 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
     """Manage l3 agent extensions."""
 
    def __init__(self, conf):
-        super(L3AgentExtensionsManager, self).__init__(conf,
-            L3_AGENT_EXT_MANAGER_NAMESPACE)
+        super(L3AgentExtensionsManager,
+              self).__init__(conf, L3_AGENT_EXT_MANAGER_NAMESPACE)
 
     def add_router(self, context, data):
         """Notify all agent extensions to add router."""
@@ -374,7 +374,7 @@ class RouterInfo(object):
                            'new': fip['fixed_ip_address']})
                 fip_statuses[fip['id']] = self.move_floating_ip(fip)
             elif (ip_cidr in centralized_fip_cidrs and
-                    fip.get('host') == self.host):
+                  fip.get('host') == self.host):
                 LOG.debug("Floating IP is migrating from centralized "
                           "to distributed: %s", fip)
             # TODO(dougwig) - remove this disable when fixing bug #1816874
@@ -586,8 +586,9 @@ class RouterInfo(object):
             if ipv6_utils.is_ipv6_pd_enabled(subnet):
                 interface_name = self.get_internal_device_name(p['id'])
                 self.agent.pd.enable_subnet(self.router_id, subnet['id'],
-                                           subnet['cidr'],
-                                           interface_name, p['mac_address'])
+                                            subnet['cidr'],
+                                            interface_name,
+                                            p['mac_address'])
                 if (subnet['cidr'] !=
                         lib_constants.PROVISIONAL_IPV6_PD_PREFIX):
                     self.pd_subnets[subnet['id']] = subnet['cidr']
@@ -718,8 +718,8 @@ class Dnsmasq(DhcpLocalProcess):
                                         ip_address, 'set:', port.id))
                 elif client_id and len(port.extra_dhcp_opts) == 1:
                     buf.write('%s,%s%s,%s,%s\n' %
-                          (port.mac_address, self._ID, client_id, name,
-                           ip_address))
+                              (port.mac_address, self._ID, client_id, name,
+                               ip_address))
                 else:
                     buf.write('%s,%s,%s,%s%s\n' %
                               (port.mac_address, name, ip_address,
@@ -185,8 +185,8 @@ class IpConntrackManager(object):
                 r'.* -j CT --zone (?P<zone>\d+).*', rule)
             if match:
                 # strip off any prefix that the interface is using
-                short_port_id = (match.group('dev')
-                    [n_const.LINUX_DEV_PREFIX_LEN:])
+                short_port_id = (
+                    match.group('dev')[n_const.LINUX_DEV_PREFIX_LEN:])
                 self._device_zone_map[short_port_id] = int(match.group('zone'))
         LOG.debug("Populated conntrack zone map: %s", self._device_zone_map)
 
@@ -119,7 +119,7 @@ class IpsetManager(object):
         new_set_name = set_name + SWAP_SUFFIX
         set_type = self._get_ipset_set_type(ethertype)
         process_input = ["create %s hash:net family %s" % (new_set_name,
-                                                           set_type)]
+                                                            set_type)]
         for ip in member_ips:
             process_input.append("add %s %s" % (new_set_name, ip))
 
@@ -72,7 +72,8 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
     CONNTRACK_ZONE_PER_PORT = False
 
     def __init__(self, namespace=None):
-        self.iptables = iptables_manager.IptablesManager(state_less=True,
+        self.iptables = iptables_manager.IptablesManager(
+            state_less=True,
             use_ipv6=ipv6_utils.is_enabled_and_bind_by_default(),
             namespace=namespace)
         # TODO(majopela, shihanzhang): refactor out ipset to a separate
@@ -722,8 +723,9 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
 
         if (is_port and rule_protocol in constants.IPTABLES_PROTOCOL_MAP):
             # iptables adds '-m protocol' when the port number is specified
-            iptables_rule += ['-m',
-                constants.IPTABLES_PROTOCOL_MAP[rule_protocol]]
+            iptables_rule += [
+                '-m', constants.IPTABLES_PROTOCOL_MAP[rule_protocol]
+            ]
         return iptables_rule
 
     def _port_arg(self, direction, protocol, port_range_min, port_range_max):
@@ -815,7 +815,7 @@ def _get_rules_by_chain(rules):
 
 
 def _generate_chain_diff_iptables_commands(chain, old_chain_rules,
-                                          new_chain_rules):
+                                           new_chain_rules):
     # keep track of the old index because we have to insert rules
     # in the right position
     old_index = 1
@@ -326,7 +326,8 @@ class ConjIPFlowManager(object):
         # no address overlaps.
         addr_to_conj = self._build_addr_conj_id_map(
             ethertype, sg_conj_id_map)
-        self._update_flows_for_vlan_subr(direction, ethertype, vlan_tag,
+        self._update_flows_for_vlan_subr(
+            direction, ethertype, vlan_tag,
             self.flow_state[vlan_tag][(direction, ethertype)],
             addr_to_conj)
         self.flow_state[vlan_tag][(direction, ethertype)] = addr_to_conj
@@ -98,20 +98,28 @@ class DaemonMonitor(object):
         for p in router_ports:
             subnets = p.get('subnets', [])
             v6_subnets = [subnet for subnet in subnets if
-                netaddr.IPNetwork(subnet['cidr']).version == 6]
+                          netaddr.IPNetwork(subnet['cidr']).version == 6]
             if not v6_subnets:
                 continue
             ra_modes = {subnet['ipv6_ra_mode'] for subnet in v6_subnets}
-            auto_config_prefixes = [subnet['cidr'] for subnet in v6_subnets if
-                subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC or
-                subnet['ipv6_ra_mode'] == constants.DHCPV6_STATELESS]
-            stateful_config_prefixes = [subnet['cidr'] for subnet in v6_subnets
-                if subnet['ipv6_ra_mode'] == constants.DHCPV6_STATEFUL]
+            auto_config_prefixes = [
+                subnet['cidr'] for subnet in v6_subnets
+                if (subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC or
+                    subnet['ipv6_ra_mode'] == constants.DHCPV6_STATELESS)
+            ]
+            stateful_config_prefixes = [
+                subnet['cidr'] for subnet in v6_subnets
+                if subnet['ipv6_ra_mode'] == constants.DHCPV6_STATEFUL
+            ]
             interface_name = self._dev_name_helper(p['id'])
-            slaac_subnets = [subnet for subnet in v6_subnets if
-                subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC]
-            dns_servers = list(iter_chain(*[subnet['dns_nameservers'] for
-                subnet in slaac_subnets if subnet.get('dns_nameservers')]))
+            slaac_subnets = [
+                subnet for subnet in v6_subnets
+                if subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC
+            ]
+            dns_servers = list(iter_chain(*[
+                subnet['dns_nameservers'] for subnet in slaac_subnets
+                if subnet.get('dns_nameservers')
+            ]))
             network_mtu = p.get('mtu', 0)
 
             buf.write('%s' % CONFIG_TEMPLATE.render(
@@ -162,9 +162,11 @@ class MetadataProxyHandler(object):
         ports = self._get_ports(remote_address, network_id, router_id)
         LOG.debug("Gotten ports for remote_address %(remote_address)s, "
                   "network_id %(network_id)s, router_id %(router_id)s are: "
-                  "%(ports)s", {"remote_address": remote_address,
-                  "network_id": network_id, "router_id": router_id,
-                  "ports": ports})
+                  "%(ports)s",
+                  {"remote_address": remote_address,
+                   "network_id": network_id,
+                   "router_id": router_id,
+                   "ports": ports})
 
         if len(ports) == 1:
             return ports[0]['device_id'], ports[0]['tenant_id']
@@ -131,7 +131,7 @@ class ResourceConsumerTracker(object):
         """
         for resource_type, resource_version in versions.items():
             self._set_version(consumer, resource_type,
-                resource_version)
+                              resource_version)
 
         if versions:
             self._cleanup_removed_versions(consumer, versions)
@@ -296,7 +296,7 @@ class DhcpRpcCallback(object):
                 old_port['device_id'] !=
                 utils.get_dhcp_agent_device_id(network_id, host) or
                 not self._is_dhcp_agent_hosting_network(plugin, context, host,
-                    network_id)):
+                                                        network_id)):
                 raise exceptions.DhcpPortInUse(port_id=port['id'])
             LOG.debug('Update dhcp port %(port)s '
                       'from %(host)s.',
|
@ -102,8 +102,7 @@ class DVRServerRpcCallback(object):
|
||||
host = kwargs.get('host')
|
||||
subnet = kwargs.get('subnet')
|
||||
LOG.debug("DVR Agent requests list of VM ports on host %s", host)
|
||||
return self.plugin.get_ports_on_host_by_subnet(context,
|
||||
host, subnet)
|
||||
return self.plugin.get_ports_on_host_by_subnet(context, host, subnet)
|
||||
|
||||
def get_subnet_for_dvr(self, context, **kwargs):
|
||||
fixed_ips = kwargs.get('fixed_ips')
|
||||
|
@@ -230,11 +230,10 @@ class L3RpcCallback(object):
             # of hosts on which DVR router interfaces are spawned). Such
             # bindings are created/updated here by invoking
             # update_distributed_port_binding
-            self.plugin.update_distributed_port_binding(context, port['id'],
-                                                        {'port':
-                                                         {portbindings.HOST_ID: host,
-                                                          'device_id': router_id}
-                                                         })
+            self.plugin.update_distributed_port_binding(
+                context, port['id'],
+                {'port': {portbindings.HOST_ID: host,
+                          'device_id': router_id}})
 
     def get_external_network_id(self, context, **kwargs):
         """Get one external network id for l3 agent.
@@ -305,8 +304,9 @@ class L3RpcCallback(object):
             admin_ctx, network_id, host)
         self._ensure_host_set_on_port(admin_ctx, host, agent_port)
         LOG.debug('Agent Gateway port returned : %(agent_port)s with '
-                  'host %(host)s', {'agent_port': agent_port,
-                  'host': host})
+                  'host %(host)s',
+                  {'agent_port': agent_port,
+                   'host': host})
         return agent_port
 
     @db_api.retry_db_errors
@@ -94,7 +94,8 @@ class ResourcesPullRpcApi(object):
     def pull(self, context, resource_type, resource_id):
         resource_type_cls = _resource_to_class(resource_type)
         cctxt = self.client.prepare()
-        primitive = cctxt.call(context, 'pull',
+        primitive = cctxt.call(
+            context, 'pull',
             resource_type=resource_type,
             version=resource_type_cls.VERSION, resource_id=resource_id)
 
@@ -107,7 +108,8 @@ class ResourcesPullRpcApi(object):
     def bulk_pull(self, context, resource_type, filter_kwargs=None):
         resource_type_cls = _resource_to_class(resource_type)
         cctxt = self.client.prepare()
-        primitives = cctxt.call(context, 'bulk_pull',
+        primitives = cctxt.call(
+            context, 'bulk_pull',
             resource_type=resource_type,
             version=resource_type_cls.VERSION, filter_kwargs=filter_kwargs)
         return [resource_type_cls.clean_obj_from_primitive(primitive)
@@ -327,8 +327,8 @@ class Controller(object):
             fields_to_strip += self._exclude_attributes_by_policy(
                 request.context, obj_list[0])
         collection = {self._collection:
-                      [self._filter_attributes(obj,
-                                               fields_to_strip=fields_to_strip)
+                      [self._filter_attributes(
+                          obj, fields_to_strip=fields_to_strip)
                        for obj in obj_list]}
         pagination_links = pagination_helper.get_links(obj_list)
         if pagination_links:
@@ -289,7 +289,8 @@ class KeepalivedIPv6Test(object):
         self.config_path = tempfile.mkdtemp()
 
         # Instantiate keepalived manager with the IPv6 configuration.
-        self.manager = keepalived.KeepalivedManager('router1', self.config,
+        self.manager = keepalived.KeepalivedManager(
+            'router1', self.config,
             namespace=self.nsname, process_monitor=self.pm,
             conf_path=self.config_path)
         self.manager.spawn()
@@ -32,10 +32,11 @@ class CoreChecks(base.BaseChecks):
         if cfg.CONF.api_workers and cfg.CONF.rpc_workers:
             return upgradecheck.Result(
                 upgradecheck.Code.SUCCESS, _("Number of workers already "
-                "defined in config"))
+                                             "defined in config"))
         else:
             return upgradecheck.Result(
-                upgradecheck.Code.WARNING, _("The default number of workers "
-                "has changed. Please see release notes for the new values, "
-                "but it is strongly encouraged for deployers to manually set "
-                "the values for api_workers and rpc_workers."))
+                upgradecheck.Code.WARNING,
+                _("The default number of workers "
+                  "has changed. Please see release notes for the new values, "
+                  "but it is strongly encouraged for deployers to manually "
+                  "set the values for api_workers and rpc_workers."))
@@ -39,12 +39,14 @@ designate_opts = [
                        'context')),
     cfg.BoolOpt('allow_reverse_dns_lookup', default=True,
                 help=_('Allow the creation of PTR records')),
-    cfg.IntOpt('ipv4_ptr_zone_prefix_size', default=24,
+    cfg.IntOpt(
+        'ipv4_ptr_zone_prefix_size', default=24,
         help=_('Number of bits in an ipv4 PTR zone that will be considered '
                'network prefix. It has to align to byte boundary. Minimum '
               'value is 8. Maximum value is 24. As a consequence, range '
               'of values is 8, 16 and 24')),
-    cfg.IntOpt('ipv6_ptr_zone_prefix_size', default=120,
+    cfg.IntOpt(
+        'ipv6_ptr_zone_prefix_size', default=120,
         help=_('Number of bits in an ipv6 PTR zone that will be considered '
               'network prefix. It has to align to nyble boundary. Minimum '
               'value is 4. Maximum value is 124. As a consequence, range '
@@ -59,6 +61,6 @@ designate_opts = [
 def register_designate_opts(CONF=cfg.CONF):
     CONF.register_opts(designate_opts, 'designate')
     loading.register_auth_conf_options(CONF, 'designate')
-    loading.register_session_conf_options(conf=CONF,
-                                          group='designate',
+    loading.register_session_conf_options(
+        conf=CONF, group='designate',
         deprecated_opts={'cafile': [cfg.DeprecatedOpt('ca_cert')]})
@@ -384,7 +384,7 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
             raise das_exc.NetworkHostedByDHCPAgent(
                 network_id=network_id, agent_id=id)
         network.NetworkDhcpAgentBinding(context, dhcp_agent_id=id,
-            network_id=network_id).create()
+                                        network_id=network_id).create()
         dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
         if dhcp_notifier:
             dhcp_notifier.network_added_to_agent(
@@ -24,7 +24,7 @@ from neutron_lib.objects import exceptions
 
 from neutron.common import utils
 from neutron.objects.port.extensions import (allowedaddresspairs
-    as obj_addr_pair)
+                                             as obj_addr_pair)
 
 
 @resource_extend.has_resource_extenders
@@ -21,7 +21,8 @@ class DataPlaneStatusMixin(object):
     """Mixin class to add data plane status to a port"""
 
     def _process_create_port_data_plane_status(self, context, data, res):
-        obj = dps_obj.PortDataPlaneStatus(context, port_id=res['id'],
+        obj = dps_obj.PortDataPlaneStatus(
+            context, port_id=res['id'],
             data_plane_status=data[dps_lib.DATA_PLANE_STATUS])
         obj.create()
         res[dps_lib.DATA_PLANE_STATUS] = data[dps_lib.DATA_PLANE_STATUS]
@@ -153,7 +153,7 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin):
                                   for pool in subnet.allocation_pools]
         res['host_routes'] = [{'destination': str(route.destination),
                                'nexthop': str(route.nexthop)}
-                               for route in subnet.host_routes]
+                              for route in subnet.host_routes]
         res['dns_nameservers'] = [str(dns.address)
                                   for dns in subnet.dns_nameservers]
         res['shared'] = subnet.shared
@@ -681,7 +681,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
         l3plugin = directory.get_plugin(plugin_constants.L3)
         if l3plugin:
             gw_ports = self._get_router_gw_ports_by_network(context,
-                network['id'])
+                                                            network['id'])
             router_ids = [p.device_id for p in gw_ports]
             for id in router_ids:
                 try:
@@ -711,8 +711,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
             return
         external_gateway_info['external_fixed_ips'].append(
             {'subnet_id': subnet['id']})
-        info = {'router': {'external_gateway_info':
-                           external_gateway_info}}
+        info = {'router': {'external_gateway_info': external_gateway_info}}
         l3plugin.update_router(context, router_id, info)
 
     @db_api.retry_if_session_inactive()
@@ -724,8 +723,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
         # If this subnet supports auto-addressing, then update any
         # internal ports on the network with addresses for this subnet.
         if ipv6_utils.is_auto_address_subnet(result):
-            updated_ports = self.ipam.add_auto_addrs_on_network_ports(context,
-                result, ipam_subnet)
+            updated_ports = self.ipam.add_auto_addrs_on_network_ports(
+                context, result, ipam_subnet)
             for port_id in updated_ports:
                 port_info = {'port': {'id': port_id}}
                 try:
@@ -1336,7 +1335,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
         with db_api.CONTEXT_WRITER.using(context):
             for port in port_data:
                 raw_mac_address = port.pop('mac_address',
-                    constants.ATTR_NOT_SPECIFIED)
+                                           constants.ATTR_NOT_SPECIFIED)
                 if raw_mac_address is constants.ATTR_NOT_SPECIFIED:
                     raw_mac_address = macs.pop()
                 eui_mac_address = netaddr.EUI(raw_mac_address, 48)
@@ -1379,12 +1378,12 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
                 self._enforce_device_owner_not_router_intf_or_device_id(
                     context, pdata.get('device_owner'),
                     pdata.get('device_id'), pdata.get('tenant_id'))
-                bulk_port_data.append(dict(project_id=pdata.get('project_id'),
+                bulk_port_data.append(dict(
+                    project_id=pdata.get('project_id'),
                     name=pdata.get('name'),
                     network_id=pdata.get('network_id'),
                     admin_state_up=pdata.get('admin_state_up'),
-                    status=pdata.get('status',
-                                     constants.PORT_STATUS_ACTIVE),
+                    status=pdata.get('status', constants.PORT_STATUS_ACTIVE),
                     mac_address=pdata.get('mac_address'),
                     device_id=pdata.get('device_id'),
                     device_owner=pdata.get('device_owner'),
@@ -92,7 +92,8 @@ class DNSDbMixin(object):
                 context, floatingip_data, req_data))
         dns_actions_data = None
         if current_dns_name and current_dns_domain:
-            fip_obj.FloatingIPDNS(context,
+            fip_obj.FloatingIPDNS(
+                context,
                 floatingip_id=floatingip_data['id'],
                 dns_name=req_data[dns_apidef.DNSNAME],
                 dns_domain=req_data[dns_apidef.DNSDOMAIN],
@@ -149,7 +150,8 @@ class DNSDbMixin(object):
         else:
             return
         if current_dns_name and current_dns_domain:
-            fip_obj.FloatingIPDNS(context,
+            fip_obj.FloatingIPDNS(
+                context,
                 floatingip_id=floatingip_data['id'],
                 dns_name='',
                 dns_domain='',
@@ -143,7 +143,7 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
     def _confirm_router_interface_not_in_use(self, context, router_id,
                                              subnet_id):
         super(ExtraRoute_dbonly_mixin,
-            self)._confirm_router_interface_not_in_use(
+              self)._confirm_router_interface_not_in_use(
                 context, router_id, subnet_id)
         subnet = self._core_plugin.get_subnet(context, subnet_id)
         subnet_cidr = netaddr.IPNetwork(subnet['cidr'])
@@ -222,8 +222,8 @@ class FlavorsDbMixin(common_db_mixin.CommonDbMixin):
                               marker=None, page_reverse=False):
         """From flavor, choose service profile and find provider for driver."""
 
-        objs = obj_flavor.FlavorServiceProfileBinding.get_objects(context,
-            flavor_id=flavor_id)
+        objs = obj_flavor.FlavorServiceProfileBinding.get_objects(
+            context, flavor_id=flavor_id)
         if not objs:
             raise flav_exc.FlavorServiceProfileBindingNotFound(
                 sp_id='', fl_id=flavor_id)
@@ -125,7 +125,8 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
             if _combine(route) == route_str:
                 route.delete()
         for route_str in new_route_set - old_route_set:
-            route = subnet_obj.Route(context,
+            route = subnet_obj.Route(
+                context,
                 destination=common_utils.AuthenticIPNetwork(
                     route_str.partition("_")[0]),
                 nexthop=netaddr.IPAddress(route_str.partition("_")[2]),
@@ -50,7 +50,7 @@ def get_ip_update_not_allowed_device_owner_list():
 def is_neutron_built_in_router(context, router_id):
     l3plugin = directory.get_plugin(plugin_consts.L3)
     return bool(l3plugin and
-        l3plugin.router_supports_scheduling(context, router_id))
+                l3plugin.router_supports_scheduling(context, router_id))
 
 
 class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
@@ -437,8 +437,8 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
             port_copy = copy.deepcopy(original)
             port_copy.update(new_port)
             port_copy['fixed_ips'] = auto_assign_subnets
-            self.allocate_ips_for_port_and_store(context,
-                {'port': port_copy}, port_copy['id'])
+            self.allocate_ips_for_port_and_store(
+                context, {'port': port_copy}, port_copy['id'])
 
         getattr(db_port, 'fixed_ips')  # refresh relationship before return
 
@@ -121,7 +121,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
         router_id = router['id']
         agent_id = agent['id']
         bindings = rb_obj.RouterL3AgentBinding.get_objects(context,
-            router_id=router_id)
+                                                           router_id=router_id)
         if not bindings:
             return True
         for binding in bindings:
@@ -213,7 +213,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
             # the routers should be retained. This flag will be used
             # to check if there are valid routers in this agent.
             retain_router = self._check_router_retain_needed(context, router,
-                agent.host)
+                                                             agent.host)
             if retain_router:
                 l3_notifier.routers_updated_on_host(
                     context, [router_id], agent.host)
@@ -262,8 +262,8 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
         new_hosts = [agent['host'] for agent in new_agents]
         router = self.get_router(context, router_id)
         for host in set(old_hosts) - set(new_hosts):
-            retain_router = self._check_router_retain_needed(context,
-                                                             router, host)
+            retain_router = self._check_router_retain_needed(
+                context, router, host)
             if retain_router:
                 l3_notifier.routers_updated_on_host(
                     context, [router_id], host)
@@ -362,9 +362,10 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
         record_objs = rb_obj.RouterL3AgentBinding.get_objects(
             context, router_id=router_ids)
         if admin_state_up is not None:
-            l3_agents = ag_obj.Agent.get_objects(context,
-                id=[obj.l3_agent_id for obj in record_objs],
-                admin_state_up=admin_state_up)
+            l3_agents = ag_obj.Agent.get_objects(
+                context,
+                id=[obj.l3_agent_id for obj in record_objs],
+                admin_state_up=admin_state_up)
         else:
             l3_agents = [
                 ag_obj.Agent.get_object(context, id=obj.l3_agent_id)
@@ -471,8 +471,9 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
     def _create_gw_port(self, context, router_id, router, new_network_id,
                         ext_ips):
         new_valid_gw_port_attachment = (
-            new_network_id and (not router.gw_port or
-                router.gw_port['network_id'] != new_network_id))
+            new_network_id and
+            (not router.gw_port or
+             router.gw_port['network_id'] != new_network_id))
         if new_valid_gw_port_attachment:
             subnets = self._core_plugin.get_subnets_by_network(context,
                                                                new_network_id)
@@ -840,9 +841,11 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
         if port:
             fixed_ips = list(map(dict, port['port']['fixed_ips']))
             fixed_ips.append(fixed_ip)
-            return self._core_plugin.update_port(context,
-                port['port_id'], {'port':
-                    {'fixed_ips': fixed_ips}}), [subnet], False
+            return (self._core_plugin.update_port(
+                        context, port['port_id'],
+                        {'port': {'fixed_ips': fixed_ips}}),
+                    [subnet],
+                    False)
 
         port_data = {'tenant_id': router.tenant_id,
                      'network_id': subnet['network_id'],
@@ -1041,9 +1044,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
                 # multiple prefix port - delete prefix from port
                 fixed_ips = [dict(fip) for fip in p['fixed_ips']
                              if fip['subnet_id'] != subnet_id]
-                self._core_plugin.update_port(context, p['id'],
-                    {'port':
-                        {'fixed_ips': fixed_ips}})
+                self._core_plugin.update_port(
+                    context, p['id'], {'port': {'fixed_ips': fixed_ips}})
                 return (p, [subnet])
             elif subnet_id in port_subnets:
                 # only one subnet on port - delete the port
@@ -1108,9 +1110,9 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
     def _make_floatingip_dict(self, floatingip, fields=None,
                               process_extensions=True):
         floating_ip_address = (str(floatingip.floating_ip_address)
-            if floatingip.floating_ip_address else None)
+                               if floatingip.floating_ip_address else None)
         fixed_ip_address = (str(floatingip.fixed_ip_address)
-            if floatingip.fixed_ip_address else None)
+                            if floatingip.fixed_ip_address else None)
         res = {'id': floatingip.id,
                'tenant_id': floatingip.project_id,
                'floating_ip_address': floating_ip_address,
@@ -1133,8 +1135,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
                                   internal_subnet_id,
                                   external_network_id):
         subnet = self._core_plugin.get_subnet(context, internal_subnet_id)
-        return self.get_router_for_floatingip(context,
-            internal_port, subnet, external_network_id)
+        return self.get_router_for_floatingip(
+            context, internal_port, subnet, external_network_id)
 
     # NOTE(yamamoto): This method is an override point for plugins
     # inheriting this class. Do not optimize this out.
@@ -1282,7 +1284,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
             fixed_ip_address=netaddr.IPAddress(internal_ip_address))
         if fip_exists:
             floating_ip_address = (str(floatingip_obj.floating_ip_address)
-                if floatingip_obj.floating_ip_address else None)
+                                   if floatingip_obj.floating_ip_address
+                                   else None)
             raise l3_exc.FloatingIPPortAlreadyAssociated(
                 port_id=fip['port_id'],
                 fip_id=floatingip_obj.id,
@@ -1319,7 +1322,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
         if 'description' in fip:
             floatingip_obj.description = fip['description']
         floating_ip_address = (str(floatingip_obj.floating_ip_address)
-            if floatingip_obj.floating_ip_address else None)
+                               if floatingip_obj.floating_ip_address else None)
         return {'fixed_ip_address': internal_ip_address,
                 'fixed_port_id': port_id,
                 'router_id': router_id,
@@ -1334,7 +1337,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
         return any(s.ip_version == 4 for s in net.subnets)
 
     def _create_floatingip(self, context, floatingip,
-            initial_status=constants.FLOATINGIP_STATUS_ACTIVE):
+                           initial_status=constants.FLOATINGIP_STATUS_ACTIVE):
         try:
             registry.publish(resources.FLOATING_IP, events.BEFORE_CREATE,
                              self, payload=events.DBEventPayload(
@@ -1444,7 +1447,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 
     @db_api.retry_if_session_inactive()
     def create_floatingip(self, context, floatingip,
-            initial_status=constants.FLOATINGIP_STATUS_ACTIVE):
+                          initial_status=constants.FLOATINGIP_STATUS_ACTIVE):
         return self._create_floatingip(context, floatingip, initial_status)
 
     def _update_floatingip(self, context, id, floatingip):
@@ -2053,7 +2056,7 @@ class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, L3RpcNotifierMixin):
         return router_interface_info
 
     def create_floatingip(self, context, floatingip,
-            initial_status=constants.FLOATINGIP_STATUS_ACTIVE):
+                          initial_status=constants.FLOATINGIP_STATUS_ACTIVE):
         floatingip_dict = super(L3_NAT_db_mixin, self).create_floatingip(
             context, floatingip, initial_status)
         router_id = floatingip_dict['router_id']
@@ -711,7 +711,7 @@ class _DVRAgentInterfaceMixin(object):
         # Collect gw ports only if available
         if gw_port_id and gw_ports.get(gw_port_id):
             l3_agent = ag_obj.Agent.get_object(context,
-                id=binding_objs[0].l3_agent_id)
+                                               id=binding_objs[0].l3_agent_id)
             return l3_agent.host
 
     def _build_routers_list(self, context, routers, gw_ports):
@@ -810,7 +810,7 @@ class _DVRAgentInterfaceMixin(object):
 
     @log_helper.log_method_call
     def _get_dvr_sync_data(self, context, host, agent, router_ids=None,
-            active=None):
+                           active=None):
         routers, interfaces, floating_ips = self._get_router_info_list(
             context, router_ids=router_ids, active=active,
             device_owners=const.ROUTER_INTERFACE_OWNERS)
@@ -1098,7 +1098,7 @@ class _DVRAgentInterfaceMixin(object):
     def _get_address_pair_active_port_with_fip(
             self, context, port_dict, port_addr_pair_ip):
         port_valid_state = (port_dict['admin_state_up'] or
-            (port_dict['status'] == const.PORT_STATUS_ACTIVE))
+                            port_dict['status'] == const.PORT_STATUS_ACTIVE)
         if not port_valid_state:
             return
         fips = l3_obj.FloatingIP.get_objects(
@@ -125,7 +125,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
         for agent in snat_agent_list:
             LOG.debug('DVR: Handle new unbound migration port, '
                       'host %(host)s, router_ids %(router_ids)s',
-                     {'host': agent.host, 'router_ids': router_ids})
+                      {'host': agent.host, 'router_ids': router_ids})
             self.l3_rpc_notifier.routers_updated_on_host(
                 context, router_ids, agent.host)
             if agent.host == port_host:
@@ -400,7 +400,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
 
     def _get_router_ids_for_agent(self, context, agent_db, router_ids):
         result_set = set(super(L3_DVRsch_db_mixin,
-            self)._get_router_ids_for_agent(
+                               self)._get_router_ids_for_agent(
             context, agent_db, router_ids))
         router_ids = set(router_ids or [])
         if router_ids and result_set == router_ids:
@@ -63,7 +63,7 @@ class L3_NAT_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
             old_router = self._make_router_dict(router)
             router.enable_snat = self._get_enable_snat(info)
             router_body = {l3_apidef.ROUTER:
-                {l3_apidef.EXTERNAL_GW_INFO: info}}
+                           {l3_apidef.EXTERNAL_GW_INFO: info}}
             registry.publish(resources.ROUTER, events.PRECOMMIT_UPDATE, self,
                              payload=events.DBEventPayload(
                                  context, request_body=router_body,
@@ -144,8 +144,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                           "%(ha_vr_id)d.",
                           {'router_id': router_id, 'ha_vr_id': allocation.vr_id})
                 router_body = {l3_apidef.ROUTER:
-                    {l3_ext_ha_apidef.HA_INFO: True,
-                     'ha_vr_id': allocation.vr_id}}
+                               {l3_ext_ha_apidef.HA_INFO: True,
+                                'ha_vr_id': allocation.vr_id}}
                 registry.publish(resources.ROUTER, events.PRECOMMIT_UPDATE,
                                  self, payload=events.DBEventPayload(
                                      context, request_body=router_body,
@@ -234,9 +234,11 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
     def get_number_of_agents_for_scheduling(self, context):
         """Return number of agents on which the router will be scheduled."""
 
-        num_agents = len(self.get_l3_agents(context, active=True,
-            filters={'agent_modes': [constants.L3_AGENT_MODE_LEGACY,
-                                     constants.L3_AGENT_MODE_DVR_SNAT]}))
+        num_agents = len(
+            self.get_l3_agents(
+                context, active=True,
+                filters={'agent_modes': [constants.L3_AGENT_MODE_LEGACY,
+                                         constants.L3_AGENT_MODE_DVR_SNAT]}))
         max_agents = cfg.CONF.max_l3_agents_per_router
         if max_agents:
             if max_agents > num_agents:
@@ -257,8 +259,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
             port_id=port_id,
             router_id=router_id,
             port_type=constants.DEVICE_OWNER_ROUTER_HA_INTF).create()
-        portbinding = l3_hamode.L3HARouterAgentPortBinding(context,
-            port_id=port_id, router_id=router_id)
+        portbinding = l3_hamode.L3HARouterAgentPortBinding(
+            context, port_id=port_id, router_id=router_id)
         portbinding.create()
 
         return portbinding
@@ -671,8 +673,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
             sync_data = self._get_dvr_sync_data(context, host, agent,
                                                 router_ids, active)
         else:
-            sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data(context,
-                router_ids, active)
+            sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data(
+                context, router_ids, active)
         return self._process_sync_ha_data(
             context, sync_data, host, dvr_agent_mode)
 
@@ -703,7 +705,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                                     constants.DEVICE_OWNER_ROUTER_SNAT,
                                     constants.DEVICE_OWNER_ROUTER_GW]}
         ports = self._core_plugin.get_ports(admin_ctx, filters=device_filter)
-        active_ports = (port for port in ports
+        active_ports = (
+            port for port in ports
             if states[port['device_id']] == n_const.HA_ROUTER_STATE_ACTIVE)
 
         for port in active_ports:
@@ -196,7 +196,8 @@ def upgrade():
                     sa.ForeignKeyConstraint(['router_id'], ['routers.id']),
                     sa.PrimaryKeyConstraint('router_id'))
 
-    op.create_table('cisco_hosting_devices',
+    op.create_table(
+        'cisco_hosting_devices',
         sa.Column('tenant_id', sa.String(length=255), nullable=True,
                   index=True),
         sa.Column('id', sa.String(length=36), nullable=False),
@@ -213,7 +214,8 @@ def upgrade():
                                 ondelete='SET NULL'),
         sa.PrimaryKeyConstraint('id')
     )
-    op.create_table('cisco_port_mappings',
+    op.create_table(
+        'cisco_port_mappings',
         sa.Column('logical_resource_id', sa.String(length=36), nullable=False),
         sa.Column('logical_port_id', sa.String(length=36), nullable=False),
         sa.Column('port_type', sa.String(length=32), nullable=True),
@@ -227,7 +229,8 @@ def upgrade():
                                 ondelete='CASCADE'),
         sa.PrimaryKeyConstraint('logical_resource_id', 'logical_port_id')
     )
-    op.create_table('cisco_router_mappings',
+    op.create_table(
+        'cisco_router_mappings',
         sa.Column('router_id', sa.String(length=36), nullable=False),
         sa.Column('auto_schedule', sa.Boolean(), nullable=False),
         sa.Column('hosting_device_id', sa.String(length=36), nullable=True),
@@ -138,7 +138,7 @@ def upgrade():
         sa.Column('network_id', sa.String(length=36), nullable=False),
         sa.Column('dhcp_agent_id', sa.String(length=36), nullable=False),
         sa.ForeignKeyConstraint(['dhcp_agent_id'], ['agents.id'],
-            ondelete='CASCADE'),
+                                ondelete='CASCADE'),
         sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-            ondelete='CASCADE'),
+                                ondelete='CASCADE'),
         sa.PrimaryKeyConstraint('network_id', 'dhcp_agent_id'))
@@ -46,7 +46,7 @@ def contract_creation_exceptions():
 def upgrade():
     op.add_column(ROUTER_L3_AGENT_BINDING,
                   sa.Column('binding_index', sa.Integer(), nullable=False,
-                  server_default='1'))
+                            server_default='1'))
 
     bindings_table = sa.Table(
         ROUTER_L3_AGENT_BINDING,
@@ -25,7 +25,7 @@ down_revision = '89ab9a816d70'
 
 def upgrade():
     op.add_column('subnets',
-        sa.Column('segment_id', sa.String(length=36), nullable=True))
+                  sa.Column('segment_id', sa.String(length=36), nullable=True))
     op.create_foreign_key(
         None, 'subnets', 'networksegments', ['segment_id'], ['id'])
 
@@ -43,6 +43,7 @@ def upgrade():
                                       constants.INGRESS_DIRECTION,
                                       name='directions'),
                   nullable=False, server_default=constants.EGRESS_DIRECTION),
-        sa.UniqueConstraint('qos_policy_id', 'direction',
+        sa.UniqueConstraint(
+            'qos_policy_id', 'direction',
             name='qos_minimum_bandwidth_rules0qos_policy_id0direction')
     )
@@ -22,7 +22,8 @@ down_revision = '45f8dd33480b'
 
 
 def upgrade():
-    op.create_table('trunks',
+    op.create_table(
+        'trunks',
         sa.Column('admin_state_up', sa.Boolean(),
                   nullable=False, server_default=sql.true()),
         sa.Column('tenant_id', sa.String(length=255), nullable=True,
@@ -42,7 +43,8 @@ def upgrade():
         sa.UniqueConstraint('port_id'),
         sa.UniqueConstraint('standard_attr_id')
     )
-    op.create_table('subports',
+    op.create_table(
+        'subports',
         sa.Column('port_id', sa.String(length=36)),
         sa.Column('trunk_id', sa.String(length=36), nullable=False),
         sa.Column('segmentation_type', sa.String(length=32), nullable=False),
@@ -51,6 +53,7 @@ def upgrade():
         sa.ForeignKeyConstraint(['trunk_id'], ['trunks.id'],
                                 ondelete='CASCADE'),
         sa.PrimaryKeyConstraint('port_id'),
-        sa.UniqueConstraint('trunk_id', 'segmentation_type', 'segmentation_id',
+        sa.UniqueConstraint(
+            'trunk_id', 'segmentation_type', 'segmentation_id',
             name='uniq_subport0trunk_id0segmentation_type0segmentation_id')
     )
@@ -30,7 +30,8 @@ down_revision = '030a959ceafa'
 
 
 def upgrade():
-    op.create_table('subnet_service_types',
+    op.create_table(
+        'subnet_service_types',
         sa.Column('subnet_id', sa.String(length=36)),
         sa.Column('service_type', sa.String(length=255)),
         sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
@@ -43,13 +43,13 @@ def upgrade():
 
     op.add_column(ML2_PORT_BINDING,
                   sa.Column('status',
-                  sa.String(length=16),
-                  nullable=False,
-                  server_default=constants.ACTIVE))
+                            sa.String(length=16),
+                            nullable=False,
+                            server_default=constants.ACTIVE))
 
     if (engine.name == MYSQL_ENGINE):
         op.execute("ALTER TABLE ml2_port_bindings DROP PRIMARY KEY,"
-            "ADD PRIMARY KEY(port_id, host);")
+                   "ADD PRIMARY KEY(port_id, host);")
     else:
         inspector = insp.from_engine(bind)
         pk_constraint = inspector.get_pk_constraint(ML2_PORT_BINDING)
@@ -31,7 +31,8 @@ down_revision = '0ff9e3881597'