Shared VxLAN (Part5: bulk create shadow port)

1. What is the problem?
VLAN network has some restrictions that VxLAN network doesn't have.
For more flexible networking deployment, we consider supporting
cross-pod VxLAN network.

We are going to use shadow agent/port mechanism to synchronize VTEP
information and make cross-pod VxLAN networking available, as discussed
in the specification document[1].

In the previous implementation, we used a loop to create shadow ports
one by one. When a large number of shadow ports need to be created,
the correspondingly large number of API requests degrades performance.

2. What is the solution to the problem?
Use bulk creation API to create shadow port.

3. What features need to be implemented in the Tricircle
to realize the solution?
This is the fifth patch for cross-pod VxLAN networking support, which
introduces the following changes:

(1) Use bulk API to create shadow ports
(2) Do not create resource routing entries for shadow ports

[1] https://review.openstack.org/#/c/429155/

Change-Id: I8b2dc98d84385433727e55584c80e1054fce406f
This commit is contained in:
zhiyuan_cai 2017-03-09 14:51:24 +08:00
parent 0e5e2b7fc9
commit fde488c99e
8 changed files with 237 additions and 51 deletions

View File

@ -69,7 +69,9 @@ client_opts = [
help='cidr pool of the bridge network'),
cfg.StrOpt('cross_pod_vxlan_mode', default='p2p',
choices=['p2p', 'l2gw', 'noop'],
help='Cross-pod VxLAN networking support mode')
help='Cross-pod VxLAN networking support mode'),
cfg.IntOpt('max_shadow_port_bulk_size', default=100,
help='max bulk size to create shadow ports')
]
client_opt_group = cfg.OptGroup('client')
cfg.CONF.register_group(client_opt_group)

View File

@ -733,10 +733,12 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
# we use a "delete_server_port" job to delete the local ports.
if port.get('device_owner') not in NON_VM_PORT_TYPES:
try:
for pod, bottom_port_id in (
self.helper.get_real_shadow_resource_iterator(
t_ctx, t_constants.RT_PORT, port_id)):
self.xjob_handler.delete_server_port(t_ctx, bottom_port_id,
# since we don't create resource routing entries for shadow
# ports, we traverse pods where the network is located to
# delete ports
for pod, _id in self.helper.get_real_shadow_resource_iterator(
t_ctx, t_constants.RT_NETWORK, port['network_id']):
self.xjob_handler.delete_server_port(t_ctx, port_id,
pod['pod_id'])
except Exception:
raise
@ -1166,13 +1168,13 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
self._delete_top_bridge_resource(t_ctx, q_ctx, t_constants.RT_PORT,
bridge_port_id, bridge_port_name)
def _delete_shadow_bridge_port(self, t_ctx, bridge_port_id):
for pod, b_port_id in db_api.get_bottom_mappings_by_top_id(
t_ctx, bridge_port_id, t_constants.RT_SD_PORT):
def _delete_shadow_bridge_port(self, t_ctx, bridge_port):
mappings = db_api.get_bottom_mappings_by_top_id(
t_ctx, bridge_port['network_id'], t_constants.RT_NETWORK)
for pod, _id in mappings:
region_name = pod['region_name']
self._get_client(region_name).delete_ports(t_ctx, b_port_id)
db_api.delete_mappings_by_top_id(t_ctx, bridge_port_id,
pod_id=pod['pod_id'])
self._get_client(region_name).delete_ports(t_ctx,
bridge_port['id'])
def delete_router(self, context, _id):
router = super(TricirclePlugin,
@ -1197,7 +1199,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
if bridge_ports:
# we will not create bridge ports for local router, so here no
# need to check "is_local_router" again
t_bridge_port_id = bridge_ports[0]['id']
t_bridge_port = bridge_ports[0]
t_bridge_port_id = t_bridge_port['id']
if not is_ns:
b_client.action_routers(t_ctx, 'remove_gateway',
@ -1215,7 +1218,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
pass
raise
self._delete_shadow_bridge_port(t_ctx, t_bridge_port_id)
self._delete_shadow_bridge_port(t_ctx, t_bridge_port)
self._delete_top_bridge_port(t_ctx, context, t_bridge_port_id,
bridge_port_name)
b_client.delete_routers(t_ctx, b_router_id)

View File

@ -15,8 +15,11 @@
import copy
import netaddr
import re
import six
from six.moves import xrange
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net
from neutron_lib import constants
import neutronclient.common.exceptions as q_cli_exceptions
@ -37,7 +40,6 @@ AZ_HINTS = 'availability_zone_hints'
EXTERNAL = 'router:external' # neutron.extensions.external_net.EXTERNAL
TYPE_VLAN = 'vlan' # neutron.plugins.common.constants.TYPE_VLAN
TYPE_VXLAN = 'vxlan' # neutron.plugins.common.constants.TYPE_VXLAN
VIF_TYPE_OVS = 'ovs' # neutron.extensions.portbindings.VIF_TYPE_OVS
OVS_AGENT_DATA_TEMPLATE = {
'agent_type': None,
@ -64,7 +66,7 @@ OVS_AGENT_DATA_TEMPLATE = {
'bridge_mappings': {}}}
VIF_AGENT_TYPE_MAP = {
VIF_TYPE_OVS: constants.AGENT_TYPE_OVS}
portbindings.VIF_TYPE_OVS: constants.AGENT_TYPE_OVS}
AGENT_DATA_TEMPLATE_MAP = {
constants.AGENT_TYPE_OVS: OVS_AGENT_DATA_TEMPLATE}
@ -73,6 +75,8 @@ TUNNEL_IP_HANDLE_MAP = {
constants.AGENT_TYPE_OVS: lambda agent: agent[
'configurations']['tunneling_ip']}
MAC_PATTERN = re.compile('([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}')
class NetworkHelper(object):
def __init__(self, call_obj=None):
@ -584,7 +588,7 @@ class NetworkHelper(object):
'ip_address': port['fixed_ips'][0]['ip_address']}
],
'mac_address': port['mac_address'],
'binding:profile': {},
portbindings.PROFILE: {},
'device_id': 'reserved_dhcp_port',
'device_owner': 'network:dhcp',
}
@ -609,7 +613,7 @@ class NetworkHelper(object):
'admin_state_up': True,
'network_id': t_net_id,
'name': t_dhcp_name,
'binding:profile': {},
portbindings.PROFILE: {},
'device_id': 'reserved_dhcp_port',
'device_owner': 'network:dhcp',
}
@ -792,6 +796,88 @@ class NetworkHelper(object):
db_api.ensure_agent_exists(t_ctx, pod['pod_id'], agent_host,
agent_type, agent_tunnel)
@staticmethod
def fill_binding_info(port_body):
    """Fill in binding information on a shadow port creation body.

    For an OVS-backed port, set the VIF details, VIF type and VNIC type
    so that the shadow port looks like a normally bound port.

    NOTE(review): the value read from the binding profile key is compared
    against an agent type constant, so presumably the caller replaces the
    profile dict with the agent type string before calling this — confirm
    against the local plugin's shadow-agent handling.

    :param port_body: port creation body to be updated in place
    """
    agent_type = port_body[portbindings.PROFILE]
    # TODO(zhiyuan) support other agent types
    if agent_type == constants.AGENT_TYPE_OVS:
        port_body[portbindings.VIF_DETAILS] = {'port_filter': True,
                                               'ovs_hybrid_plug': True}
        port_body[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS
        port_body[portbindings.VNIC_TYPE] = portbindings.VNIC_NORMAL
def _prepare_shadow_ports_with_retry(self, ctx, client, req_create_bodys):
    """Bulk-create shadow ports, retrying on MAC address conflicts.

    Each conflicting MAC is removed from the request and the id of the
    already-existing port (encoded in the shadow port name) is recorded,
    so the returned list covers every requested port.

    :param ctx: tricircle context
    :param client: client of the pod where ports are created
    :param req_create_bodys: list of port creation bodies
    :return: ids of created ports plus ids recovered from conflicts
    """
    # index pending bodies by MAC so a conflicting entry can be dropped
    # before the next attempt
    pending = {body['mac_address']: body for body in req_create_bodys}
    existing_port_ids = []
    max_tries = 5
    for attempt in xrange(max_tries):
        bodies = list(pending.values())
        if not bodies:
            created_ports = []
            break
        try:
            created_ports = client.create_ports(ctx, {'ports': bodies})
            break
        except q_cli_exceptions.MacAddressInUseClient as e:
            if attempt == max_tries - 1:
                # we fail in the last try, just raise exception
                raise
            match = MAC_PATTERN.search(e.message)
            if not match:
                # the exception no longer contains mac information
                raise
            conflict_mac = match.group()
            if conflict_mac not in pending:
                # rare case, we conflicted with an unrecognized mac
                raise
            conflict_body = pending.pop(conflict_mac)
            # shadow port name is "<prefix>_<original port id>"
            existing_port_ids.append(conflict_body['name'].split('_')[-1])
    port_ids = [port['id'] for port in created_ports]
    port_ids.extend(existing_port_ids)
    return port_ids
def prepare_shadow_ports(self, ctx, project_id, target_pod, net_id,
                         port_bodys, agents, max_bulk_size):
    """Create shadow ports in the target pod via the bulk creation API.

    :param ctx: tricircle context
    :param project_id: project the shadow ports belong to
    :param target_pod: dict of the pod where shadow ports are created
    :param net_id: bottom network id
    :param port_bodys: bodies of the original ports to shadow
    :param agents: per-port agent info ({'type': ..., 'tunnel_ip': ...}),
        zipped with port_bodys; a falsy entry skips the binding profile
    :param max_bulk_size: maximum number of ports per bulk request
    :return: ids of the created (or already existing) shadow ports
    """
    if not port_bodys:
        return []
    create_bodys = []
    for port_body, agent in zip(port_bodys, agents):
        body = {
            'tenant_id': project_id,
            'admin_state_up': True,
            # shadow port name encodes the original port id
            'name': t_constants.shadow_port_name % port_body['id'],
            'network_id': net_id,
            'fixed_ips': [
                {'ip_address': port_body['fixed_ips'][0]['ip_address']}],
            'mac_address': port_body['mac_address'],
            'device_owner': t_constants.DEVICE_OWNER_SHADOW,
            portbindings.HOST_ID: port_body[portbindings.HOST_ID]
        }
        if agent:
            # pass agent info via the binding profile so the local plugin
            # can register a shadow agent for the port
            body[portbindings.PROFILE] = {
                t_constants.PROFILE_AGENT_TYPE: agent['type'],
                t_constants.PROFILE_TUNNEL_IP: agent['tunnel_ip']}
        create_bodys.append(body)
    client = self._get_client(target_pod['region_name'])
    ret_port_ids = []
    # issue bulk requests chunk by chunk to bound the request size
    for start in xrange(0, len(create_bodys), max_bulk_size):
        chunk = create_bodys[start:start + max_bulk_size]
        ret_port_ids.extend(
            self._prepare_shadow_ports_with_retry(ctx, client, chunk))
    return ret_port_ids
def prepare_shadow_port(self, ctx, project_id, target_pod, net_id,
port_body, agent=None):
host = port_body['binding:host_id']

View File

@ -57,6 +57,9 @@ LOG = log.getLogger(__name__)
class TricirclePlugin(plugin.Ml2Plugin):
__native_bulk_support = True
def __init__(self):
super(TricirclePlugin, self).__init__()
core_plugins_namespace = 'neutron.core_plugins'
@ -423,6 +426,19 @@ class TricirclePlugin(plugin.Ml2Plugin):
t_subnet = self.neutron_handle.handle_get(t_ctx, 'subnet', subnet_id)
port['fixed_ips'][0]['ip_address'] = t_subnet['gateway_ip']
def create_port_bulk(self, context, ports):
    """Create multiple ports in one request.

    NOTE(zhiyuan) currently this bulk operation is only used to optimize
    shadow port creation, so every request body is preprocessed as a
    shadow port before being handed to the core plugin.
    """
    for wrapper in ports['ports']:
        body = wrapper['port']
        self._create_shadow_agent(context, body)
        # make sure the network exists locally before the bulk creation
        self.get_network(context, body['network_id'])
        # shadow port name encodes the original top port id
        body['id'] = body['name'].split('_')[-1]
        helper.NetworkHelper.fill_binding_info(body)
        # clear binding profile set by xmanager
        body[portbindings.PROFILE] = {}
    return self.core_plugin.create_port_bulk(context, ports)
def create_port(self, context, port):
port_body = port['port']
network_id = port_body['network_id']

View File

@ -485,6 +485,11 @@ class FakeClient(object):
reason=_("updated subnet id not found"))
def create_ports(self, ctx, body):
    """Create a single port, or a bulk of ports when 'ports' is given."""
    if 'ports' in body:
        # emulate the bulk API by creating the ports one by one
        return [self.create_resources('port', ctx, {'port': port})
                for port in body['ports']]
    return self.create_resources('port', ctx, body)
def list_ports(self, ctx, filters=None):
@ -533,7 +538,7 @@ class FakeClient(object):
'mac_address': '',
'device_id': router_id,
'device_owner': 'network:router_gateway',
'binding:vif_type': helper.VIF_TYPE_OVS,
'binding:vif_type': portbindings.VIF_TYPE_OVS,
'binding:host_id': host_id
}})
except q_exceptions.IpAddressInUseClient:
@ -1412,21 +1417,33 @@ class PluginTest(unittest.TestCase,
def test_delete_port(self, mock_client_method, mock_plugin_method,
mock_context_method):
self._basic_pod_route_setup()
self._basic_port_setup()
fake_plugin = FakePlugin()
neutron_context = FakeNeutronContext()
tricircle_context = context.get_db_context()
mock_context_method.return_value = tricircle_context
project_id = 'project_id'
fake_plugin.delete_port(neutron_context, 'top_id_0')
fake_plugin.delete_port(neutron_context, 'top_id_1')
(t_net_id, t_subnet_id,
b_net_id, b_subnet_id) = self._prepare_network_subnet(
project_id, tricircle_context, 'pod_1', 1)
t_port_id1, _ = self._prepare_port_test(
project_id, tricircle_context, 'pod_1', 1, t_net_id, b_net_id,
t_subnet_id, b_subnet_id)
t_port_id2, _ = self._prepare_port_test(
project_id, tricircle_context, 'pod_1', 2, t_net_id, b_net_id,
t_subnet_id, b_subnet_id)
calls = [mock.call(neutron_context, 'top_id_0'),
mock.call(neutron_context, 'top_id_1')]
mock_plugin_method.assert_has_calls(calls)
mock_client_method.assert_called_once_with(tricircle_context,
'bottom_id_1', 'pod_id_1')
fake_plugin.delete_port(neutron_context, t_port_id1)
fake_plugin.delete_port(neutron_context, t_port_id2)
plugin_calls = [mock.call(neutron_context, t_port_id1),
mock.call(neutron_context, t_port_id2)]
client_calls = [
mock.call(tricircle_context, t_port_id1, 'pod_id_1'),
mock.call(tricircle_context, t_port_id2, 'pod_id_1')]
mock_plugin_method.assert_has_calls(plugin_calls)
mock_client_method.assert_has_calls(client_calls)
@patch.object(context, 'get_context_from_neutron_context')
@patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'update_network')
@ -1768,7 +1785,8 @@ class PluginTest(unittest.TestCase,
ip_address = ''
for subnet in TOP_SUBNETS:
if subnet['id'] == t_subnet_id:
ip_address = subnet['cidr'].replace('.0/24', '.5')
ip_address = subnet['cidr'].replace('.0/24',
'.%d' % (index + 4))
t_port = {
'id': t_port_id,
@ -1951,6 +1969,7 @@ class PluginTest(unittest.TestCase,
'device_owner': 'compute:None',
'fixed_ips': [{'subnet_id': t_subnets[0]['id'],
'ip_address': '10.0.%d.%d' % (index, ip_suffix)}],
'mac_address': 'fa:16:3e:d4:%02x:%02x' % (index, ip_suffix),
'security_groups': [],
'tenant_id': project_id
}
@ -1963,6 +1982,7 @@ class PluginTest(unittest.TestCase,
'device_owner': 'compute:None',
'fixed_ips': [{'subnet_id': t_subnets[0]['id'],
'ip_address': '10.0.%d.%d' % (index, ip_suffix)}],
'mac_address': 'fa:16:3e:d4:%02x:%02x' % (index, ip_suffix),
'security_groups': [],
'tenant_id': project_id
}
@ -3189,7 +3209,8 @@ class PluginTest(unittest.TestCase,
t_ctx, t_port['fixed_ips'][0]['subnet_id'], constants.RT_SD_SUBNET)
cp_network_mappings = db_api.get_bottom_mappings_by_top_id(
t_ctx, t_port['network_id'], constants.RT_SD_NETWORK)
self.assertEqual(1, len(cp_port_mappings))
# no resource routing entry for shadow port
self.assertEqual(0, len(cp_port_mappings))
self.assertEqual(1, len(cp_subnet_mappings))
self.assertEqual(1, len(cp_network_mappings))
@ -3270,12 +3291,10 @@ class PluginTest(unittest.TestCase,
self.assertIsNone(TOP_FLOATINGIPS[0]['fixed_ip_address'])
self.assertIsNone(TOP_FLOATINGIPS[0]['router_id'])
# both creating floating ip and booting instance in vxlan network will
# create shadow port, so we leave shadow port deletion work to central
# plugin, it will delete shadow port when deleting instance port
# no resource routing entry for shadow port
cp_port_mappings = db_api.get_bottom_mappings_by_top_id(
t_ctx, t_port_id, constants.RT_SD_PORT)
self.assertEqual(1, len(cp_port_mappings))
self.assertEqual(0, len(cp_port_mappings))
@patch.object(directory, 'get_plugin', new=fake_get_plugin)
@patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
@ -3517,7 +3536,7 @@ class PluginTest(unittest.TestCase,
t_port_id1, b_port_id1 = self._prepare_port(
TEST_TENANT_ID, t_ctx, 'pod_1', 1,
{'binding:host_id': 'host1',
'binding:vif_type': helper.VIF_TYPE_OVS})
'binding:vif_type': portbindings.VIF_TYPE_OVS})
update_body = {'port': {
'binding:profile': {
constants.PROFILE_REGION: 'pod_1',
@ -3530,7 +3549,7 @@ class PluginTest(unittest.TestCase,
t_port_id2, b_port_id2 = self._prepare_port(
TEST_TENANT_ID, t_ctx, 'pod_2', 1,
{'binding:host_id': 'host2',
'binding:vif_type': helper.VIF_TYPE_OVS})
'binding:vif_type': portbindings.VIF_TYPE_OVS})
update_body = {'port': {
'binding:profile': {
constants.PROFILE_REGION: 'pod_2',
@ -3541,11 +3560,13 @@ class PluginTest(unittest.TestCase,
fake_plugin.update_port(q_ctx, t_port_id2, update_body)
# shadow port is created
b_sd_port_id1 = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_port_id1, 'pod_2', constants.RT_SD_PORT)
client = FakeClient('pod_2')
b_sd_port1 = client.list_ports(
t_ctx, [{'key': 'name', 'comparator': 'eq',
'value': constants.shadow_port_name % t_port_id1}])[0]
# shadow port is updated to active
mock_update.assert_called_once_with(
t_ctx, b_sd_port_id1, {'port': {
t_ctx, b_sd_port1['id'], {'port': {
'binding:profile': {constants.PROFILE_FORCE_UP: 'True'}}})
# asynchronous job in pod_1 is registered
mock_setup.assert_called_once_with(t_ctx, 'pod_id_1', t_net_id)

View File

@ -13,14 +13,30 @@
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
import six
import unittest
import neutronclient.common.exceptions as q_cli_exceptions
from oslo_utils import uuidutils
from tricircle.network import helper
class FakeClient(object):
    """Fake bottom client simulating MAC conflicts on bulk port creation."""

    # indexes whose MAC addresses are treated as already in use
    CONFLICT_INDEXES = (1, 3, 6, 7, 8, 14, 19)

    def __init__(self, region_name=None):
        pass

    def create_ports(self, context, body):
        """Pretend to bulk-create ports, raising on conflicting MACs."""
        for port in body['ports']:
            # shadow port name ends with "...-<index>"
            index = int(port['name'].split('-')[-1])
            if index in self.CONFLICT_INDEXES:
                raise q_cli_exceptions.MacAddressInUseClient(
                    message='fa:16:3e:d4:01:%02x' % index)
            # shadow port name is "<prefix>_<original port id>"
            port['id'] = port['name'].split('_')[-1]
        return body['ports']
class HelperTest(unittest.TestCase):
def setUp(self):
self.helper = helper.NetworkHelper()
@ -88,3 +104,21 @@ class HelperTest(unittest.TestCase):
{'start': '10.0.1.6', 'end': '10.0.1.254'}],
body['subnet']['allocation_pools'])
self.assertEqual('10.0.1.5', body['subnet']['gateway_ip'])
@patch.object(helper.NetworkHelper, '_get_client', new=FakeClient)
def test_prepare_shadow_ports(self):
    """Bulk shadow port creation should return an id for every request,
    including the ones whose MAC conflicts are resolved by retry."""
    indexes = range(1, 20)
    port_bodys = []
    for i in indexes:
        port_bodys.append(
            {'id': 'port-id-%d' % i,
             'fixed_ips': [{'ip_address': '10.0.1.%d' % i}],
             'mac_address': 'fa:16:3e:d4:01:%02x' % i,
             'binding:host_id': 'host1'})
    agents = [{'type': 'Open vSwitch agent',
               'tunnel_ip': '192.168.1.101'} for _ in indexes]
    # we just want to test the logic, so we pass None for context, a
    # malformed dict for target_pod
    ret_port_ids = self.helper.prepare_shadow_ports(
        None, 'project_id', {'region_name': 'pod1'}, 'net-id-1',
        port_bodys, agents, 5)
    req_port_ids = [port['id'] for port in port_bodys]
    six.assertCountEqual(self, ret_port_ids, req_port_ids)

View File

@ -131,6 +131,14 @@ class FakeClient(object):
if res['id'] == _id:
res.update(body[resource])
def create_ports(self, ctx, body):
    """Create a single port, or a bulk of ports when 'ports' is given."""
    if 'ports' in body:
        # emulate the bulk API by creating the ports one by one
        return [self.create_resources('port', ctx, {'port': port})
                for port in body['ports']]
    return self.create_resources('port', ctx, body)
def list_ports(self, cxt, filters=None):
return self.list_resources('port', cxt, filters)
@ -535,6 +543,7 @@ class XManagerTest(unittest.TestCase):
'device_owner': 'compute:None',
'binding:vif_type': 'ovs',
'binding:host_id': 'host1',
'mac_address': 'fa:16:3e:d4:01:03',
'fixed_ips': [{'subnet_id': subnet1_id,
'ip_address': '10.0.1.3'}]})
BOTTOM2_PORT.append({'id': port2_id,
@ -542,6 +551,7 @@ class XManagerTest(unittest.TestCase):
'device_owner': 'compute:None',
'binding:vif_type': 'ovs',
'binding:host_id': 'host2',
'mac_address': 'fa:16:3e:d4:01:03',
'fixed_ips': [{'subnet_id': subnet1_id,
'ip_address': '10.0.1.4'}]})
db_api.ensure_agent_exists(

View File

@ -429,9 +429,10 @@ class XManager(PeriodicTasks):
agent_type = self.helper.get_agent_type_by_vif(
b_int_port['binding:vif_type'])
agent = db_api.get_agent_by_host_type(ctx, host, agent_type)
self.helper.prepare_shadow_port(
max_bulk_size = CONF.client.max_shadow_port_bulk_size
self.helper.prepare_shadow_ports(
ctx, project_id, b_ext_pod, t_int_net_id,
b_int_port, agent)
[b_int_port], [agent], max_bulk_size)
# create routing entries for shadow network and subnet so we
# can easily find them during central network and subnet
@ -614,7 +615,10 @@ class XManager(PeriodicTasks):
if router_id == b_router_id:
continue
for cidr, ips in six.iteritems(cidr_ips_map):
if cidr in router_ips_map[b_router_id]:
if router_ips_map[b_router_id].get(cidr):
# if the ip list is not empty, meaning that there are
# already vm ports in the pod of b_router, so no need
# to add extra routes
continue
for ip in ips:
route = {'nexthop': router_ew_bridge_ip_map[router_id],
@ -924,11 +928,13 @@ class XManager(PeriodicTasks):
'value': b_net_id},
{'key': 'device_owner', 'comparator': 'eq',
'value': constants.DEVICE_OWNER_SHADOW},
{'key': 'status', 'comparator': 'eq',
'value': q_constants.PORT_STATUS_ACTIVE},
{'key': 'fields', 'comparator': 'eq',
'value': 'id'}])
'value': ['id', 'status']}])
b_sw_port_ids = set([port['id'] for port in b_sw_ports])
if b_pod['pod_id'] == target_pod['pod_id']:
b_down_sw_port_ids = set(
[port['id'] for port in b_sw_ports if (
port['status'] == q_constants.PORT_STATUS_DOWN)])
pod_sw_port_ids_map[b_pod['pod_id']] = b_sw_port_ids
# port table has (network_id, device_owner) index
b_ports = b_client.list_ports(
@ -937,7 +943,7 @@ class XManager(PeriodicTasks):
{'key': 'fields', 'comparator': 'eq',
'value': ['id', 'binding:vif_type',
'binding:host_id', 'fixed_ips',
'device_owner']}])
'device_owner', 'mac_address']}])
LOG.debug('Shadow ports %s in pod %s %s',
b_sw_ports, target_pod_id, run_label)
LOG.debug('Ports %s in pod %s %s',
@ -970,6 +976,8 @@ class XManager(PeriodicTasks):
LOG.debug('Sync pod ids %s %s', sync_pod_list, run_label)
agent_info_map = {}
port_bodys = []
agents = []
for port_id in sync_port_ids:
port_body = port_info_map[port_id]
host = port_body['binding:host_id']
@ -989,15 +997,21 @@ class XManager(PeriodicTasks):
'host': host})
continue
agent_info_map[key] = agent
port_bodys.append(port_body)
agents.append(agent)
sw_port_id = self.helper.prepare_shadow_port(
ctx, project_id, target_pod, t_net_id, port_body, agent)
# value for key constants.PROFILE_FORCE_UP does not matter
update_body = {
'port': {
'binding:profile': {constants.PROFILE_FORCE_UP: 'True'}
}
max_bulk_size = CONF.client.max_shadow_port_bulk_size
sw_port_ids = self.helper.prepare_shadow_ports(
ctx, project_id, target_pod, t_net_id, port_bodys, agents,
max_bulk_size)
b_down_sw_port_ids = b_down_sw_port_ids | set(sw_port_ids)
# value for key constants.PROFILE_FORCE_UP does not matter
update_body = {
'port': {
'binding:profile': {constants.PROFILE_FORCE_UP: 'True'}
}
}
for sw_port_id in b_down_sw_port_ids:
self._get_client(target_pod['region_name']).update_ports(
ctx, sw_port_id, update_body)