
Modify ML2 driver to support Neutron trunk ports and subports.

Change-Id: I97f568ac49a518bea2b6d31b1ecc7d17c342269c
changes/45/493345/11
wei wang, 4 years ago
parent commit 0996ad1257
  1. networking_arista/common/db_lib.py (32)
  2. networking_arista/ml2/mechanism_arista.py (118)
  3. networking_arista/ml2/rpc/arista_eapi.py (109)
  4. networking_arista/ml2/rpc/arista_json.py (82)
  5. networking_arista/ml2/rpc/base.py (6)
  6. networking_arista/tests/unit/ml2/rpc/test_arista_eapi_rpc_wrapper.py (259)
  7. networking_arista/tests/unit/ml2/rpc/test_arista_json_rpc_wrapper.py (228)
  8. networking_arista/tests/unit/ml2/test_mechanism_arista.py (313)

networking_arista/common/db_lib.py (32)

@ -19,9 +19,11 @@ from neutron_lib.plugins.ml2 import api as driver_api
import neutron.db.api as db
from neutron.db import db_base_plugin_v2
from neutron.db.models import segment as segment_models
from neutron.db import securitygroups_db as sec_db
from neutron.db import segments_db
from neutron.plugins.ml2 import models as ml2_models
from neutron.services.trunk import models as trunk_models
from networking_arista.common import db as db_models
@ -454,6 +456,36 @@ def get_port_binding_level(filters):
filter_by(**filters).all())
def get_network_segments_by_port_id(port_id):
session = db.get_reader_session()
with session.begin():
segments = (session.query(segment_models.NetworkSegment,
ml2_models.PortBindingLevel).
join(ml2_models.PortBindingLevel).
filter_by(port_id=port_id).all())
return [segment[0] for segment in segments]
def get_trunk_port_by_subport_id(subport_id):
"""Returns trunk parent port based on sub port id."""
session = db.get_reader_session()
with session.begin():
subport = (session.query(trunk_models.SubPort).
filter_by(port_id=subport_id).first())
if subport:
trunk_id = subport.trunk_id
return get_trunk_port_by_trunk_id(trunk_id)
def get_trunk_port_by_trunk_id(trunk_id):
session = db.get_reader_session()
with session.begin():
trunk_port = (session.query(trunk_models.Trunk).
filter_by(id=trunk_id).first())
if trunk_port:
return trunk_port.port
class NeutronNets(db_base_plugin_v2.NeutronDbPluginV2,
sec_db.SecurityGroupDbMixin):
"""Access to Neutron DB.

networking_arista/ml2/mechanism_arista.py (118)

@ -13,15 +13,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import threading
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib import constants as n_const
from neutron_lib.plugins.ml2 import api as driver_api
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from neutron.services.trunk import constants as trunk_consts
from networking_arista._i18n import _, _LI, _LE
from networking_arista.common import constants
from networking_arista.common import db
@ -38,7 +43,6 @@ cfg.CONF.import_group('ml2_arista', 'networking_arista.common.config')
def pretty_log(tag, obj):
import json
log_data = json.dumps(obj, sort_keys=True, indent=4)
LOG.debug(tag)
LOG.debug(log_data)
@ -91,6 +95,10 @@ class AristaDriver(driver_api.MechanismDriver):
self.rpc.check_supported_features()
self.sg_handler = sec_group_callback.AristaSecurityGroupHandler(self)
registry.subscribe(self.set_subport,
trunk_consts.SUBPORTS, events.AFTER_CREATE)
registry.subscribe(self.unset_subport,
trunk_consts.SUBPORTS, events.AFTER_DELETE)
def get_workers(self):
return [arista_sync.AristaSyncWorker(self.rpc, self.ndb)]
@ -378,7 +386,8 @@ class AristaDriver(driver_api.MechanismDriver):
if any([device_owner in supported_device_owner,
device_owner.startswith('compute') and
device_owner != 'compute:probe',
device_owner.startswith('baremetal')]):
device_owner.startswith('baremetal'),
device_owner.startswith('trunk')]):
return True
LOG.debug('Unsupported device owner: %s', device_owner)
@ -625,6 +634,11 @@ class AristaDriver(driver_api.MechanismDriver):
host = context.host
is_vm_boot = device_id and device_owner
# When deleting a VM, the trunk port context has no device_owner.
# Keep device_owner as in the original port.
if not device_owner and orig_port.get('trunk_details'):
device_owner = orig_port['device_owner']
if not self._supported_device_owner(device_owner):
return
@ -651,11 +665,14 @@ class AristaDriver(driver_api.MechanismDriver):
# Return from here as port migration is already handled.
return
seg_info = self._bound_segments(context)
if not seg_info:
LOG.debug("Ignoring the update as the port is not managed by "
"Arista switches.")
return
# Check if it is trunk_port deletion case
seg_info = []
if not port.get('trunk_details') or host:
seg_info = self._bound_segments(context)
if not seg_info:
LOG.debug("Ignoring the update as the port is not managed by "
"Arista switches.")
return
with self.eos_sync_lock:
hostname = self._host_name(host)
@ -693,8 +710,10 @@ class AristaDriver(driver_api.MechanismDriver):
try:
orig_host = context.original_host
port_down = False
if(port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE):
# We care about port status only for DVR ports
if(port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE
or port.get('trunk_details')):
# We care about port status only for DVR ports and
# trunk ports
port_down = context.status == n_const.PORT_STATUS_DOWN
if orig_host and (port_down or host != orig_host or
@ -704,12 +723,14 @@ class AristaDriver(driver_api.MechanismDriver):
# connected to the port was deleted or it's in DOWN
# state. So delete the old port on the old host.
self._delete_port(orig_port, orig_host, tenant_id)
if(port_provisioned and net_provisioned and hostname and
is_vm_boot and not port_down and
device_id != n_const.DEVICE_ID_RESERVED_DHCP_PORT):
LOG.info(_LI("Port plugged into network"))
# Plug port into the network only if it exists in the db
# and is bound to a host and the port is up.
trunk_details = port.get('trunk_details')
self.rpc.plug_port_into_network(device_id,
hostname,
port_id,
@ -720,7 +741,9 @@ class AristaDriver(driver_api.MechanismDriver):
sg, orig_sg,
vnic_type,
segments=segments,
switch_bindings=bindings)
switch_bindings=bindings,
trunk_details=trunk_details
)
else:
LOG.info(_LI("Port not plugged into network"))
except arista_exc.AristaRpcError as err:
@ -803,10 +826,12 @@ class AristaDriver(driver_api.MechanismDriver):
# If we do not have network associated with this, ignore it
return
hostname = self._host_name(host)
trunk_details = port.get('trunk_details')
self.rpc.unplug_port_from_network(device_id, device_owner,
hostname, port_id, network_id,
tenant_id, sg, vnic_type,
switch_bindings=switch_bindings)
switch_bindings=switch_bindings,
trunk_details=trunk_details)
self.rpc.remove_security_group(sg, switch_bindings)
# if necessary, delete tenant as well.
@ -981,3 +1006,74 @@ class AristaDriver(driver_api.MechanismDriver):
msg = (_('Failed to delete ACL rule on EOS %s') % sgr)
LOG.exception(msg)
raise arista_exc.AristaSecurityGroupError(msg=msg)
def unset_subport(self, resource, event, trigger, **kwargs):
payload = kwargs['payload']
trunk_id = payload.trunk_id
subports = payload.subports
trunk_port = db_lib.get_trunk_port_by_trunk_id(trunk_id)
if trunk_port:
device_id = trunk_port.device_id
tenant_id = trunk_port.tenant_id
host = trunk_port.port_binding.host
vnic_type = trunk_port.port_binding.vnic_type
profile = trunk_port.port_binding.profile
if profile:
profile = json.loads(profile)
for subport in subports:
subport_id = subport.port_id
subport_current = self.ndb.get_port(subport_id)
subport_current['device_id'] = device_id
subport_current['binding:vnic_type'] = vnic_type
subport_current['binding:profile'] = profile
subport_current['device_owner'] = 'trunk:subport'
self._delete_port(subport_current, host, tenant_id)
else:
LOG.warning('Unable to unset the subport, no trunk port found')
def set_subport(self, resource, event, trigger, **kwargs):
payload = kwargs['payload']
trunk_id = payload.trunk_id
subports = payload.subports
device_owner = 'trunk:subport'
trunk_port = db_lib.get_trunk_port_by_trunk_id(trunk_id)
if not trunk_port:
return
device_id = trunk_port.device_id
tenant_id = trunk_port.tenant_id
host = trunk_port.port_binding.host
if not host:
return
hostname = self._host_name(host)
vnic_type = trunk_port.port_binding.vnic_type
profile = trunk_port.port_binding.profile
bindings = []
if profile:
profile = json.loads(profile)
bindings = profile.get('local_link_information', [])
for subport in subports:
subport_id = subport.port_id
subport_current = self.ndb.get_port(subport_id)
network_id = self.ndb.get_network_id_from_port_id(subport_id)
port_name = subport_current.get('name')
sg = subport_current.get('security_groups')
orig_sg = None
segments = db_lib.get_network_segments_by_port_id(subport_id)
self.rpc.plug_port_into_network(device_id,
hostname,
subport_id,
network_id,
tenant_id,
port_name,
device_owner,
sg, orig_sg,
vnic_type,
segments=segments,
switch_bindings=bindings)
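These callbacks fire outside of a port context, so they rebuild everything from the trunk payload and the parent port's binding. A minimal sketch of the payload shape set_subport/unset_subport consume, with hypothetical IDs and stand-in classes (the real payload object is emitted by the Neutron trunk service through the callbacks registry):

class FakeSubport(object):
    def __init__(self, port_id):
        self.port_id = port_id

class FakePayload(object):
    def __init__(self, trunk_id, subports):
        self.trunk_id = trunk_id
        self.subports = subports

payload = FakePayload('trunk-1', [FakeSubport('subport-1')])
# After initialize() the driver is subscribed, so the registry would call
# roughly:
#   driver.set_subport(trunk_consts.SUBPORTS, events.AFTER_CREATE,
#                      trigger=None, payload=payload)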

networking_arista/ml2/rpc/arista_eapi.py (109)

@ -25,6 +25,7 @@ import six
from networking_arista._i18n import _, _LI, _LW, _LE
from networking_arista.common import constants as const
from networking_arista.common import db_lib
from networking_arista.common import exceptions as arista_exc
from networking_arista.ml2.rpc.base import AristaRPCWrapperBase
@ -219,7 +220,7 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
def plug_port_into_network(self, device_id, host_id, port_id,
net_id, tenant_id, port_name, device_owner,
sg, orig_sg, vnic_type, segments,
switch_bindings=None):
switch_bindings=None, trunk_details=None):
if device_owner == n_const.DEVICE_OWNER_DHCP:
self.plug_dhcp_port_into_network(device_id,
host_id,
@ -229,7 +230,8 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
segments,
port_name)
elif (device_owner.startswith('compute') or
device_owner.startswith('baremetal')):
device_owner.startswith('baremetal') or
device_owner.startswith('trunk')):
if vnic_type == 'baremetal':
self.plug_baremetal_into_network(device_id,
host_id,
@ -238,9 +240,11 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
tenant_id,
segments,
port_name,
device_owner,
sg, orig_sg,
vnic_type,
switch_bindings)
switch_bindings,
trunk_details)
else:
self.plug_host_into_network(device_id,
host_id,
@ -248,7 +252,8 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
net_id,
tenant_id,
segments,
port_name)
port_name,
trunk_details)
elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self.plug_distributed_router_port_into_network(device_id,
host_id,
@ -259,7 +264,7 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
def unplug_port_from_network(self, device_id, device_owner, hostname,
port_id, network_id, tenant_id, sg, vnic_type,
switch_bindings=None):
switch_bindings=None, trunk_details=None):
if device_owner == n_const.DEVICE_OWNER_DHCP:
self.unplug_dhcp_port_from_network(device_id,
hostname,
@ -267,7 +272,8 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
network_id,
tenant_id)
elif (device_owner.startswith('compute') or
device_owner.startswith('baremetal')):
device_owner.startswith('baremetal') or
device_owner.startswith('trunk')):
if vnic_type == 'baremetal':
self.unplug_baremetal_from_network(device_id,
hostname,
@ -276,13 +282,15 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
tenant_id,
sg,
vnic_type,
switch_bindings)
switch_bindings,
trunk_details)
else:
self.unplug_host_from_network(device_id,
hostname,
port_id,
network_id,
tenant_id)
tenant_id,
trunk_details)
elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self.unplug_distributed_router_port_from_network(device_id,
port_id,
@ -290,7 +298,8 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
tenant_id)
def plug_host_into_network(self, vm_id, host, port_id,
network_id, tenant_id, segments, port_name):
network_id, tenant_id, segments, port_name,
trunk_details=None):
cmds = ['tenant %s' % tenant_id,
'vm id %s hostid %s' % (vm_id, host)]
if port_name:
@ -302,12 +311,28 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
cmds.extend(
'segment level %d id %s' % (level, segment['id'])
for level, segment in enumerate(segments))
if trunk_details and trunk_details.get('sub_ports'):
for subport in trunk_details['sub_ports']:
port_id = subport['port_id']
net_id = self._ndb.get_network_id_from_port_id(port_id)
filters = {'port_id': port_id}
segments = db_lib.get_port_binding_level(filters)
cmds.append('port id %s network-id %s' %
(port_id, net_id))
cmds.extend(
'segment level %d id %s' % (s.level, s.segment_id)
for s in segments
)
self._run_openstack_cmds(cmds)
def plug_baremetal_into_network(self, vm_id, host, port_id,
network_id, tenant_id, segments, port_name,
device_owner,
sg=None, orig_sg=None,
vnic_type=None, switch_bindings=None):
vnic_type=None, switch_bindings=None,
trunk_details=None):
# Basic error checking for baremetal deployments;
# note that the following method throws an exception
# if an error condition exists
@ -325,21 +350,44 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
if not binding:
# skip all empty entries
continue
if device_owner.startswith('trunk'):
vlan_type = 'allowed'
else:
vlan_type = 'native'
# Ensure that binding contains switch and port ID info
if binding['switch_id'] and binding['port_id']:
if port_name:
cmds.append('port id %s name "%s" network-id %s '
'type native switch-id %s switchport %s' %
'type %s switch-id %s switchport %s' %
(port_id, port_name, network_id,
binding['switch_id'], binding['port_id']))
vlan_type, binding['switch_id'],
binding['port_id']))
else:
cmds.append('port id %s network-id %s type native '
cmds.append('port id %s network-id %s type %s '
'switch-id %s switchport %s' %
(port_id, network_id, binding['switch_id'],
binding['port_id']))
(port_id, network_id, vlan_type,
binding['switch_id'],
binding['port_id']))
cmds.extend('segment level %d id %s' % (level,
segment['id'])
for level, segment in enumerate(segments))
if trunk_details and trunk_details.get('sub_ports'):
for subport in trunk_details['sub_ports']:
port_id = subport['port_id']
net_id = self._ndb.get_network_id_from_port_id(
port_id)
filters = {'port_id': port_id}
segments = db_lib.get_port_binding_level(filters)
cmds.append('port id %s network-id %s type allowed'
' switch-id %s switchport %s' %
(port_id, net_id, binding['switch_id'],
binding['port_id']))
cmds.extend(
'segment level %d id %s' %
(s.level, s.segment_id) for s in segments
)
else:
msg = _('switch and port ID not specified for baremetal')
LOG.error(msg)
@ -385,16 +433,22 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
self._run_openstack_cmds(cmds)
def unplug_host_from_network(self, vm_id, host, port_id,
network_id, tenant_id):
network_id, tenant_id, trunk_details=None):
cmds = ['tenant %s' % tenant_id,
'vm id %s hostid %s' % (vm_id, host),
'no port id %s' % port_id,
]
if trunk_details and trunk_details.get('sub_ports'):
cmds.extend(
'no port id %s' % subport['port_id']
for subport in trunk_details['sub_ports']
)
cmds.append('no port id %s' % port_id)
self._run_openstack_cmds(cmds)
def unplug_baremetal_from_network(self, vm_id, host, port_id,
network_id, tenant_id, sg, vnic_type,
switch_bindings=None):
switch_bindings=None,
trunk_details=None):
# Basic error checking for baremetal deployments;
# note that the following method throws an exception
# if an error condition exists
@ -403,6 +457,11 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
# Following is a temporary code for native VLANs - should be removed
cmds = ['tenant %s' % tenant_id]
cmds.append('instance id %s hostid %s type baremetal' % (vm_id, host))
if trunk_details and trunk_details.get('sub_ports'):
cmds.extend(
'no port id %s' % subport['port_id']
for subport in trunk_details['sub_ports']
)
cmds.append('no port id %s' % port_id)
self._run_openstack_cmds(cmds)
@ -564,7 +623,8 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
'segment level %d id %s' % (level, segment['id'])
for level, segment in enumerate(segments))
elif (device_owner.startswith('compute') or
device_owner.startswith('baremetal')):
device_owner.startswith('baremetal') or
device_owner.startswith('trunk')):
if vnic_type == 'baremetal':
append_cmd('instance id %s hostid %s type baremetal' %
(vm['vmId'], v_port['hosts'][0]))
@ -574,21 +634,26 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
if not binding or not isinstance(binding, dict):
# skip all empty entries
continue
if device_owner.startswith('trunk'):
vlan_type = 'allowed'
else:
vlan_type = 'native'
# Ensure that profile contains local link info
if binding['switch_id'] and binding['port_id']:
if port_name:
cmds.append('port id %s name "%s" '
'network-id %s type native '
'network-id %s type %s '
'switch-id %s switchport %s' %
(port_id, port_name,
network_id,
network_id, vlan_type,
binding['switch_id'],
binding['port_id']))
else:
cmds.append('port id %s network-id %s '
'type native '
'type %s '
'switch-id %s switchport %s' %
(port_id, network_id,
vlan_type,
binding['switch_id'],
binding['port_id']))
cmds.extend('segment level %d id %s' % (
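On the eAPI side the change is additive: after the parent port's own 'port id ... network-id ...' and 'segment level ...' commands, one more port/segment pair is appended per entry in trunk_details['sub_ports'] (with 'type allowed' in the baremetal case). A sketch of the command list the host path would emit for a parent with a single subport, using hypothetical IDs:

expected_cmds = [
    'tenant ten-1',
    'vm id vm-1 hostid host-1',
    'port id parent-port name "parent" network-id net-1',
    'segment level 0 id segment-1',
    # appended per subport found in trunk_details['sub_ports']:
    'port id subport-1 network-id net-2',
    'segment level 0 id sub-segment-1',
]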

networking_arista/ml2/rpc/arista_json.py (82)

@ -24,6 +24,7 @@ import six
from networking_arista._i18n import _, _LI, _LW, _LE
from networking_arista.common import constants as const
from networking_arista.common import db_lib
from networking_arista.common import exceptions as arista_exc
from networking_arista.ml2.rpc.base import AristaRPCWrapperBase
@ -356,11 +357,13 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
}
def _create_port_data(self, port_id, tenant_id, network_id, instance_id,
name, instance_type, hosts):
name, instance_type, hosts, device_owner=None):
vlan_type = 'allowed'
if instance_type in const.InstanceType.BAREMETAL_INSTANCE_TYPES:
vlan_type = 'native'
if device_owner and device_owner.startswith('trunk'):
vlan_type = 'allowed'
return {
'id': port_id,
@ -427,7 +430,8 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
if inst_id not in dhcpInst:
dhcpInst[inst_id] = instance
elif (device_owner.startswith('compute') or
device_owner.startswith('baremetal')):
device_owner.startswith('baremetal') or
device_owner.startswith('trunk')):
if vnic_type == 'baremetal':
instance_type = const.InstanceType.BAREMETAL
if inst_id not in baremetalInst:
@ -454,7 +458,8 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
port = self._create_port_data(port_id, tenant_id,
network_id, inst_id,
neutron_port.get('name'),
instance_type, v_port['hosts'])
instance_type, v_port['hosts'],
device_owner)
portInst.append(port)
if instance_type in const.InstanceType.VIRTUAL_INSTANCE_TYPES:
@ -513,11 +518,13 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
self.delete_instance_bulk(tenant_id, dhcp_id_list,
const.InstanceType.DHCP)
def delete_port(self, port_id, instance_id, instance_type):
def delete_port(self, port_id, instance_id, instance_type,
device_owner=None):
path = ('region/%s/port?portId=%s&id=%s&type=%s' %
(self.region, port_id, instance_id, instance_type))
port = self._create_port_data(port_id, None, None, instance_id,
None, instance_type, None)
None, instance_type, None,
device_owner)
return self._send_api_request(path, 'DELETE', [port])
def get_instance_ports(self, instance_id, instance_type):
@ -528,12 +535,13 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
def plug_port_into_network(self, device_id, host_id, port_id,
net_id, tenant_id, port_name, device_owner,
sg, orig_sg, vnic_type, segments,
switch_bindings=None):
switch_bindings=None, trunk_details=None):
device_type = ''
if device_owner == n_const.DEVICE_OWNER_DHCP:
device_type = const.InstanceType.DHCP
elif (device_owner.startswith('compute')
or device_owner.startswith('baremetal')):
or device_owner.startswith('baremetal')
or device_owner.startswith('trunk')):
if vnic_type == 'baremetal':
device_type = const.InstanceType.BAREMETAL
else:
@ -547,7 +555,8 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
self._create_tenant_if_needed(tenant_id)
instance = self._create_instance_data(device_id, host_id)
port = self._create_port_data(port_id, tenant_id, net_id, device_id,
port_name, device_type, [host_id])
port_name, device_type, [host_id],
device_owner)
url = 'region/%(region)s/%(device_type)s?tenantId=%(tenant_id)s' % {
'region': self.region,
'device_type': device_type,
@ -556,11 +565,45 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
self._send_api_request(url, 'POST', [instance])
self._send_api_request('region/' + self.region + '/port', 'POST',
[port])
if trunk_details and trunk_details.get('sub_ports'):
for subport in trunk_details['sub_ports']:
subport_id = subport['port_id']
subport_net_id = self._ndb.get_network_id_from_port_id(
subport_id)
subport_name = 'name_%s' % subport_id
sub_device_owner = 'trunk:subport'
port = self._create_port_data(subport_id, tenant_id,
subport_net_id, device_id,
subport_name, device_type,
[host_id], sub_device_owner)
self._send_api_request('region/' + self.region + '/port',
'POST', [port])
if device_type in const.InstanceType.VIRTUAL_INSTANCE_TYPES:
self.bind_port_to_host(port_id, host_id, net_id, segments)
if trunk_details and trunk_details.get('sub_ports'):
for subport in trunk_details['sub_ports']:
subport_id = subport['port_id']
subport_net_id = self._ndb.get_network_id_from_port_id(
subport_id)
sub_segments = db_lib.get_network_segments_by_port_id(
subport_id)
self.bind_port_to_host(subport_id, host_id,
subport_net_id, sub_segments)
elif device_type in const.InstanceType.BAREMETAL_INSTANCE_TYPES:
self.bind_port_to_switch_interface(port_id, host_id, net_id,
switch_bindings, segments)
if trunk_details and trunk_details.get('sub_ports'):
for subport in trunk_details['sub_ports']:
subport_id = subport['port_id']
subport_net_id = self._ndb.get_network_id_from_port_id(
subport_id)
sub_segments = db_lib.get_network_segments_by_port_id(
subport_id)
self.bind_port_to_switch_interface(subport_id, host_id,
subport_net_id,
switch_bindings,
sub_segments)
if sg:
self.apply_security_group(sg, switch_bindings)
else:
@ -571,12 +614,13 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
def unplug_port_from_network(self, device_id, device_owner, hostname,
port_id, network_id, tenant_id, sg, vnic_type,
switch_bindings=None):
switch_bindings=None, trunk_details=None):
device_type = ''
if device_owner == n_const.DEVICE_OWNER_DHCP:
device_type = const.InstanceType.DHCP
elif (device_owner.startswith('compute') or
device_owner.startswith('baremetal')):
device_owner.startswith('baremetal') or
device_owner.startswith('trunk')):
if vnic_type == 'baremetal':
device_type = const.InstanceType.BAREMETAL
else:
@ -588,11 +632,27 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
return
if device_type in const.InstanceType.VIRTUAL_INSTANCE_TYPES:
if trunk_details and trunk_details.get('sub_ports'):
for subport in trunk_details['sub_ports']:
subport_id = subport['port_id']
subport_device_owner = 'trunk:subport'
self.unbind_port_from_host(subport_id, hostname)
self.delete_port(subport_id, device_id, device_type,
subport_device_owner)
self.unbind_port_from_host(port_id, hostname)
elif device_type in const.InstanceType.BAREMETAL_INSTANCE_TYPES:
if trunk_details and trunk_details.get('sub_ports'):
for subport in trunk_details['sub_ports']:
subport_id = subport['port_id']
subport_device_owner = 'trunk:subport'
self.unbind_port_from_switch_interface(subport_id,
hostname,
switch_bindings)
self.delete_port(subport_id, device_id, device_type,
subport_device_owner)
self.unbind_port_from_switch_interface(port_id, hostname,
switch_bindings)
self.delete_port(port_id, device_id, device_type)
self.delete_port(port_id, device_id, device_type, device_owner)
port = self.get_instance_ports(device_id, device_type)
if not port:
# If the last port attached to an instance is deleted, cleanup the
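In the JSON RPC path every subport becomes its own port document with vlanType 'allowed', posted to the same region/<region>/port endpoint and then bound with bind_port_to_host or bind_port_to_switch_interface alongside the parent. A sketch of the document _create_port_data builds for a subport, with hypothetical values (field names follow the existing port payloads in the tests):

subport_doc = {
    'id': 'subport-1',
    'tenantId': 'ten-1',
    'networkId': 'net-2',
    'instanceId': 'vm-1',
    'name': 'name_subport-1',
    'instanceType': 'vm',
    'hosts': ['host-1'],
    'vlanType': 'allowed',  # trunk subports are tagged, never native
}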

networking_arista/ml2/rpc/base.py (6)

@ -173,7 +173,7 @@ class AristaRPCWrapperBase(object):
def plug_port_into_network(self, device_id, host_id, port_id,
net_id, tenant_id, port_name, device_owner,
sg, orig_sg, vnic_type, segments=None,
switch_bindings=None):
switch_bindings=None, trunk_details=None):
"""Generic routine plug a port of a VM instace into network.
:param device_id: globally unique identifier for the device
@ -188,12 +188,13 @@ class AristaRPCWrapperBase(object):
:param vnic_type: VNIC type for the port
:param segments: list of network segments the port is bound to
:param switch_bindings: List of switch_bindings
:param trunk_details: List of subports of a trunk port
"""
@abc.abstractmethod
def unplug_port_from_network(self, device_id, device_owner, hostname,
port_id, network_id, tenant_id, sg, vnic_type,
switch_bindings=None):
switch_bindings=None, trunk_details=None):
"""Removes a port from the device
:param device_id: globally unique identifier for the device
@ -201,6 +202,7 @@ class AristaRPCWrapperBase(object):
:param port_id: globally unique port ID that connects device to network
:param network_id: globally unique neutron network identifier
:param tenant_id: globally unique neutron tenant identifier
:param trunk_details: List of subports of a trunk port
"""
def _clean_acls(self, sg, failed_switch, switches_to_clean):
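Both abstract signatures now accept an optional trunk_details argument. A sketch of the shape the concrete wrappers expect, mirroring Neutron's trunk_details port extension; all values are hypothetical:

trunk_details = {
    'trunk_id': 'trunk-1',
    'sub_ports': [{
        'port_id': 'subport-1',
        'segmentation_type': 'vlan',
        'segmentation_id': 1002,
        'mac_address': 'fa:16:3e:00:00:01',
    }],
}
# Passed straight through, e.g.:
#   rpc.plug_port_into_network(device_id, host_id, port_id, net_id,
#                              tenant_id, port_name, device_owner, sg,
#                              orig_sg, vnic_type, segments=segments,
#                              switch_bindings=bindings,
#                              trunk_details=trunk_details)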

networking_arista/tests/unit/ml2/rpc/test_arista_eapi_rpc_wrapper.py (259)

@ -26,6 +26,8 @@ from networking_arista.common import constants
from networking_arista.common import db_lib
from networking_arista.common import exceptions as arista_exc
from networking_arista.ml2.rpc import arista_eapi
from networking_arista.tests.unit.ml2.test_arista_mechanism_driver import \
FakePortBindingLevel
import networking_arista.tests.unit.ml2.utils as utils
@ -113,6 +115,74 @@ class PositiveRPCWrapperValidConfigTestCase(testlib_api.SqlTestCase):
]
self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_plug_baremetal_into_network(self, mock_send_eapi_req):
tenant_id = 'ten-1'
network_id = 'net-id-1'
bm_id = 'bm-1'
port_id = 'p1'
host = 'host'
port_name = 'name_p1'
device_owner = 'compute:None'
segments = [{'segmentation_id': 1001,
'id': 'segment_id_1',
'network_type': 'vlan',
'is_dynamic': False}]
switch_bindings = {'local_link_information': [
{'port_id': 'Eth1', 'switch_id': 'switch-id-1',
'switch_info': 'switch-1'}]}
bindings = switch_bindings['local_link_information']
self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True)
self.drv.plug_baremetal_into_network(bm_id, host, port_id,
network_id, tenant_id,
segments, port_name,
device_owner,
None, None, 'baremetal',
bindings)
cmd1 = ['show openstack agent uuid']
cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-1',
'instance id bm-1 hostid host type baremetal',
'port id p1 name "name_p1" network-id net-id-1 '
'type native switch-id switch-id-1 switchport Eth1',
]
for level, segment in enumerate(segments):
cmd2.append('segment level %s id %s' % (level, segment['id']))
cmd2.append('exit')
self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_unplug_baremetal_from_network(self, mock_send_eapi_req):
tenant_id = 'ten-1'
network_id = 'net-id-1'
bm_id = 'bm-1'
port_id = 111
host = 'host'
switch_bindings = {'local_link_information': [
{'port_id': 'Eth1', 'switch_id': 'switch-id-1',
'switch_info': 'switch-1'}]}
bindings = switch_bindings['local_link_information']
self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True)
self.drv.unplug_baremetal_from_network(bm_id, host, port_id,
network_id, tenant_id,
None, 'baremetal',
bindings)
cmd1 = ['show openstack agent uuid']
cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-1',
'instance id bm-1 hostid host type baremetal',
'no port id 111',
]
self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_unplug_host_from_network(self, mock_send_eapi_req):
tenant_id = 'ten-1'
@ -734,3 +804,192 @@ class NegativeRPCWrapperTestCase(testlib_api.SqlTestCase):
with mock.patch.object(arista_eapi.LOG, 'error') as log_err:
self.assertRaises(arista_exc.AristaRpcError, drv.get_tenants)
log_err.assert_called_once_with(mock.ANY)
class RPCWrapperEapiValidConfigTrunkTestCase(testlib_api.SqlTestCase):
"""Test cases to test plug trunk port into network."""
def setUp(self):
super(RPCWrapperEapiValidConfigTrunkTestCase, self).setUp()
setup_valid_config()
ndb = mock.MagicMock()
self.drv = arista_eapi.AristaRPCWrapperEapi(ndb)
self.drv._server_ip = "10.11.12.13"
self.region = 'RegionOne'
arista_eapi.db_lib = mock.MagicMock()
@patch(EAPI_SEND_FUNC)
def test_plug_host_into_network(self, mock_send_eapi_req):
tenant_id = 'ten-1'
network_id = 'net-id-1'
vm_id = 'vm-1'
port_id = 111
host = 'host'
port_name = '111-port'
sub_segment_id = 'sub_segment_id_1'
sub_segmentation_id = 1002
sub_network_id = 'subnet-id'
subport_id = 222
segment_id = 'segment_id_1'
segments = [{'network_type': 'vlan', 'physical_network': 'default',
'segmentation_id': 1234, 'id': segment_id}]
binding_level = FakePortBindingLevel(subport_id, 0, 'vendor-1',
sub_segment_id)
subport_segments = [binding_level]
trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
'port_id': subport_id,
'segmentation_id': sub_segmentation_id,
'segmentation_type': 'vlan'}],
'trunk_id': 'trunk_id'}
self.drv._ndb.get_network_id_from_port_id.return_value = sub_network_id
arista_eapi.db_lib.get_port_binding_level.return_value = \
subport_segments
self.drv.plug_host_into_network(vm_id, host, port_id,
network_id, tenant_id, segments,
port_name, trunk_details=trunk_details)
cmd1 = ['show openstack agent uuid']
cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-1', 'vm id vm-1 hostid host',
'port id 111 name "111-port" network-id net-id-1',
]
for level, segment in enumerate(segments):
cmd2.append('segment level %s id %s' % (level, segment['id']))
cmd2.append('port id 222 network-id subnet-id')
for segment in subport_segments:
cmd2.append('segment level %s id %s' % (segment.level,
segment.segment_id))
self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_plug_baremetal_into_network(self, mock_send_eapi_req):
tenant_id = 'ten-2'
network_id = 'net-id-1'
bm_id = 'bm-1'
port_id = 'p1'
host = 'host'
port_name = 'name_p1'
device_owner = 'compute:None'
subport_id = 222
sub_segment_id = 'sub_segment_id_1'
segments = [{'segmentation_id': 1001,
'id': 'segment_id_1',
'network_type': 'vlan',
'is_dynamic': False}]
subport_net_id = 'net-id-2'
binding_level = FakePortBindingLevel(subport_id, 0, 'vendor-1',
sub_segment_id)
subport_segments = [binding_level]
trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
'port_id': 'p2',
'segmentation_id': 1002,
'segmentation_type': 'vlan'}],
'trunk_id': 'trunk_id'}
switch_bindings = {'local_link_information': [
{'port_id': 'Eth1', 'switch_id': 'switch-id-1',
'switch_info': 'switch-1'}]}
bindings = switch_bindings['local_link_information']
self.drv._ndb.get_network_id_from_port_id.return_value = subport_net_id
arista_eapi.db_lib.get_port_binding_level.return_value = \
subport_segments
self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True)
self.drv.plug_baremetal_into_network(bm_id, host, port_id,
network_id, tenant_id,
segments, port_name,
device_owner,
None, None, 'baremetal',
bindings, trunk_details)
cmd1 = ['show openstack agent uuid']
cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-2',
'instance id bm-1 hostid host type baremetal',
'port id p1 name "name_p1" network-id net-id-1 '
'type native switch-id switch-id-1 switchport Eth1',
]
for level, segment in enumerate(segments):
cmd2.append('segment level %s id %s' % (level, segment['id']))
cmd2.append('port id p2 network-id net-id-2 '
'type allowed switch-id switch-id-1 switchport Eth1', )
for segment in subport_segments:
cmd2.append('segment level %s id %s' % (segment.level,
segment.segment_id))
cmd2.append('exit')
self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_unplug_host_from_network(self, mock_send_eapi_req):
tenant_id = 'ten-2'
network_id = 'net-id-1'
vm_id = 'vm-2'
port_id = 111
host = 'host'
subport_id = 222
trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
'port_id': subport_id,
'segmentation_id': 123,
'segmentation_type': 'vlan'}],
'trunk_id': 'trunk_id'}
self.drv.unplug_host_from_network(vm_id, host, port_id,
network_id, tenant_id,
trunk_details)
cmd1 = ['show openstack agent uuid']
cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-2', 'vm id vm-2 hostid host',
'no port id 222',
'no port id 111',
]
self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_unplug_baremetal_from_network(self, mock_send_eapi_req):
tenant_id = 'ten-2'
network_id = 'net-id-1'
bm_id = 'bm-2'
port_id = 111
host = 'host'
subport_id = 222
trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
'port_id': subport_id,
'segmentation_id': 123,
'segmentation_type': 'vlan'}],
'trunk_id': 'trunk_id'}
switch_bindings = {'local_link_information': [
{'port_id': 'Eth1', 'switch_id': 'switch-id-1',
'switch_info': 'switch-1'}]}
bindings = switch_bindings['local_link_information']
self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True)
self.drv.unplug_baremetal_from_network(bm_id, host, port_id,
network_id, tenant_id,
None, 'baremetal',
bindings, trunk_details)
cmd1 = ['show openstack agent uuid']
cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-2',
'instance id bm-2 hostid host type baremetal',
'no port id 222',
'no port id 111',
]
self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
def _verify_send_eapi_request_calls(self, mock_send_eapi_req, cmds,
commands_to_log=None):
calls = []
calls.extend(
mock.call(cmds=cmd, commands_to_log=log_cmd)
for cmd, log_cmd in six.moves.zip(cmds, commands_to_log or cmds))
mock_send_eapi_req.assert_has_calls(calls)

networking_arista/tests/unit/ml2/rpc/test_arista_json_rpc_wrapper.py (228)

@ -655,3 +655,231 @@ class TestAristaJSONRPCWrapper(testlib_api.SqlTestCase):
[{'id': 'router1'}])
]
self._verify_send_api_request_call(mock_send_api_req, calls)
class RPCWrapperJSONValidConfigTrunkTestCase(testlib_api.SqlTestCase):
"""Test cases to test plug trunk port into network. """
def setUp(self):
super(RPCWrapperJSONValidConfigTrunkTestCase, self).setUp()
setup_valid_config()
ndb = mock.MagicMock()
self.drv = arista_json.AristaRPCWrapperJSON(ndb)
self.drv._server_ip = "10.11.12.13"
self.region = 'RegionOne'
arista_json.db_lib = mock.MagicMock()
@patch(JSON_SEND_FUNC)
def test_plug_virtual_trunk_port_into_network(self, mock_send_api_req):
# vm
tenant_id = 'ten-1'
network_id = 'net-id-1'
vm_id = 'vm-1'
port_id = 'p1'
host = 'host'
port_name = 'name_p1'
subport_net_id = 'net-id-2'
segments = [{'segmentation_id': 1001,
'id': 'segment_id_1',
'network_type': 'vlan',
'is_dynamic': False}]
subport_segments = [{'id': 'sub_segment_id_1',
'segmentation_id': 1002,
'network_type': 'vlan',
'is_dynamic': False}]
trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
'port_id': 'p2',
'segmentation_id': 1002,
'segmentation_type': 'vlan'}],
'trunk_id': 'trunk_id'}
self.drv._ndb.get_network_id_from_port_id.return_value = subport_net_id
arista_json.db_lib.get_network_segments_by_port_id.return_value = \
subport_segments
self.drv.plug_port_into_network(vm_id, host, port_id, network_id,
tenant_id, port_name,
'compute', None, None, None, segments,
trunk_details=trunk_details)
calls = [
('region/RegionOne/vm?tenantId=ten-1', 'POST',
[{'id': 'vm-1', 'hostId': 'host'}]),
('region/RegionOne/port', 'POST',
[{'id': 'p1', 'hosts': ['host'], 'tenantId': 'ten-1',
'networkId': 'net-id-1', 'instanceId': 'vm-1',
'name': 'name_p1',
'instanceType': 'vm', 'vlanType': 'allowed'}]),
('region/RegionOne/port', 'POST',
[{'id': 'p2', 'hosts': ['host'], 'tenantId': 'ten-1',
'networkId': 'net-id-2', 'instanceId': 'vm-1',
'name': 'name_p2',
'instanceType': 'vm', 'vlanType': 'allowed'}]),
('region/RegionOne/port/p1/binding', 'POST',
[{'portId': 'p1', 'hostBinding': [{'host': 'host', 'segment': [{
'id': 'segment_id_1', 'type': 'vlan', 'segmentationId': 1001,
'networkId': 'net-id-1', 'segment_type': 'static'}]}]}]),
('region/RegionOne/port/p2/binding', 'POST',
[{'portId': 'p2', 'hostBinding': [{'host': 'host', 'segment': [{
'id': 'sub_segment_id_1', 'type': 'vlan',
'segmentationId': 1002,
'networkId': 'net-id-2', 'segment_type': 'static'}]}]}]),
]
self._verify_send_api_request_call(mock_send_api_req, calls)
@patch(JSON_SEND_FUNC)
def test_plug_baremetal_trunk_port_into_network(self, mock_send_api_req):
# baremetal
tenant_id = 'ten-2'
network_id = 'net-id-1'
bm_id = 'bm-1'
port_id = 'p1'
host = 'host'
port_name = 'name_p1'
sg = {'id': 'security-group-1'}
segments = [{'segmentation_id': 1111,
'id': 'segment_id_1',
'network_type': 'vlan',
'is_dynamic': False}]
subport_net_id = 'net-id-2'
subport_segments = [{'id': 'sub_segment_id_1',
'segmentation_id': 1112,
'network_type': 'vlan',
'is_dynamic': False}]
trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
'port_id': 'p2',
'segmentation_id': 1112,
'segmentation_type': 'vlan'}],
'trunk_id': 'trunk_id'}
switch_bindings = {'local_link_information': [
{'port_id': 'Eth1', 'switch_id': 'switch-id-1',
'switch_info': 'switch-1'}]}
bindings = switch_bindings['local_link_information']
self.drv._ndb.get_network_id_from_port_id.return_value = subport_net_id
arista_json.db_lib.get_network_segments_by_port_id.return_value = \
subport_segments
self.drv.plug_port_into_network(bm_id, host, port_id, network_id,
tenant_id, port_name,
'baremetal', sg, None, 'baremetal',
segments, bindings,
trunk_details=trunk_details)
calls = [
('region/RegionOne/baremetal?tenantId=ten-2', 'POST',
[{'id': 'bm-1', 'hostId': 'host'}]),
('region/RegionOne/port', 'POST',
[{'id': 'p1', 'hosts': ['host'], 'tenantId': 'ten-2',
'networkId': 'net-id-1', 'instanceId': 'bm-1',
'name': 'name_p1',
'instanceType': 'baremetal', 'vlanType': 'native'}]),
('region/RegionOne/port', 'POST',
[{'id': 'p2', 'hosts': ['host'], 'tenantId': 'ten-2',
'networkId': 'net-id-2', 'instanceId': 'bm-1',
'name': 'name_p2',
'instanceType': 'baremetal', 'vlanType': 'allowed'}]),
('region/RegionOne/port/p1/binding', 'POST',
[{'portId': 'p1', 'switchBinding': [
{'host': 'host', 'switch': 'switch-id-1',
'interface': 'Eth1', 'segment':
[{'id': 'segment_id_1', 'type': 'vlan',
'segmentationId': 1111, 'networkId': 'net-id-1',
'segment_type': 'static'}]}]}]),
('region/RegionOne/port/p2/binding', 'POST',
[{'portId': 'p2', 'switchBinding':
[{'host': 'host', 'switch': 'switch-id-1',
'interface': 'Eth1', 'segment':
[{'id': 'sub_segment_id_1', 'type': 'vlan',
'segmentationId': 1112, 'networkId': 'net-id-2',
'segment_type': 'static'}]}]}]),
]
self._verify_send_api_request_call(mock_send_api_req, calls)
@patch(JSON_SEND_FUNC)
@patch('networking_arista.ml2.rpc.arista_json.AristaRPCWrapperJSON.'
'get_instance_ports')
def test_unplug_virtual_trunk_port_from_network(self,
mock_get_instance_ports,
mock_send_api_req):
# trunk port
trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
'port_id': 'subport',
'segmentation_id': 1001,
'segmentation_type': 'vlan'}],
'trunk_id': 'trunk_id'}
mock_get_instance_ports.return_value = []
self.drv.unplug_port_from_network('vm1', 'compute', 'h1', 'trunk_port',
'n1', 't1', None, None,
trunk_details=trunk_details)
subport = self.drv._create_port_data('subport', None, None, 'vm1',
None, 'vm', None)
trunk_port = self.drv._create_port_data('trunk_port', None, None,
'vm1', None, 'vm', None)
calls = [
('region/RegionOne/port/subport/binding', 'DELETE',
[{'portId': 'subport', 'hostBinding': [{'host': 'h1'}]}]),
('region/RegionOne/port?portId=subport&id=vm1&type=vm',
'DELETE', [subport]),
('region/RegionOne/port/trunk_port/binding', 'DELETE',
[{'portId': 'trunk_port', 'hostBinding': [{'host': 'h1'}]}]),
('region/RegionOne/port?portId=trunk_port&id=vm1&type=vm',
'DELETE', [trunk_port]),
('region/RegionOne/vm', 'DELETE', [{'id': 'vm1'}])
]
self._verify_send_api_request_call(mock_send_api_req, calls)
@patch(JSON_SEND_FUNC)
@patch('networking_arista.ml2.rpc.arista_json.AristaRPCWrapperJSON.'
'get_instance_ports')
def test_unplug_baremetal_trunk_port_from_network(self,
mock_get_instance_ports,
mock_send_api_req):
# trunk port
trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
'port_id': 'subport',
'segmentation_id': 1001,
'segmentation_type': 'vlan'}],
'trunk_id': 'trunk_id'}
mock_get_instance_ports.return_value = []
switch_bindings = [{'switch_id': 'switch01', 'port_id': 'Ethernet1'}]
self.drv.unplug_port_from_network('bm1', 'baremetal', 'h1', 'p1', 'n1',
't1', None, 'baremetal',
switch_bindings, trunk_details)
subport = self.drv._create_port_data('subport', None, None, 'bm1',
None, 'baremetal', None,
'trunk:subport')
trunk_port = self.drv._create_port_data('p1', None, None, 'bm1',
None, 'baremetal', None)
calls = [
('region/RegionOne/port/subport/binding', 'DELETE',
[{'portId': 'subport', 'switchBinding':
[{'host': 'h1', 'switch': 'switch01', 'segment': [],
'interface': 'Ethernet1'}]}]),
('region/RegionOne/port?portId=subport&id=bm1&type=baremetal',
'DELETE', [subport]),
('region/RegionOne/port/p1/binding', 'DELETE',
[{'portId': 'p1', 'switchBinding':
[{'host': 'h1', 'switch': 'switch01', 'segment': [],
'interface': 'Ethernet1'}]}]),
('region/RegionOne/port?portId=p1&id=bm1&type=baremetal',
'DELETE', [trunk_port]),
('region/RegionOne/baremetal', 'DELETE', [{'id': 'bm1'}])
]
self._verify_send_api_request_call(mock_send_api_req, calls)
def _verify_send_api_request_call(self, mock_send_api_req, calls,
unordered_dict_list=False):
if unordered_dict_list:
wrapper = functools.partial(_UnorderedDictList, sort_key='id')
else:
wrapper = lambda x: x
expected_calls = [
mock.call(c[0], c[1], *(wrapper(d) for d in c[2:])) for c in calls
]
mock_send_api_req.assert_has_calls(expected_calls, any_order=True)

networking_arista/tests/unit/ml2/test_mechanism_arista.py (313)

@ -18,6 +18,10 @@ from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_const
from neutron_lib.plugins.ml2 import api as driver_api
from neutron.db import models_v2
from neutron.plugins.ml2 import models as port_models
from neutron.services.trunk import callbacks
from neutron.services.trunk import models as trunk_models
from neutron.tests.unit import testlib_api
from networking_arista.ml2 import mechanism_arista
@ -528,7 +532,8 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
mock.call.unplug_port_from_network(device_id, 'compute', host_id,
port_id, network_id, tenant_id,
None, vnic_type,
switch_bindings=profile),
switch_bindings=profile,
trunk_details=None),
mock.call.remove_security_group(None, profile),
mock.call.num_nets_provisioned(tenant_id),
mock.call.num_vms_provisioned(tenant_id),
@ -578,7 +583,8 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
port_id, network_id,
INTERNAL_TENANT_ID, None,
vnic_type,
switch_bindings=profile),
switch_bindings=profile,
trunk_details=None),
mock.call.remove_security_group(None, profile),
mock.call.num_nets_provisioned(INTERNAL_TENANT_ID),
mock.call.num_vms_provisioned(INTERNAL_TENANT_ID),
@ -593,6 +599,201 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
mechanism_arista.db_lib.assert_has_calls(expected_calls)
def test_delete_trunk_port_postcommit(self):
# trunk port
tenant_id = 'ten-3'
network_id = 'net3-id'
segmentation_id = 1003
vm_id = 'vm3'
trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
'port_id': 'subport_id',
'segmentation_id': 123,
'segmentation_type': 'vlan'}],
'trunk_id': 'trunk_id'}
network_context = self._get_network_context(tenant_id,
network_id,
segmentation_id,
False)
port_context = self._get_port_context(tenant_id,
network_id,
vm_id,
network_context)
mechanism_arista.db_lib.num_nets_provisioned.return_value = 0
mechanism_arista.db_lib.num_vms_provisioned.return_value = 0
mechanism_arista.db_lib.is_network_provisioned.return_value = True
port = port_context.current
port['trunk_details'] = trunk_details
device_id = port['device_id']
host_id = port['binding:host_id']
port_id = port['id']
vnic_type = port['binding:vnic_type']
profile = port['binding:profile']
network = {'tenant_id': tenant_id}
self.drv.ndb.get_network_from_net_id.return_value = [network]
physnet = dict(physnet='default')
self.fake_rpc.get_physical_network.return_value = physnet
self.drv.rpc.hpb_supported.return_value = True
self.drv.delete_port_postcommit(port_context)
expected_calls = [
mock.call.NeutronNets(),
mock.call.get_physical_network(host_id),
mock.call.is_network_provisioned(tenant_id, network_id, None,
None),
mock.call.unplug_port_from_network(device_id, 'compute', host_id,
port_id, network_id, tenant_id,
None, vnic_type,
switch_bindings=profile,
trunk_details=trunk_details),
mock.call.remove_security_group(None, profile),
mock.call.num_nets_provisioned(tenant_id),
mock.call.num_vms_provisioned(tenant_id),
mock.call.forget_tenant(tenant_id),
mock.call.delete_tenant(tenant_id),
mock.call.hpb_supported(),