Browse Source

Revert "Remove the EAPI rpc implementation"

This change broke the Arista CI. We also discovered that the Arista CI doesn't apply networking-arista changes when it runs — only neutron changes — which is why the CI missed this breakage.

This reverts commit a3aa15abd6.

Change-Id: Ida3fa67ee0290f56c66be0e3364ef5491c147eea
changes/71/509671/1
Mitchell Jameson 4 years ago
parent
commit
f7d5b1b4b6
  1. 1
      devstack/plugin.sh
  2. 2
      devstack/settings
  3. 6
      networking_arista/common/config.py
  4. 10
      networking_arista/common/constants.py
  5. 29
      networking_arista/ml2/arista_sync.py
  6. 40
      networking_arista/ml2/mechanism_arista.py
  7. 610
      networking_arista/ml2/rpc/arista_eapi.py
  8. 14
      networking_arista/ml2/rpc/arista_json.py
  9. 230
      networking_arista/ml2/rpc/base.py
  10. 930
      networking_arista/tests/unit/ml2/rpc/test_arista_eapi_rpc_wrapper.py
  11. 3
      networking_arista/tests/unit/ml2/test_arista_sync.py
  12. 40
      networking_arista/tests/unit/ml2/test_mechanism_arista.py

1
devstack/plugin.sh

@ -18,6 +18,7 @@ function configure_arista() {
iniset $ARISTA_ML2_CONF_FILE ml2_arista eapi_host $ARISTA_EAPI_HOST
iniset $ARISTA_ML2_CONF_FILE ml2_arista eapi_username $ARISTA_EAPI_USERNAME
iniset $ARISTA_ML2_CONF_FILE ml2_arista eapi_password $ARISTA_EAPI_PASSWORD
iniset $ARISTA_ML2_CONF_FILE ml2_arista api_type $ARISTA_API_TYPE
iniset $ARISTA_ML2_CONF_FILE ml2_arista region_name $ARISTA_REGION_NAME
if [ -n "${ARISTA_USE_FQDN+x}" ]; then

2
devstack/settings

@ -6,4 +6,6 @@ ARISTA_DIR=${ARISTA_DIR:-$DEST/networking-arista}
ARISTA_ML2_CONF_SAMPLE=$ARISTA_DIR/etc/ml2_conf_arista.ini
ARISTA_ML2_CONF_FILE=${ARISTA_ML2_CONF_FILE:-"$NEUTRON_CONF_DIR/ml2_conf_arista.ini"}
ARISTA_API_TYPE=${ARISTA_API_TYPE:-"EAPI"}
ARISTA_REGION_NAME=${ARISTA_REGION_NAME:-"$REGION_NAME"}

6
networking_arista/common/config.py

@ -91,6 +91,12 @@ ARISTA_DRIVER_OPTS = [
'172.13.23.56:admin:admin, .... '
'This is required if sec_group_support is set to '
'"True"')),
cfg.StrOpt('api_type',
default='JSON',
help=_('Tells the plugin to use a sepcific API interfaces '
'to communicate with CVX. Valid options are:'
'EAPI - Use EOS\' extensible API.'
'JSON - Use EOS\' JSON/REST API.')),
cfg.ListOpt('managed_physnets',
default=[],
help=_('This is a comma separated list of physical networks '

10
networking_arista/common/constants.py

@ -23,8 +23,18 @@ UNABLE_TO_DELETE_DEVICE_MSG = _('Unable to delete device')
INTERNAL_TENANT_ID = 'INTERNAL-TENANT-ID'
MECHANISM_DRV_NAME = 'arista'
# Insert a heartbeat command every 100 commands
HEARTBEAT_INTERVAL = 100
# Commands dict keys
CMD_SYNC_HEARTBEAT = 'SYNC_HEARTBEAT'
CMD_REGION_SYNC = 'REGION_SYNC'
CMD_INSTANCE = 'INSTANCE'
# EAPI error messages of interest
ERR_CVX_NOT_LEADER = 'only available on cluster leader'
ERR_DVR_NOT_SUPPORTED = 'EOS version on CVX does not support DVR'
BAREMETAL_NOT_SUPPORTED = 'EOS version on CVX dpes not support Baremetal'
# Flat network constant
NETWORK_TYPE_FLAT = 'flat'

29
networking_arista/ml2/arista_sync.py

@ -148,6 +148,7 @@ class SyncService(object):
try:
# Register with EOS to ensure that it has correct credentials
self._rpc.register_with_eos(sync=True)
self._rpc.check_supported_features()
eos_tenants = self._rpc.get_tenants()
except arista_exc.AristaRpcError:
LOG.warning(constants.EOS_UNREACHABLE_MSG)
@ -229,17 +230,25 @@ class SyncService(object):
if vms_to_delete:
self._rpc.delete_vm_bulk(tenant, vms_to_delete, sync=True)
if routers_to_delete:
self._rpc.delete_instance_bulk(
tenant,
routers_to_delete,
constants.InstanceType.ROUTER,
sync=True)
if self._rpc.bm_and_dvr_supported():
self._rpc.delete_instance_bulk(
tenant,
routers_to_delete,
constants.InstanceType.ROUTER,
sync=True)
else:
LOG.info(constants.ERR_DVR_NOT_SUPPORTED)
if bms_to_delete:
self._rpc.delete_instance_bulk(
tenant,
bms_to_delete,
constants.InstanceType.BAREMETAL,
sync=True)
if self._rpc.bm_and_dvr_supported():
self._rpc.delete_instance_bulk(
tenant,
bms_to_delete,
constants.InstanceType.BAREMETAL,
sync=True)
else:
LOG.info(constants.BAREMETAL_NOT_SUPPORTED)
if nets_to_delete:
self._rpc.delete_network_bulk(tenant, nets_to_delete,
sync=True)

40
networking_arista/ml2/mechanism_arista.py

@ -77,11 +77,22 @@ class AristaDriver(driver_api.MechanismDriver):
self.eapi = rpc
else:
self.eapi = AristaRPCWrapperEapi(self.ndb)
self.rpc = AristaRPCWrapperJSON(self.ndb)
api_type = confg['api_type'].upper()
if api_type == 'EAPI':
LOG.info("Using EAPI for RPC")
self.rpc = AristaRPCWrapperEapi(self.ndb)
elif api_type == 'JSON':
LOG.info("Using JSON for RPC")
self.rpc = AristaRPCWrapperJSON(self.ndb)
else:
msg = "RPC mechanism %s not recognized" % api_type
LOG.error(msg)
raise arista_exc.AristaRpcError(msg=msg)
def initialize(self):
if self.rpc.check_cvx_availability():
self.rpc.register_with_eos()
self.rpc.check_supported_features()
self.sg_handler = sec_group_callback.AristaSecurityGroupHandler(self)
registry.subscribe(self.set_subport,
@ -97,6 +108,12 @@ class AristaDriver(driver_api.MechanismDriver):
network = context.current
segments = context.network_segments
if not self.rpc.hpb_supported():
# Hierarchical port binding is not supported by CVX, only
# allow VLAN network type.
if(segments and
segments[0][driver_api.NETWORK_TYPE] != n_const.TYPE_VLAN):
return
network_id = network['id']
tenant_id = network['tenant_id'] or constants.INTERNAL_TENANT_ID
with self.eos_sync_lock:
@ -195,6 +212,16 @@ class AristaDriver(driver_api.MechanismDriver):
"""Send network delete request to Arista HW."""
network = context.current
segments = context.network_segments
if not self.rpc.hpb_supported():
# Hierarchical port binding is not supported by CVX, only
# send the request if network type is VLAN.
if (segments and
segments[0][driver_api.NETWORK_TYPE] != n_const.TYPE_VLAN):
# If network type is not VLAN, do nothing
return
# No need to pass segments info when calling delete_network as
# HPB is not supported.
segments = []
network_id = network['id']
tenant_id = network['tenant_id'] or constants.INTERNAL_TENANT_ID
with self.eos_sync_lock:
@ -307,6 +334,11 @@ class AristaDriver(driver_api.MechanismDriver):
"found", {'port': port.get('id')})
continue
if segment[driver_api.NETWORK_TYPE] == n_const.TYPE_VXLAN:
# Check if CVX supports HPB
if not self.rpc.hpb_supported():
LOG.debug("bind_port: HPB is not supported")
return
# The physical network is connected to arista switches,
# allocate dynamic segmentation id to bind the port to
# the network that the port belongs to.
@ -663,7 +695,7 @@ class AristaDriver(driver_api.MechanismDriver):
segmentation_id=seg[driver_api.SEGMENTATION_ID]):
net_provisioned = False
segments = []
if net_provisioned:
if net_provisioned and self.rpc.hpb_supported():
segments = seg_info
all_segments = self.ndb.get_all_network_segments(
network_id, context=context._plugin_context)
@ -814,6 +846,10 @@ class AristaDriver(driver_api.MechanismDriver):
param tenant_id: The tenant which the port belongs to
"""
if not self.rpc.hpb_supported():
# Returning as HPB not supported by CVX
return
port = context.current
network_id = port.get('network_id')

610
networking_arista/ml2/rpc/arista_eapi.py

@ -14,7 +14,10 @@
# limitations under the License.
import json
import socket
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_const
from oslo_config import cfg
from oslo_log import log as logging
import requests
@ -22,6 +25,7 @@ import six
from networking_arista._i18n import _, _LI, _LW, _LE
from networking_arista.common import constants as const
from networking_arista.common import db_lib
from networking_arista.common import exceptions as arista_exc
from networking_arista.ml2.rpc.base import AristaRPCWrapperBase
@ -34,6 +38,11 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
# The cli_commands dict stores the mapping between the CLI command key
# and the actual CLI command.
self.cli_commands = {
'timestamp': [
'show openstack config region %s timestamp' % self.region],
const.CMD_REGION_SYNC: 'region %s sync' % self.region,
const.CMD_INSTANCE: None,
const.CMD_SYNC_HEARTBEAT: 'sync heartbeat',
'resource-pool': [],
'features': {},
}
@ -120,6 +129,24 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
LOG.warning(msg)
raise
def check_supported_features(self):
    """Probe CVX for optional EOS capabilities used by this driver.

    Records results in self.cli_commands:
      - const.CMD_INSTANCE: 'instance' when EOS accepts the 'instance'
        CLI token (needed for baremetal/DVR), else None.
      - 'features': the dict reported by 'show openstack features'
        (empty when the command is unavailable).

    Never raises: any failure is treated as "feature unavailable".
    """
    cmd = ['show openstack instances']
    try:
        self._run_eos_cmds(cmd)
        self.cli_commands[const.CMD_INSTANCE] = 'instance'
    except Exception as err:
        # AristaRpcError subclasses Exception, so a single broad handler
        # covers the original redundant (AristaRpcError, Exception) tuple.
        self.cli_commands[const.CMD_INSTANCE] = None
        LOG.warning(_LW("'instance' command is not available on EOS "
                        "because of %s"), err)

    # Get list of supported openstack features by CVX
    cmd = ['show openstack features']
    try:
        resp = self._run_eos_cmds(cmd)
        self.cli_commands['features'] = resp[0].get('features', {})
    except Exception:
        self.cli_commands['features'] = {}
def check_vlan_type_driver_commands(self):
"""Checks the validity of CLI commands for Arista's VLAN type driver.
@ -137,6 +164,10 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
_LW("'resource-pool' command '%s' is not available on EOS"),
cmd)
def _heartbeat_required(self, sync, counter=0):
return (sync and self.cli_commands[const.CMD_SYNC_HEARTBEAT] and
(counter % const.HEARTBEAT_INTERVAL) == 0)
def get_vlan_assignment_uuid(self):
"""Returns the UUID for the region's vlan assignment on CVX
@ -167,6 +198,583 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
'availableVlans': '',
'allocatedVlans': ''}
def get_tenants(self):
    """Return the per-tenant state EOS holds for this region."""
    output = self._run_eos_cmds(
        ['show openstack config region %s' % self.region])
    return output[0]['tenants']
def bm_and_dvr_supported(self):
    """True when the 'instance' command probe succeeded on EOS."""
    instance_token = self.cli_commands[const.CMD_INSTANCE]
    return instance_token == 'instance'
def _baremetal_support_check(self, vnic_type):
    """Raise AristaConfigError for baremetal ports when EOS lacks support."""
    if vnic_type != portbindings.VNIC_BAREMETAL:
        return
    if self.bm_and_dvr_supported():
        return
    msg = _("Baremetal instances are not supported in this"
            " release of EOS")
    LOG.error(msg)
    raise arista_exc.AristaConfigError(msg=msg)
def plug_port_into_network(self, device_id, host_id, port_id,
                           net_id, tenant_id, port_name, device_owner,
                           sg, orig_sg, vnic_type, segments,
                           switch_bindings=None, trunk_details=None):
    """Dispatch a port-plug request to the owner-specific handler.

    DHCP ports, compute/baremetal/trunk ports and DVR interface ports
    each have their own EOS programming routine; any other owner is
    silently ignored.
    """
    if device_owner == n_const.DEVICE_OWNER_DHCP:
        self.plug_dhcp_port_into_network(device_id, host_id, port_id,
                                         net_id, tenant_id, segments,
                                         port_name)
        return
    compute_like = (device_owner.startswith('compute') or
                    device_owner.startswith('baremetal') or
                    device_owner.startswith('trunk'))
    if compute_like:
        if vnic_type == 'baremetal':
            self.plug_baremetal_into_network(
                device_id, host_id, port_id, net_id, tenant_id,
                segments, port_name, device_owner, sg, orig_sg,
                vnic_type, switch_bindings, trunk_details)
        else:
            self.plug_host_into_network(device_id, host_id, port_id,
                                        net_id, tenant_id, segments,
                                        port_name, trunk_details)
    elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
        self.plug_distributed_router_port_into_network(
            device_id, host_id, port_id, net_id, tenant_id, segments)
def unplug_port_from_network(self, device_id, device_owner, hostname,
                             port_id, network_id, tenant_id, sg, vnic_type,
                             switch_bindings=None, trunk_details=None):
    """Dispatch a port-unplug request to the owner-specific handler."""
    if device_owner == n_const.DEVICE_OWNER_DHCP:
        self.unplug_dhcp_port_from_network(device_id, hostname, port_id,
                                           network_id, tenant_id)
        return
    compute_like = (device_owner.startswith('compute') or
                    device_owner.startswith('baremetal') or
                    device_owner.startswith('trunk'))
    if compute_like:
        if vnic_type == 'baremetal':
            self.unplug_baremetal_from_network(
                device_id, hostname, port_id, network_id, tenant_id,
                sg, vnic_type, switch_bindings, trunk_details)
        else:
            self.unplug_host_from_network(device_id, hostname, port_id,
                                          network_id, tenant_id,
                                          trunk_details)
    elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
        self.unplug_distributed_router_port_from_network(
            device_id, port_id, hostname, tenant_id)
def plug_host_into_network(self, vm_id, host, port_id,
                           network_id, tenant_id, segments, port_name,
                           trunk_details=None):
    """Program EOS with a VM port (plus any trunk subports) on a host."""
    cmds = ['tenant %s' % tenant_id,
            'vm id %s hostid %s' % (vm_id, host)]
    if port_name:
        port_cmd = 'port id %s name "%s" network-id %s' % (
            port_id, port_name, network_id)
    else:
        port_cmd = 'port id %s network-id %s' % (port_id, network_id)
    cmds.append(port_cmd)
    for level, segment in enumerate(segments):
        cmds.append('segment level %d id %s' % (level, segment['id']))
    if trunk_details and trunk_details.get('sub_ports'):
        for subport in trunk_details['sub_ports']:
            sub_id = subport['port_id']
            sub_net = self._ndb.get_network_id_from_port_id(sub_id)
            binding_levels = db_lib.get_port_binding_level(
                {'port_id': sub_id})
            cmds.append('port id %s network-id %s' % (sub_id, sub_net))
            for lvl in binding_levels:
                cmds.append('segment level %d id %s' % (lvl.level,
                                                        lvl.segment_id))
    self._run_openstack_cmds(cmds)
def plug_baremetal_into_network(self, vm_id, host, port_id,
network_id, tenant_id, segments, port_name,
device_owner,
sg=None, orig_sg=None,
vnic_type=None, switch_bindings=None,
trunk_details=None):
"""Program EOS with a baremetal instance port and its switch bindings.

After the port commands are pushed, the port's security group ACLs
are applied, or cleaned up when the group was removed (orig_sg set,
sg empty).

:raises AristaConfigError: when EOS lacks baremetal support, or when
    a binding is missing its switch/port identifiers.
"""
# Basic error checking for baremetal deployments; notice that the
# following method raises an exception if an error condition exists.
self._baremetal_support_check(vnic_type)
# For baremetal, add host to the topology
if switch_bindings and vnic_type == portbindings.VNIC_BAREMETAL:
cmds = ['tenant %s' % tenant_id]
cmds.append('instance id %s hostid %s type baremetal' %
(vm_id, host))
# NOTE(review): original comment spoke of a rollback list for ACL
# failures, but no such list exists in this version; ACL handling
# is delegated to apply_security_group below — TODO confirm.
for binding in switch_bindings:
if not binding:
# skip all empty entries
continue
if device_owner.startswith('trunk'):
vlan_type = 'allowed'
else:
vlan_type = 'native'
# Ensure that binding contains switch and port ID info
if binding['switch_id'] and binding['port_id']:
if port_name:
cmds.append('port id %s name "%s" network-id %s '
'type %s switch-id %s switchport %s' %
(port_id, port_name, network_id,
vlan_type, binding['switch_id'],
binding['port_id']))
else:
cmds.append('port id %s network-id %s type %s '
'switch-id %s switchport %s' %
(port_id, network_id, vlan_type,
binding['switch_id'],
binding['port_id']))
cmds.extend('segment level %d id %s' % (level,
segment['id'])
for level, segment in enumerate(segments))
# Trunk subports rebind the port_id/segments locals; the
# parent port's values are no longer needed past this point.
if trunk_details and trunk_details.get('sub_ports'):
for subport in trunk_details['sub_ports']:
port_id = subport['port_id']
net_id = self._ndb.get_network_id_from_port_id(
port_id)
filters = {'port_id': port_id}
segments = db_lib.get_port_binding_level(filters)
cmds.append('port id %s network-id %s type allowed'
' switch-id %s switchport %s' %
(port_id, net_id, binding['switch_id'],
binding['port_id']))
cmds.extend(
'segment level %d id %s' %
(s.level, s.segment_id) for s in segments
)
else:
msg = _('switch and port ID not specified for baremetal')
LOG.error(msg)
raise arista_exc.AristaConfigError(msg=msg)
cmds.append('exit')
self._run_openstack_cmds(cmds)
if sg:
self.apply_security_group(sg, switch_bindings)
else:
# Security group was removed. Clean up the existing security
# groups.
if orig_sg:
self.remove_security_group(orig_sg, switch_bindings)
def plug_dhcp_port_into_network(self, dhcp_id, host, port_id,
                                network_id, tenant_id, segments,
                                port_name):
    """Program EOS with a DHCP port attached to a network."""
    cmds = ['tenant %s' % tenant_id,
            'network id %s' % network_id]
    if port_name:
        dhcp_cmd = 'dhcp id %s hostid %s port-id %s name "%s"' % (
            dhcp_id, host, port_id, port_name)
    else:
        dhcp_cmd = 'dhcp id %s hostid %s port-id %s' % (
            dhcp_id, host, port_id)
    cmds.append(dhcp_cmd)
    for level, segment in enumerate(segments):
        cmds.append('segment level %d id %s' % (level, segment['id']))
    self._run_openstack_cmds(cmds)
def plug_distributed_router_port_into_network(self, router_id, host,
                                              port_id, net_id, tenant_id,
                                              segments):
    """Program EOS with a DVR port; no-op when EOS lacks DVR support."""
    if not self.bm_and_dvr_supported():
        LOG.info(const.ERR_DVR_NOT_SUPPORTED)
        return
    cmds = ['tenant %s' % tenant_id,
            'instance id %s type router' % router_id,
            'port id %s network-id %s hostid %s' % (port_id, net_id, host)]
    for level, segment in enumerate(segments):
        cmds.append('segment level %d id %s' % (level, segment['id']))
    self._run_openstack_cmds(cmds)
def unplug_host_from_network(self, vm_id, host, port_id,
                             network_id, tenant_id, trunk_details=None):
    """Remove a VM port (and any trunk subports) from EOS."""
    cmds = ['tenant %s' % tenant_id,
            'vm id %s hostid %s' % (vm_id, host)]
    if trunk_details and trunk_details.get('sub_ports'):
        for subport in trunk_details['sub_ports']:
            cmds.append('no port id %s' % subport['port_id'])
    cmds.append('no port id %s' % port_id)
    self._run_openstack_cmds(cmds)
def unplug_baremetal_from_network(self, vm_id, host, port_id,
                                  network_id, tenant_id, sg, vnic_type,
                                  switch_bindings=None,
                                  trunk_details=None):
    """Remove a baremetal instance port (and trunk subports) from EOS.

    Also removes any security-group ACLs applied on the switch
    interfaces listed in switch_bindings.

    :raises AristaConfigError: when EOS does not support baremetal
        (raised by _baremetal_support_check).
    """
    # Raises if this EOS release cannot handle baremetal deployments.
    self._baremetal_support_check(vnic_type)

    # Following is a temporary code for native VLANs - should be removed
    cmds = ['tenant %s' % tenant_id]
    cmds.append('instance id %s hostid %s type baremetal' % (vm_id, host))
    if trunk_details and trunk_details.get('sub_ports'):
        cmds.extend(
            'no port id %s' % subport['port_id']
            for subport in trunk_details['sub_ports']
        )
    cmds.append('no port id %s' % port_id)
    self._run_openstack_cmds(cmds)

    # SG - Remove security group rules from the port after deleting the
    # instance.  BUG FIX: switch_bindings defaults to None, so iterate
    # only over provided bindings (the original iterated unconditionally
    # and raised TypeError when the default was used).
    for binding in switch_bindings or []:
        if not binding:
            continue
        self.security_group_driver.remove_acl(sg, binding['switch_id'],
                                              binding['port_id'],
                                              binding['switch_info'])
def unplug_dhcp_port_from_network(self, dhcp_id, host, port_id,
                                  network_id, tenant_id):
    """Remove a DHCP port from its network in EOS."""
    self._run_openstack_cmds([
        'tenant %s' % tenant_id,
        'network id %s' % network_id,
        'no dhcp id %s port-id %s' % (dhcp_id, port_id),
    ])
def unplug_distributed_router_port_from_network(self, router_id,
                                                port_id, host, tenant_id):
    """Remove a DVR port; EOS drops the router with its last port."""
    if not self.bm_and_dvr_supported():
        LOG.info(const.ERR_DVR_NOT_SUPPORTED)
        return
    # When the last router port is removed, the router is deleted from EOS.
    self._run_openstack_cmds([
        'tenant %s' % tenant_id,
        'instance id %s type router' % router_id,
        'no port id %s hostid %s' % (port_id, host),
    ])
def create_network_bulk(self, tenant_id, network_list, sync=False):
"""Create many networks (with their segments) for one tenant on EOS.

:param tenant_id: globally unique neutron tenant identifier
:param network_list: dicts with network_id, optional network_name,
    'segments' and 'shared'
:param sync: True when called from the region sync path; inserts
    periodic heartbeat commands
"""
cmds = ['tenant %s' % tenant_id]
# Create a reference to function to avoid name lookups in the loop
append_cmd = cmds.append
for counter, network in enumerate(network_list, 1):
try:
append_cmd('network id %s name "%s"' %
(network['network_id'], network['network_name']))
except KeyError:
# network_name missing: register the network by id alone.
append_cmd('network id %s' % network['network_id'])
# Flat networks carry no segment commands; without HPB every
# segment is forced to id 1 with no dynamic/static qualifier.
cmds.extend(
'segment %s type %s id %d %s' % (
seg['id'] if self.hpb_supported() else 1,
seg['network_type'], seg['segmentation_id'],
('dynamic' if seg.get('is_dynamic', False) else 'static'
if self.hpb_supported() else ''))
for seg in network['segments']
if seg['network_type'] != const.NETWORK_TYPE_FLAT
)
shared_cmd = 'shared' if network['shared'] else 'no shared'
append_cmd(shared_cmd)
# Keep long sync transactions alive with periodic heartbeats.
if self._heartbeat_required(sync, counter):
append_cmd(self.cli_commands[const.CMD_SYNC_HEARTBEAT])
if self._heartbeat_required(sync):
append_cmd(self.cli_commands[const.CMD_SYNC_HEARTBEAT])
self._run_openstack_cmds(cmds, sync=sync)
def create_network_segments(self, tenant_id, network_id,
                            network_name, segments):
    """Add the given segments to a network on EOS (no-op when empty)."""
    if not segments:
        return
    cmds = ['tenant %s' % tenant_id,
            'network id %s name "%s"' % (network_id, network_name)]
    for seg in segments:
        if seg.get('is_dynamic', False):
            kind = 'dynamic'
        elif self.hpb_supported():
            kind = 'static'
        else:
            kind = ''
        cmds.append('segment %s type %s id %d %s' % (
            seg['id'], seg['network_type'], seg['segmentation_id'], kind))
    self._run_openstack_cmds(cmds)
def delete_network_segments(self, tenant_id, segments):
    """Delete the given segments from their networks on EOS."""
    if not segments:
        return
    cmds = ['tenant %s' % tenant_id]
    for segment in segments:
        cmds.extend(['network id %s' % segment['network_id'],
                     'no segment %s' % segment['id']])
    self._run_openstack_cmds(cmds)
def delete_network_bulk(self, tenant_id, network_id_list, sync=False):
    """Delete many networks for a tenant, heartbeating during sync."""
    commands = ['tenant %s' % tenant_id]
    add = commands.append
    for counter, net_id in enumerate(network_id_list, 1):
        add('no network id %s' % net_id)
        if self._heartbeat_required(sync, counter):
            add(self.cli_commands[const.CMD_SYNC_HEARTBEAT])
    if self._heartbeat_required(sync):
        add(self.cli_commands[const.CMD_SYNC_HEARTBEAT])
    self._run_openstack_cmds(commands, sync=sync)
def delete_vm_bulk(self, tenant_id, vm_id_list, sync=False):
    """Delete many VMs for a tenant, heartbeating during sync.

    :param tenant_id: globally unique neutron tenant identifier
    :param vm_id_list: ids of the VMs to remove
    :param sync: True when called from the region sync path
    """
    cmds = ['tenant %s' % tenant_id]
    # enumerate() replaces the original hand-maintained counter.
    for counter, vm_id in enumerate(vm_id_list, 1):
        cmds.append('no vm id %s' % vm_id)
        if self._heartbeat_required(sync, counter):
            cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT])
    if self._heartbeat_required(sync):
        cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT])
    self._run_openstack_cmds(cmds, sync=sync)
def delete_instance_bulk(self, tenant_id, instance_id_list, instance_type,
                         sync=False):
    """Delete many instances (routers/baremetal) for a tenant.

    :param tenant_id: globally unique neutron tenant identifier
    :param instance_id_list: ids of the instances to remove
    :param instance_type: unused by the EAPI implementation; kept for
        interface parity with the JSON RPC wrapper
    :param sync: True when called from the region sync path
    """
    cmds = ['tenant %s' % tenant_id]
    # enumerate() replaces the original hand-maintained counter.
    for counter, instance in enumerate(instance_id_list, 1):
        cmds.append('no instance id %s' % instance)
        if self._heartbeat_required(sync, counter):
            cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT])
    if self._heartbeat_required(sync):
        cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT])
    self._run_openstack_cmds(cmds, sync=sync)
def create_instance_bulk(self, tenant_id, neutron_ports, vms,
port_profiles, sync=False):
"""Recreate all instances (DHCP/VM/baremetal/router ports) on EOS.

:param tenant_id: globally unique neutron tenant identifier
:param neutron_ports: dict of neutron ports keyed by port id
:param vms: dict of instances, each with 'vmId' and 'ports'
:param port_profiles: per-port dict carrying 'vnic_type' (and for
    baremetal a JSON 'profile' with local_link_information)
:param sync: True when called from the region sync path
"""
cmds = ['tenant %s' % tenant_id]
# Create a reference to function to avoid name lookups in the loop
append_cmd = cmds.append
counter = 0
for vm in vms.values():
counter += 1
for v_port in vm['ports']:
port_id = v_port['portId']
if not v_port['hosts']:
# Skip all the ports that have no host associated with them
continue
if port_id not in neutron_ports.keys():
continue
neutron_port = neutron_ports[port_id]
port_name = ''
if 'name' in neutron_port:
port_name = 'name "%s"' % neutron_port['name']
device_owner = neutron_port['device_owner']
vnic_type = port_profiles[port_id]['vnic_type']
network_id = neutron_port['network_id']
segments = []
if self.hpb_supported():
segments = self._ndb.get_all_network_segments(network_id)
if device_owner == n_const.DEVICE_OWNER_DHCP:
append_cmd('network id %s' % neutron_port['network_id'])
append_cmd('dhcp id %s hostid %s port-id %s %s' %
(vm['vmId'], v_port['hosts'][0],
neutron_port['id'], port_name))
cmds.extend(
'segment level %d id %s' % (level, segment['id'])
for level, segment in enumerate(segments))
elif (device_owner.startswith('compute') or
device_owner.startswith('baremetal') or
device_owner.startswith('trunk')):
if vnic_type == 'baremetal':
append_cmd('instance id %s hostid %s type baremetal' %
(vm['vmId'], v_port['hosts'][0]))
profile = port_profiles[neutron_port['id']]
profile = json.loads(profile['profile'])
for binding in profile['local_link_information']:
if not binding or not isinstance(binding, dict):
# skip all empty entries
continue
if device_owner.startswith('trunk'):
vlan_type = 'allowed'
else:
vlan_type = 'native'
# Ensure that profile contains local link info
if binding['switch_id'] and binding['port_id']:
# NOTE(review): port_name already carries the
# 'name "..."' prefix, so the 'name "%s"' literal
# below appears to emit a doubled name token —
# TODO confirm against EOS CLI expectations.
if port_name:
cmds.append('port id %s name "%s" '
'network-id %s type %s '
'switch-id %s switchport %s' %
(port_id, port_name,
network_id, vlan_type,
binding['switch_id'],
binding['port_id']))
else:
cmds.append('port id %s network-id %s '
'type %s '
'switch-id %s switchport %s' %
(port_id, network_id,
vlan_type,
binding['switch_id'],
binding['port_id']))
cmds.extend('segment level %d id %s' % (
level, segment['id'])
for level, segment in enumerate(segments))
else:
append_cmd('vm id %s hostid %s' % (vm['vmId'],
v_port['hosts'][0]))
append_cmd('port id %s %s network-id %s' %
(neutron_port['id'], port_name,
neutron_port['network_id']))
cmds.extend('segment level %d id %s' % (level,
segment['id'])
for level, segment in enumerate(segments))
elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
if not self.bm_and_dvr_supported():
LOG.info(const.ERR_DVR_NOT_SUPPORTED)
continue
append_cmd('instance id %s type router' % (
neutron_port['device_id']))
for host in v_port['hosts']:
append_cmd('port id %s network-id %s hostid %s' % (
neutron_port['id'],
neutron_port['network_id'], host))
cmds.extend('segment level %d id %s' % (level,
segment['id'])
for level, segment in enumerate(segments))
else:
LOG.warning(_LW("Unknown device owner: %s"),
neutron_port['device_owner'])
# Keep long sync transactions alive with periodic heartbeats.
if self._heartbeat_required(sync, counter):
append_cmd(self.cli_commands[const.CMD_SYNC_HEARTBEAT])
if self._heartbeat_required(sync):
append_cmd(self.cli_commands[const.CMD_SYNC_HEARTBEAT])
self._run_openstack_cmds(cmds, sync=sync)
def delete_tenant_bulk(self, tenant_list, sync=False):
    """Remove every listed tenant from this region on EOS."""
    commands = ['no tenant %s' % tenant for tenant in tenant_list]
    if self._heartbeat_required(sync):
        commands.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT])
    self._run_openstack_cmds(commands, sync=sync)
def delete_this_region(self):
    """Delete all of this region's openstack state from EOS."""
    self._run_eos_cmds([
        'enable',
        'configure',
        'cvx',
        'service openstack',
        'no region %s' % self.region,
    ])
def register_with_eos(self, sync=False):
    """Register with EOS by pushing this driver's sync interval."""
    cmds = ['sync interval %d' % self.sync_interval]
    self._run_openstack_cmds(cmds, sync=sync)
def get_region_updated_time(self):
    """Return EOS' last-update timestamp for this region, or None."""
    timestamp_cmd = self.cli_commands['timestamp']
    if not timestamp_cmd:
        # No timestamp command available: callers fall back to full sync.
        return None
    try:
        return self._run_eos_cmds(commands=timestamp_cmd)[0]
    except IndexError:
        # EAPI request failed and so return none
        msg = "Failed to get last sync timestamp; trigger full sync"
        LOG.info(msg)
        return None
def _check_sync_lock(self, client):
"""Check if the lock is owned by this client.
:param client: Returns true only if the lock owner matches the expected
client.
"""
cmds = ['show sync lock']
ret = self._run_openstack_cmds(cmds, sync=True)
for r in ret:
if 'owner' in r:
lock_owner = r['owner']
LOG.info(_LI('Lock requested by: %s'), client)
LOG.info(_LI('Lock owner: %s'), lock_owner)
return lock_owner == client
return False
def sync_supported(self):
    """Truthy when EOS supports the region-sync command."""
    region_sync_cmd = self.cli_commands[const.CMD_REGION_SYNC]
    return region_sync_cmd
def hpb_supported(self):
    """True when CVX reports the hierarchical-port-binding feature."""
    features = self.cli_commands['features']
    return 'hierarchical-port-binding' in features
def sync_start(self):
"""Signal EOS that a region sync is starting; lock when supported.

Returns True when the sync may proceed (lock acquired, or locking
unsupported and 'sync start' accepted), False on any RPC failure.
"""
try:
cmds = []
if self.sync_supported():
# Locking the region during sync is supported.
client_id = socket.gethostname().split('.')[0]
request_id = self._get_random_name()
cmds = ['sync lock %s %s' % (client_id, request_id)]
self._run_openstack_cmds(cmds)
# Check whether the lock was acquired.
return self._check_sync_lock(client_id)
else:
cmds = ['sync start']
self._run_openstack_cmds(cmds)
return True
except arista_exc.AristaRpcError:
return False
def sync_end(self):
    """Tell EOS the region sync finished; True on success."""
    # 'sync end' can be sent only when the region has been entered in
    # the sync mode
    try:
        self._run_openstack_cmds(['sync end'], sync=True)
    except arista_exc.AristaRpcError:
        return False
    return True
def _run_eos_cmds(self, commands, commands_to_log=None):
"""Execute/sends a CAPI (Command API) command to EOS.
@ -222,7 +830,7 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
"""
region_cmd = 'region %s' % self.region
if sync:
if sync and self.sync_supported():
region_cmd = self.cli_commands[const.CMD_REGION_SYNC]
full_command = [

14
networking_arista/ml2/rpc/arista_json.py

@ -139,6 +139,14 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
self.create_region(self.region)
self._set_region_update_interval()
def check_supported_features(self):
# No probing needed: the JSON API implies a fixed, known feature
# set (contrast with the EAPI implementation, which probes EOS).
pass
def bm_and_dvr_supported(self):
"""Baremetal and DVR are always available through the JSON API."""
return True
def get_region_updated_time(self):
path = 'agent/'
data = self._send_api_request(path, 'GET')
@ -165,6 +173,12 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
return region
return None
def sync_supported(self):
"""Region sync is always available through the JSON API."""
return True
def hpb_supported(self):
"""Hierarchical port binding is always available via the JSON API."""
return True
def sync_start(self):
try:
region = self.get_region(self.region)

230
networking_arista/ml2/rpc/base.py

@ -123,13 +123,88 @@ class AristaRPCWrapperBase(object):
return True
except Exception as exc:
LOG.warning(_LW('%s when getting CVX master'), exc)
LOG.warning("Failed to initialize connection with CVX. Please "
"ensure CVX is reachable and running EOS 4.18.1 "
"or greater.")
self.set_cvx_unavailable()
return False
def delete_tenant(self, tenant_id):
    """Delete one tenant (with all its networks and VMs) from EOS.

    :param tenant_id: globally unique neutron tenant identifier
    """
    self.delete_tenant_bulk([tenant_id])
def clear_region_updated_time(self):
"""No-op kept only for backward compatibility with the ML2 driver."""
# TODO(shashank): Remove this once the call is removed from the ML2
# driver.
pass
def create_network(self, tenant_id, network):
    """Create one network on Arista hardware via the bulk call.

    :param tenant_id: globally unique neutron tenant identifier
    :param network: dict containing network_id, network_name and
        segmentation_id
    """
    self.create_network_bulk(tenant_id, [network])
def delete_network(self, tenant_id, network_id, network_segments):
    """Delete a network and its segments for a tenant.

    :param tenant_id: globally unique neutron tenant identifier
    :param network_id: globally unique neutron network identifier
    :param network_segments: segments associated with the network
    """
    segments_info = [{'id': segment['id'], 'network_id': network_id}
                     for segment in network_segments]
    self.delete_network_segments(tenant_id, segments_info)
    self.delete_network_bulk(tenant_id, [network_id])
def delete_vm(self, tenant_id, vm_id):
    """Delete one VM from EOS for the given tenant.

    :param tenant_id: globally unique neutron tenant identifier
    :param vm_id: id of the VM to delete
    """
    self.delete_vm_bulk(tenant_id, [vm_id])
@abc.abstractmethod
def plug_port_into_network(self, device_id, host_id, port_id,
net_id, tenant_id, port_name, device_owner,
sg, orig_sg, vnic_type, segments=None,
switch_bindings=None, trunk_details=None):
"""Generic routine to plug a port of a VM instance into a network.
:param device_id: globally unique identifier for the device
:param host_id: ID of the host where the port is placed
:param port_id: globally unique port ID that connects port to network
:param net_id: globally unique neutron network identifier
:param tenant_id: globally unique neutron tenant identifier
:param port_name: Name of the port - for display purposes
:param device_owner: Device owner - e.g. compute or network:dhcp
:param sg: current security group for the port
:param orig_sg: original security group for the port
:param vnic_type: VNIC type for the port
:param segments: list of network segments the port is bound to
:param switch_bindings: List of switch_bindings
:param trunk_details: List of subports of a trunk port
"""
@abc.abstractmethod
def unplug_port_from_network(self, device_id, device_owner, hostname,
port_id, network_id, tenant_id, sg, vnic_type,
switch_bindings=None, trunk_details=None):
"""Removes a port from the device
:param device_id: globally unique identifier for the device
:param host: ID of the host where the device is placed
:param port_id: globally unique port ID that connects device to network
:param network_id: globally unique neutron network identifier
:param tenant_id: globally unique neutron tenant identifier
:param trunk_details: List of subports of a trunk port
"""
def _clean_acls(self, sg, failed_switch, switches_to_clean):
"""This is a helper function to clean up ACLs on switches.
@ -199,6 +274,155 @@ class AristaRPCWrapperBase(object):
"""
self.security_group_driver.perform_sync_of_sg()
@abc.abstractmethod
def sync_supported(self):
"""Whether the EOS version supports sync.
Returns True if sync is supported, false otherwise.
"""
@abc.abstractmethod
def bm_and_dvr_supported(self):
"""Whether EOS supports Ironic and DVR.
Returns True if supported, false otherwise.
"""
@abc.abstractmethod
def register_with_eos(self, sync=False):
"""This is the registration request with EOS.
This is the initial handshake between Neutron and EOS, in which
critical end-point information is registered with EOS.
:param sync: This flag indicates that the region is being synced.
"""
@abc.abstractmethod
def check_supported_features(self):
"""Checks whether the CLI commands are valid.
This method tries to execute the commands on EOS and if it succeeds
the command is stored.
"""
@abc.abstractmethod
def get_region_updated_time(self):
"""Return the timestamp of the last update.
This method returns the time at which any entities in the region
were updated.
"""
@abc.abstractmethod
def delete_this_region(self):
"""Deletes the region data from EOS."""
@abc.abstractmethod
def sync_start(self):
    """Lets EOS know that a sync is being initiated."""
@abc.abstractmethod
def sync_end(self):
    """Lets EOS know that the sync is complete."""
@abc.abstractmethod
def get_tenants(self):
    """Returns a dict of all tenants known by EOS.

    :returns: dictionary containing the networks per tenant
              and VMs allocated per tenant
    """
@abc.abstractmethod
def delete_tenant_bulk(self, tenant_list, sync=False):
    """Sends a bulk request to delete the tenants.

    :param tenant_list: list of globally unique neutron tenant ids which
                        need to be deleted.
    :param sync: This flag indicates that the region is being synced.
    """
@abc.abstractmethod
def create_network_bulk(self, tenant_id, network_list, sync=False):
    """Creates networks on Arista Hardware in bulk.

    :param tenant_id: globally unique neutron tenant identifier
    :param network_list: list of dicts containing network_id, network_name
                         and segmentation_id
    :param sync: This flag indicates that the region is being synced.
    """
@abc.abstractmethod
def create_network_segments(self, tenant_id, network_id,
                            network_name, segments):
    """Creates network segments on Arista Hardware.

    Note: This method is not used at the moment. create_network()
    is used instead. This will be used once the support for
    multiple segments is added in Neutron.

    :param tenant_id: globally unique neutron tenant identifier
    :param network_id: globally unique neutron network identifier
    :param network_name: Network name - for display purposes
    :param segments: List of segments in a given network
    """
@abc.abstractmethod
def delete_network_bulk(self, tenant_id, network_id_list, sync=False):
    """Deletes the networks specified for a tenant.

    :param tenant_id: globally unique neutron tenant identifier
    :param network_id_list: list of globally unique neutron network
                            identifiers
    :param sync: This flag indicates that the region is being synced.
    """
@abc.abstractmethod
def delete_network_segments(self, tenant_id, network_segments):
    """Deletes the network segments.

    :param tenant_id: globally unique neutron tenant identifier
    :param network_segments: List of network segments to be deleted.
    """
@abc.abstractmethod
def create_instance_bulk(self, tenant_id, neutron_ports, vms,
                         port_profiles, sync=False):
    """Sends a bulk request to create ports.

    :param tenant_id: globally unique neutron tenant identifier
    :param neutron_ports: list of ports that need to be created.
    :param vms: list of vms to which the ports will be attached to.
    :param port_profiles: profiles of the ports that need to be created.
    :param sync: This flag indicates that the region is being synced.
    """
@abc.abstractmethod
def delete_instance_bulk(self, tenant_id, instance_id_list, instance_type,
                         sync=False):
    """Deletes instances from EOS for a given tenant.

    :param tenant_id: globally unique neutron tenant identifier
    :param instance_id_list: ids of instances that need to be deleted.
    :param instance_type: The type of the instance which is being deleted.
    :param sync: This flag indicates that the region is being synced.
    """
@abc.abstractmethod
def delete_vm_bulk(self, tenant_id, vm_id_list, sync=False):
    """Deletes VMs from EOS for a given tenant.

    :param tenant_id: globally unique neutron tenant identifier
    :param vm_id_list: ids of VMs that need to be deleted.
    :param sync: This flag indicates that the region is being synced.
    """
@abc.abstractmethod
def hpb_supported(self):
    """Whether hierarchical port binding (HPB) is supported by CVX.

    :returns: True if HPB is supported, False otherwise.
    """
def apply_security_group(self, security_group, switch_bindings):
"""Applies ACLs on switch interface.

930
networking_arista/tests/unit/ml2/rpc/test_arista_eapi_rpc_wrapper.py

@ -14,14 +14,20 @@
# limitations under the License.
import mock
from mock import patch
from neutron_lib import constants as n_const
from oslo_config import cfg
import six
from neutron.tests import base
from neutron.tests.unit import testlib_api
from networking_arista.common import constants
from networking_arista.common import db_lib
from networking_arista.common import exceptions as arista_exc
from networking_arista.ml2.rpc import arista_eapi
from networking_arista.tests.unit.ml2.test_arista_mechanism_driver import \
FakePortBindingLevel
import networking_arista.tests.unit.ml2.utils as utils
@ -33,6 +39,738 @@ def setup_valid_config():
utils.setup_arista_wrapper_config(cfg)
class PositiveRPCWrapperValidConfigTestCase(testlib_api.SqlTestCase):
"""Test cases to test the RPC between Arista Driver and EOS.
Tests all methods used to send commands between Arista Driver and EOS
"""
def setUp(self):
    """Build an EAPI RPC wrapper from a known-valid configuration."""
    super(PositiveRPCWrapperValidConfigTestCase, self).setUp()
    setup_valid_config()
    neutron_db = db_lib.NeutronNets()
    self.region = 'RegionOne'
    self.drv = arista_eapi.AristaRPCWrapperEapi(neutron_db)
    self.drv._server_ip = "10.11.12.13"
def _get_exit_mode_cmds(self, modes):
    """Return one 'exit' command for every CLI mode that was entered."""
    return ['exit' for _ in modes]
def _verify_send_eapi_request_calls(self, mock_send_eapi_req, cmds,
                                    commands_to_log=None):
    """Assert the mocked EAPI sender saw one call per command batch."""
    expected_calls = []
    for cmd, log_cmd in six.moves.zip(cmds, commands_to_log or cmds):
        expected_calls.append(mock.call(cmds=cmd, commands_to_log=log_cmd))
    mock_send_eapi_req.assert_has_calls(expected_calls)
def test_no_exception_on_correct_configuration(self):
    """A valid configuration in setUp must yield a usable wrapper."""
    wrapper = self.drv
    self.assertIsNotNone(wrapper)
@patch(EAPI_SEND_FUNC)
def test_plug_host_into_network(self, mock_send_eapi_req):
    """Plugging a VM port must emit the expected EAPI command batch.

    Expects the agent-uuid query followed by the tenant/vm/port config
    commands, with one 'segment level' line per binding level.
    """
    tenant_id = 'ten-1'
    vm_id = 'vm-1'
    port_id = 123
    network_id = 'net-id'
    host = 'host'
    port_name = '123-port'
    segment_id = 'segment_id_1'
    segments = [{'network_type': 'vlan', 'physical_network': 'default',
                 'segmentation_id': 1234, 'id': segment_id}]
    self.drv.plug_host_into_network(vm_id, host, port_id,
                                    network_id, tenant_id, segments,
                                    port_name)
    cmd1 = ['show openstack agent uuid']
    cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
            'region RegionOne',
            'tenant ten-1', 'vm id vm-1 hostid host',
            'port id 123 name "123-port" network-id net-id',
            ]
    for level, segment in enumerate(segments):
        cmd2.append('segment level %s id %s' % (level, segment['id']))
    self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_plug_dhcp_port_into_network(self, mock_send_eapi_req):
    """Plugging a DHCP-owned port must emit a 'dhcp id ...' command.

    The DHCP device owner routes the generic plug_port_into_network()
    call onto the per-network dhcp CLI form rather than the vm form.
    """
    tenant_id = 'ten-1'
    vm_id = 'vm-1'
    port_id = 123
    network_id = 'net-id'
    host = 'host'
    port_name = '123-port'
    segments = []
    self.drv.plug_port_into_network(vm_id, host, port_id, network_id,
                                    tenant_id, port_name,
                                    n_const.DEVICE_OWNER_DHCP, None, None,
                                    None, segments)
    cmd1 = ['show openstack agent uuid']
    cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
            'region RegionOne',
            'tenant ten-1', 'network id net-id',
            'dhcp id vm-1 hostid host port-id 123 name "123-port"',
            ]
    self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_plug_baremetal_into_network(self, mock_send_eapi_req):
    """Plugging a baremetal port must emit instance/port/segment CLI.

    bm_and_dvr_supported() is mocked to True so the baremetal code
    path runs regardless of the detected EOS feature set.
    """
    tenant_id = 'ten-1'
    network_id = 'net-id-1'
    bm_id = 'bm-1'
    port_id = 'p1'
    host = 'host'
    port_name = 'name_p1'
    device_owner = 'compute:None'
    segments = [{'segmentation_id': 1001,
                 'id': 'segment_id_1',
                 'network_type': 'vlan',
                 'is_dynamic': False}]
    switch_bindings = {'local_link_information': [
        {'port_id': 'Eth1', 'switch_id': 'switch-id-1',
         'switch_info': 'switch-1'}]}
    bindings = switch_bindings['local_link_information']
    self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True)
    self.drv.plug_baremetal_into_network(bm_id, host, port_id,
                                         network_id, tenant_id,
                                         segments, port_name,
                                         device_owner,
                                         None, None, 'baremetal',
                                         bindings)
    cmd1 = ['show openstack agent uuid']
    cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
            'region RegionOne',
            'tenant ten-1',
            'instance id bm-1 hostid host type baremetal',
            'port id p1 name "name_p1" network-id net-id-1 '
            'type native switch-id switch-id-1 switchport Eth1',
            ]
    for level, segment in enumerate(segments):
        cmd2.append('segment level %s id %s' % (level, segment['id']))
    cmd2.append('exit')
    self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_unplug_baremetal_from_network(self, mock_send_eapi_req):
    """Unplugging a baremetal port must emit a 'no port id' command.

    bm_and_dvr_supported() is mocked to True so the baremetal code
    path runs regardless of the detected EOS feature set.
    """
    tenant_id = 'ten-1'
    network_id = 'net-id-1'
    bm_id = 'bm-1'
    port_id = 111
    host = 'host'
    switch_bindings = {'local_link_information': [
        {'port_id': 'Eth1', 'switch_id': 'switch-id-1',
         'switch_info': 'switch-1'}]}
    bindings = switch_bindings['local_link_information']
    self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True)
    self.drv.unplug_baremetal_from_network(bm_id, host, port_id,
                                           network_id, tenant_id,
                                           None, 'baremetal',
                                           bindings)
    cmd1 = ['show openstack agent uuid']
    cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
            'region RegionOne',
            'tenant ten-1',
            'instance id bm-1 hostid host type baremetal',
            'no port id 111',
            ]
    self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_unplug_host_from_network(self, mock_send_eapi_req):
    """Unplugging a VM port must emit a 'no port id' EAPI command."""
    self.drv.unplug_host_from_network('vm-1', 'host', 123,
                                      'net-id', 'ten-1')
    uuid_cmd = ['show openstack agent uuid']
    config_cmd = ['enable', 'configure', 'cvx', 'service openstack',
                  'region RegionOne',
                  'tenant ten-1', 'vm id vm-1 hostid host',
                  'no port id 123',
                  ]
    self._verify_send_eapi_request_calls(mock_send_eapi_req,
                                         [uuid_cmd, config_cmd])
@patch(EAPI_SEND_FUNC)
def test_unplug_dhcp_port_from_network(self, mock_send_eapi_req):
    """Unplugging a DHCP port must emit a 'no dhcp id' EAPI command."""
    self.drv.unplug_dhcp_port_from_network('vm-1', 'host', 123,
                                           'net-id', 'ten-1')
    uuid_cmd = ['show openstack agent uuid']
    config_cmd = ['enable', 'configure', 'cvx', 'service openstack',
                  'region RegionOne',
                  'tenant ten-1', 'network id net-id',
                  'no dhcp id vm-1 port-id 123',
                  ]
    self._verify_send_eapi_request_calls(mock_send_eapi_req,
                                         [uuid_cmd, config_cmd])
@patch(EAPI_SEND_FUNC)
def test_create_network(self, mock_send_eapi_req):
    """Creating a non-shared network must emit segments + 'no shared'.

    The hierarchical-port-binding feature is enabled so static segment
    lines are included in the expected command batch.
    """
    tenant_id = 'ten-1'
    self.drv.cli_commands['features'] = {'hierarchical-port-binding': 1}
    network = {
        'network_id': 'net-id',
        'network_name': 'net-name',
        'segments': [{'segmentation_id': 123,
                      'physical_network': 'default',
                      'network_type': 'vlan',
                      'id': 'segment_id_1'}],
        'shared': False,
    }
    self.drv.create_network(tenant_id, network)
    cmd1 = ['show openstack agent uuid']
    cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
            'region RegionOne',
            'tenant ten-1', 'network id net-id name "net-name"',
            ]
    for seg in network['segments']:
        is_dynamic = seg.get('is_dynamic', False)
        cmd2.append('segment %s type %s id %d %s' % (seg['id'],
                    seg['network_type'], seg['segmentation_id'],
                    'dynamic' if is_dynamic else 'static'))
    cmd2.append('no shared')
    self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_create_shared_network(self, mock_send_eapi_req):
    """Creating a shared network must emit the 'shared' CLI keyword."""
    tenant_id = 'ten-1'
    segment_id = 'abcd-cccc'
    segmentation_id = 123
    network_type = 'vlan'
    segments = [{'segmentation_id': segmentation_id,
                 'id': segment_id,
                 'network_type': network_type}]
    network = {
        'network_id': 'net-id',
        'network_name': 'net-name',
        'segments': segments,
        'shared': True}
    self.drv.cli_commands['features'] = {'hierarchical-port-binding': 1}
    self.drv.create_network(tenant_id, network)
    cmd1 = ['show openstack agent uuid']
    cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
            'region RegionOne',
            'tenant ten-1', 'network id net-id name "net-name"',
            'segment %s type %s id %d %s' % (segment_id, network_type,
                                             segmentation_id, 'static'),
            'shared',
            ]
    self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_create_network_bulk(self, mock_send_eapi_req):
    """Bulk creation must emit one network/segment/shared block each.

    Builds nine shared vlan networks and checks the single config
    batch contains a block per network, in order.
    """
    tenant_id = 'ten-2'
    num_networks = 10
    network_type = 'vlan'
    segment_id = 'abcd-eeee-%s'
    self.drv.cli_commands['features'] = {'hierarchical-port-binding': 1}
    networks = [{
        'network_id': 'net-id-%d' % net_id,
        'network_name': 'net-name-%d' % net_id,
        'segments': [{'segmentation_id': net_id,
                      'network_type': 'vlan',
                      'id': segment_id % net_id}],
        'shared': True,
        } for net_id in range(1, num_networks)
    ]
    self.drv.create_network_bulk(tenant_id, networks)
    cmd1 = ['show openstack agent uuid']
    cmd2 = ['enable',
            'configure',
            'cvx',
            'service openstack',
            'region RegionOne',
            'tenant ten-2']
    for net_id in range(1, num_networks):
        cmd2.append('network id net-id-%d name "net-name-%d"' %
                    (net_id, net_id))
        cmd2.append('segment %s type %s id %d %s' % (
            segment_id % net_id, network_type, net_id, 'static'))
        cmd2.append('shared')
    self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
@patch(EAPI_SEND_FUNC)
def test_delete_network(self, mock_send_eapi_req):
tenant_id = 'ten-1'
network_id = 'net-id'
segments = [{'segmentation_id': 101,
'physical_network': 'default',
'id': 'segment_id_1',
'network_type': 'vlan'}]
self.drv.delete_network(tenant_id, network_id, segments)
cmd1 = ['show openstack agent uuid']
cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-1',
'network id net-id',
'no segment segment_id_1',
]
cmd3 = ['enable', 'configure', 'cvx', 'service openstack',
'region RegionOne',
'tenant ten-1',
'no network id net-id',
]