diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index 87d29a6e..1bf5e103 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -18,7 +18,6 @@ function configure_arista() {
     iniset $ARISTA_ML2_CONF_FILE ml2_arista eapi_host $ARISTA_EAPI_HOST
     iniset $ARISTA_ML2_CONF_FILE ml2_arista eapi_username $ARISTA_EAPI_USERNAME
     iniset $ARISTA_ML2_CONF_FILE ml2_arista eapi_password $ARISTA_EAPI_PASSWORD
-    iniset $ARISTA_ML2_CONF_FILE ml2_arista api_type $ARISTA_API_TYPE
    iniset $ARISTA_ML2_CONF_FILE ml2_arista region_name $ARISTA_REGION_NAME
 
     if [ -n "${ARISTA_USE_FQDN+x}" ]; then
diff --git a/devstack/settings b/devstack/settings
index c2dfccf2..72961483 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -6,6 +6,4 @@ ARISTA_DIR=${ARISTA_DIR:-$DEST/networking-arista}
 ARISTA_ML2_CONF_SAMPLE=$ARISTA_DIR/etc/ml2_conf_arista.ini
 ARISTA_ML2_CONF_FILE=${ARISTA_ML2_CONF_FILE:-"$NEUTRON_CONF_DIR/ml2_conf_arista.ini"}
 
-ARISTA_API_TYPE=${ARISTA_API_TYPE:-"EAPI"}
 ARISTA_REGION_NAME=${ARISTA_REGION_NAME:-"$REGION_NAME"}
-
diff --git a/networking_arista/common/config.py b/networking_arista/common/config.py
index 1675e30f..093cf768 100644
--- a/networking_arista/common/config.py
+++ b/networking_arista/common/config.py
@@ -91,12 +91,6 @@ ARISTA_DRIVER_OPTS = [
                        '172.13.23.56:admin:admin, .... '
                        'This is required if sec_group_support is set to '
                        '"True"')),
-    cfg.StrOpt('api_type',
-               default='JSON',
-               help=_('Tells the plugin to use a sepcific API interfaces '
-                      'to communicate with CVX. Valid options are:'
-                      'EAPI - Use EOS\' extensible API.'
-                      'JSON - Use EOS\' JSON/REST API.')),
     cfg.ListOpt('managed_physnets',
                 default=[],
                 help=_('This is a comma separated list of physical networks '
diff --git a/networking_arista/common/constants.py b/networking_arista/common/constants.py
index ef167d0c..915ccee8 100644
--- a/networking_arista/common/constants.py
+++ b/networking_arista/common/constants.py
@@ -23,18 +23,8 @@ UNABLE_TO_DELETE_DEVICE_MSG = _('Unable to delete device')
 INTERNAL_TENANT_ID = 'INTERNAL-TENANT-ID'
 MECHANISM_DRV_NAME = 'arista'
 
-# Insert a heartbeat command every 100 commands
-HEARTBEAT_INTERVAL = 100
-
-# Commands dict keys
-CMD_SYNC_HEARTBEAT = 'SYNC_HEARTBEAT'
-CMD_REGION_SYNC = 'REGION_SYNC'
-CMD_INSTANCE = 'INSTANCE'
-
 # EAPI error messages of interest
 ERR_CVX_NOT_LEADER = 'only available on cluster leader'
-ERR_DVR_NOT_SUPPORTED = 'EOS version on CVX does not support DVR'
-BAREMETAL_NOT_SUPPORTED = 'EOS version on CVX dpes not support Baremetal'
 
 # Flat network constant
 NETWORK_TYPE_FLAT = 'flat'
diff --git a/networking_arista/ml2/arista_sync.py b/networking_arista/ml2/arista_sync.py
index 0ade89b2..5a9ebf64 100644
--- a/networking_arista/ml2/arista_sync.py
+++ b/networking_arista/ml2/arista_sync.py
@@ -148,7 +148,6 @@ class SyncService(object):
         try:
             # Register with EOS to ensure that it has correct credentials
             self._rpc.register_with_eos(sync=True)
-            self._rpc.check_supported_features()
             eos_tenants = self._rpc.get_tenants()
         except arista_exc.AristaRpcError:
             LOG.warning(constants.EOS_UNREACHABLE_MSG)
@@ -230,25 +229,17 @@ class SyncService(object):
             if vms_to_delete:
                 self._rpc.delete_vm_bulk(tenant, vms_to_delete, sync=True)
             if routers_to_delete:
-                if self._rpc.bm_and_dvr_supported():
-                    self._rpc.delete_instance_bulk(
-                        tenant,
-                        routers_to_delete,
-                        constants.InstanceType.ROUTER,
-                        sync=True)
-                else:
-                    LOG.info(constants.ERR_DVR_NOT_SUPPORTED)
-
+                self._rpc.delete_instance_bulk(
+                    tenant,
+                    routers_to_delete,
+                    constants.InstanceType.ROUTER,
+                    sync=True)
             if bms_to_delete:
-                if self._rpc.bm_and_dvr_supported():
-                    self._rpc.delete_instance_bulk(
-                        tenant,
-                        bms_to_delete,
-                        constants.InstanceType.BAREMETAL,
-                        sync=True)
-                else:
-                    LOG.info(constants.BAREMETAL_NOT_SUPPORTED)
-
+                self._rpc.delete_instance_bulk(
+                    tenant,
+                    bms_to_delete,
+                    constants.InstanceType.BAREMETAL,
+                    sync=True)
             if nets_to_delete:
                 self._rpc.delete_network_bulk(tenant, nets_to_delete,
                                               sync=True)
diff --git a/networking_arista/ml2/mechanism_arista.py b/networking_arista/ml2/mechanism_arista.py
index 93b9aad0..1a7fec90 100644
--- a/networking_arista/ml2/mechanism_arista.py
+++ b/networking_arista/ml2/mechanism_arista.py
@@ -77,22 +77,11 @@ class AristaDriver(driver_api.MechanismDriver):
             self.eapi = rpc
         else:
             self.eapi = AristaRPCWrapperEapi(self.ndb)
-            api_type = confg['api_type'].upper()
-            if api_type == 'EAPI':
-                LOG.info("Using EAPI for RPC")
-                self.rpc = AristaRPCWrapperEapi(self.ndb)
-            elif api_type == 'JSON':
-                LOG.info("Using JSON for RPC")
-                self.rpc = AristaRPCWrapperJSON(self.ndb)
-            else:
-                msg = "RPC mechanism %s not recognized" % api_type
-                LOG.error(msg)
-                raise arista_exc.AristaRpcError(msg=msg)
+            self.rpc = AristaRPCWrapperJSON(self.ndb)
 
     def initialize(self):
         if self.rpc.check_cvx_availability():
             self.rpc.register_with_eos()
-            self.rpc.check_supported_features()
         self.sg_handler = sec_group_callback.AristaSecurityGroupHandler(self)
         registry.subscribe(self.set_subport,
@@ -108,12 +97,6 @@ class AristaDriver(driver_api.MechanismDriver):
         network = context.current
         segments = context.network_segments
-        if not self.rpc.hpb_supported():
-            # Hierarchical port binding is not supported by CVX, only
-            # allow VLAN network type.
-            if(segments and
-               segments[0][driver_api.NETWORK_TYPE] != n_const.TYPE_VLAN):
-                return
         network_id = network['id']
         tenant_id = network['tenant_id'] or constants.INTERNAL_TENANT_ID
         with self.eos_sync_lock:
@@ -212,16 +195,6 @@ class AristaDriver(driver_api.MechanismDriver):
         """Send network delete request to Arista HW."""
         network = context.current
         segments = context.network_segments
-        if not self.rpc.hpb_supported():
-            # Hierarchical port binding is not supported by CVX, only
-            # send the request if network type is VLAN.
-            if (segments and
-                    segments[0][driver_api.NETWORK_TYPE] != n_const.TYPE_VLAN):
-                # If network type is not VLAN, do nothing
-                return
-            # No need to pass segments info when calling delete_network as
-            # HPB is not supported.
-            segments = []
         network_id = network['id']
         tenant_id = network['tenant_id'] or constants.INTERNAL_TENANT_ID
         with self.eos_sync_lock:
@@ -334,11 +307,6 @@ class AristaDriver(driver_api.MechanismDriver):
                           "found", {'port': port.get('id')})
                 continue
             if segment[driver_api.NETWORK_TYPE] == n_const.TYPE_VXLAN:
-                # Check if CVX supports HPB
-                if not self.rpc.hpb_supported():
-                    LOG.debug("bind_port: HPB is not supported")
-                    return
-
                 # The physical network is connected to arista switches,
                 # allocate dynamic segmentation id to bind the port to
                 # the network that the port belongs to.
@@ -695,7 +663,7 @@ class AristaDriver(driver_api.MechanismDriver):
                     segmentation_id=seg[driver_api.SEGMENTATION_ID]):
                 net_provisioned = False
                 segments = []
-        if net_provisioned and self.rpc.hpb_supported():
+        if net_provisioned:
             segments = seg_info
             all_segments = self.ndb.get_all_network_segments(
                 network_id, context=context._plugin_context)
@@ -846,10 +814,6 @@ class AristaDriver(driver_api.MechanismDriver):
 
         param tenant_id: The tenant which the port belongs to
         """
-        if not self.rpc.hpb_supported():
-            # Returning as HPB not supported by CVX
-            return
-
         port = context.current
         network_id = port.get('network_id')
diff --git a/networking_arista/ml2/rpc/arista_eapi.py b/networking_arista/ml2/rpc/arista_eapi.py
index f730d55d..ef5865a5 100644
--- a/networking_arista/ml2/rpc/arista_eapi.py
+++ b/networking_arista/ml2/rpc/arista_eapi.py
@@ -14,10 +14,7 @@
 # limitations under the License.
 
 import json
-import socket
 
-from neutron_lib.api.definitions import portbindings
-from neutron_lib import constants as n_const
 from oslo_config import cfg
 from oslo_log import log as logging
 import requests
@@ -25,7 +22,6 @@ import six
 
 from networking_arista._i18n import _, _LI, _LW, _LE
 from networking_arista.common import constants as const
-from networking_arista.common import db_lib
 from networking_arista.common import exceptions as arista_exc
 from networking_arista.ml2.rpc.base import AristaRPCWrapperBase
@@ -38,11 +34,6 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
         # The cli_commands dict stores the mapping between the CLI command key
         # and the actual CLI command.
         self.cli_commands = {
-            'timestamp': [
-                'show openstack config region %s timestamp' % self.region],
-            const.CMD_REGION_SYNC: 'region %s sync' % self.region,
-            const.CMD_INSTANCE: None,
-            const.CMD_SYNC_HEARTBEAT: 'sync heartbeat',
             'resource-pool': [],
             'features': {},
         }
@@ -129,24 +120,6 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase):
             LOG.warning(msg)
             raise
 
-    def check_supported_features(self):
-        cmd = ['show openstack instances']
-        try:
-            self._run_eos_cmds(cmd)
-            self.cli_commands[const.CMD_INSTANCE] = 'instance'
-        except (arista_exc.AristaRpcError, Exception) as err:
-            self.cli_commands[const.CMD_INSTANCE] = None
-            LOG.warning(_LW("'instance' command is not available on EOS "
-                            "because of %s"), err)
-
-        # Get list of supported openstack features by CVX
-        cmd = ['show openstack features']
-        try:
-            resp = self._run_eos_cmds(cmd)
-            self.cli_commands['features'] = resp[0].get('features', {})
-        except (Exception, arista_exc.AristaRpcError):
-            self.cli_commands['features'] = {}
-
     def check_vlan_type_driver_commands(self):
         """Checks the validity of CLI commands for Arista's VLAN type driver.
 
@@ -164,10 +137,6 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase): _LW("'resource-pool' command '%s' is not available on EOS"), cmd) - def _heartbeat_required(self, sync, counter=0): - return (sync and self.cli_commands[const.CMD_SYNC_HEARTBEAT] and - (counter % const.HEARTBEAT_INTERVAL) == 0) - def get_vlan_assignment_uuid(self): """Returns the UUID for the region's vlan assignment on CVX @@ -198,583 +167,6 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase): 'availableVlans': '', 'allocatedVlans': ''} - def get_tenants(self): - cmds = ['show openstack config region %s' % self.region] - command_output = self._run_eos_cmds(cmds) - tenants = command_output[0]['tenants'] - - return tenants - - def bm_and_dvr_supported(self): - return (self.cli_commands[const.CMD_INSTANCE] == 'instance') - - def _baremetal_support_check(self, vnic_type): - # Basic error checking for baremental deployments - if (vnic_type == portbindings.VNIC_BAREMETAL and - not self.bm_and_dvr_supported()): - msg = _("Baremetal instances are not supported in this" - " release of EOS") - LOG.error(msg) - raise arista_exc.AristaConfigError(msg=msg) - - def plug_port_into_network(self, device_id, host_id, port_id, - net_id, tenant_id, port_name, device_owner, - sg, orig_sg, vnic_type, segments, - switch_bindings=None, trunk_details=None): - if device_owner == n_const.DEVICE_OWNER_DHCP: - self.plug_dhcp_port_into_network(device_id, - host_id, - port_id, - net_id, - tenant_id, - segments, - port_name) - elif (device_owner.startswith('compute') or - device_owner.startswith('baremetal') or - device_owner.startswith('trunk')): - if vnic_type == 'baremetal': - self.plug_baremetal_into_network(device_id, - host_id, - port_id, - net_id, - tenant_id, - segments, - port_name, - device_owner, - sg, orig_sg, - vnic_type, - switch_bindings, - trunk_details) - else: - self.plug_host_into_network(device_id, - host_id, - port_id, - net_id, - tenant_id, - segments, - port_name, - trunk_details) - elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: - self.plug_distributed_router_port_into_network(device_id, - host_id, - port_id, - net_id, - tenant_id, - segments) - - def unplug_port_from_network(self, device_id, device_owner, hostname, - port_id, network_id, tenant_id, sg, vnic_type, - switch_bindings=None, trunk_details=None): - if device_owner == n_const.DEVICE_OWNER_DHCP: - self.unplug_dhcp_port_from_network(device_id, - hostname, - port_id, - network_id, - tenant_id) - elif (device_owner.startswith('compute') or - device_owner.startswith('baremetal') or - device_owner.startswith('trunk')): - if vnic_type == 'baremetal': - self.unplug_baremetal_from_network(device_id, - hostname, - port_id, - network_id, - tenant_id, - sg, - vnic_type, - switch_bindings, - trunk_details) - else: - self.unplug_host_from_network(device_id, - hostname, - port_id, - network_id, - tenant_id, - trunk_details) - elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: - self.unplug_distributed_router_port_from_network(device_id, - port_id, - hostname, - tenant_id) - - def plug_host_into_network(self, vm_id, host, port_id, - network_id, tenant_id, segments, port_name, - trunk_details=None): - cmds = ['tenant %s' % tenant_id, - 'vm id %s hostid %s' % (vm_id, host)] - if port_name: - cmds.append('port id %s name "%s" network-id %s' % - (port_id, port_name, network_id)) - else: - cmds.append('port id %s network-id %s' % - (port_id, network_id)) - cmds.extend( - 'segment level %d id %s' % (level, segment['id']) - for level, segment in enumerate(segments)) - - if 
trunk_details and trunk_details.get('sub_ports'): - for subport in trunk_details['sub_ports']: - port_id = subport['port_id'] - net_id = self._ndb.get_network_id_from_port_id(port_id) - filters = {'port_id': port_id} - segments = db_lib.get_port_binding_level(filters) - - cmds.append('port id %s network-id %s' % - (port_id, net_id)) - cmds.extend( - 'segment level %d id %s' % (s.level, s.segment_id) - for s in segments - ) - self._run_openstack_cmds(cmds) - - def plug_baremetal_into_network(self, vm_id, host, port_id, - network_id, tenant_id, segments, port_name, - device_owner, - sg=None, orig_sg=None, - vnic_type=None, switch_bindings=None, - trunk_details=None): - # Basic error checking for baremental deployments - # notice that the following method throws and exception - # if an error condition exists - self._baremetal_support_check(vnic_type) - - # For baremetal, add host to the topology - if switch_bindings and vnic_type == portbindings.VNIC_BAREMETAL: - cmds = ['tenant %s' % tenant_id] - cmds.append('instance id %s hostid %s type baremetal' % - (vm_id, host)) - # This list keeps track of any ACLs that need to be rolled back - # in case we hit a failure trying to apply ACLs, and we end - # failing the transaction. - for binding in switch_bindings: - if not binding: - # skip all empty entries - continue - if device_owner.startswith('trunk'): - vlan_type = 'allowed' - else: - vlan_type = 'native' - # Ensure that binding contains switch and port ID info - if binding['switch_id'] and binding['port_id']: - if port_name: - cmds.append('port id %s name "%s" network-id %s ' - 'type %s switch-id %s switchport %s' % - (port_id, port_name, network_id, - vlan_type, binding['switch_id'], - binding['port_id'])) - else: - cmds.append('port id %s network-id %s type %s ' - 'switch-id %s switchport %s' % - (port_id, network_id, vlan_type, - binding['switch_id'], - binding['port_id'])) - cmds.extend('segment level %d id %s' % (level, - segment['id']) - for level, segment in enumerate(segments)) - - if trunk_details and trunk_details.get('sub_ports'): - for subport in trunk_details['sub_ports']: - port_id = subport['port_id'] - net_id = self._ndb.get_network_id_from_port_id( - port_id) - filters = {'port_id': port_id} - segments = db_lib.get_port_binding_level(filters) - - cmds.append('port id %s network-id %s type allowed' - ' switch-id %s switchport %s' % - (port_id, net_id, binding['switch_id'], - binding['port_id'])) - cmds.extend( - 'segment level %d id %s' % - (s.level, s.segment_id) for s in segments - ) - else: - msg = _('switch and port ID not specified for baremetal') - LOG.error(msg) - raise arista_exc.AristaConfigError(msg=msg) - cmds.append('exit') - self._run_openstack_cmds(cmds) - - if sg: - self.apply_security_group(sg, switch_bindings) - else: - # Security group was removed. Clean up the existing security - # groups. 
- if orig_sg: - self.remove_security_group(orig_sg, switch_bindings) - - def plug_dhcp_port_into_network(self, dhcp_id, host, port_id, - network_id, tenant_id, segments, - port_name): - cmds = ['tenant %s' % tenant_id, - 'network id %s' % network_id] - if port_name: - cmds.append('dhcp id %s hostid %s port-id %s name "%s"' % - (dhcp_id, host, port_id, port_name)) - else: - cmds.append('dhcp id %s hostid %s port-id %s' % - (dhcp_id, host, port_id)) - cmds.extend('segment level %d id %s' % (level, segment['id']) - for level, segment in enumerate(segments)) - self._run_openstack_cmds(cmds) - - def plug_distributed_router_port_into_network(self, router_id, host, - port_id, net_id, tenant_id, - segments): - if not self.bm_and_dvr_supported(): - LOG.info(const.ERR_DVR_NOT_SUPPORTED) - return - - cmds = ['tenant %s' % tenant_id, - 'instance id %s type router' % router_id, - 'port id %s network-id %s hostid %s' % (port_id, net_id, host)] - cmds.extend('segment level %d id %s' % (level, segment['id']) - for level, segment in enumerate(segments)) - self._run_openstack_cmds(cmds) - - def unplug_host_from_network(self, vm_id, host, port_id, - network_id, tenant_id, trunk_details=None): - cmds = ['tenant %s' % tenant_id, - 'vm id %s hostid %s' % (vm_id, host), - ] - if trunk_details and trunk_details.get('sub_ports'): - cmds.extend( - 'no port id %s' % subport['port_id'] - for subport in trunk_details['sub_ports'] - ) - cmds.append('no port id %s' % port_id) - self._run_openstack_cmds(cmds) - - def unplug_baremetal_from_network(self, vm_id, host, port_id, - network_id, tenant_id, sg, vnic_type, - switch_bindings=None, - trunk_details=None): - # Basic error checking for baremental deployments - # notice that the following method throws and exception - # if an error condition exists - self._baremetal_support_check(vnic_type) - - # Following is a temporary code for native VLANs - should be removed - cmds = ['tenant %s' % tenant_id] - cmds.append('instance id %s hostid %s type baremetal' % (vm_id, host)) - if trunk_details and trunk_details.get('sub_ports'): - cmds.extend( - 'no port id %s' % subport['port_id'] - for subport in trunk_details['sub_ports'] - ) - cmds.append('no port id %s' % port_id) - self._run_openstack_cmds(cmds) - - # SG - Remove security group rules from the port - # after deleting the instance - for binding in switch_bindings: - if not binding: - continue - self.security_group_driver.remove_acl(sg, binding['switch_id'], - binding['port_id'], - binding['switch_info']) - - def unplug_dhcp_port_from_network(self, dhcp_id, host, port_id, - network_id, tenant_id): - cmds = ['tenant %s' % tenant_id, - 'network id %s' % network_id, - 'no dhcp id %s port-id %s' % (dhcp_id, port_id), - ] - self._run_openstack_cmds(cmds) - - def unplug_distributed_router_port_from_network(self, router_id, - port_id, host, tenant_id): - if not self.bm_and_dvr_supported(): - LOG.info(const.ERR_DVR_NOT_SUPPORTED) - return - - # When the last router port is removed, the router is deleted from EOS. 
- cmds = ['tenant %s' % tenant_id, - 'instance id %s type router' % router_id, - 'no port id %s hostid %s' % (port_id, host)] - self._run_openstack_cmds(cmds) - - def create_network_bulk(self, tenant_id, network_list, sync=False): - cmds = ['tenant %s' % tenant_id] - # Create a reference to function to avoid name lookups in the loop - append_cmd = cmds.append - for counter, network in enumerate(network_list, 1): - try: - append_cmd('network id %s name "%s"' % - (network['network_id'], network['network_name'])) - except KeyError: - append_cmd('network id %s' % network['network_id']) - - cmds.extend( - 'segment %s type %s id %d %s' % ( - seg['id'] if self.hpb_supported() else 1, - seg['network_type'], seg['segmentation_id'], - ('dynamic' if seg.get('is_dynamic', False) else 'static' - if self.hpb_supported() else '')) - for seg in network['segments'] - if seg['network_type'] != const.NETWORK_TYPE_FLAT - ) - shared_cmd = 'shared' if network['shared'] else 'no shared' - append_cmd(shared_cmd) - if self._heartbeat_required(sync, counter): - append_cmd(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) - - if self._heartbeat_required(sync): - append_cmd(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) - - self._run_openstack_cmds(cmds, sync=sync) - - def create_network_segments(self, tenant_id, network_id, - network_name, segments): - if segments: - cmds = ['tenant %s' % tenant_id, - 'network id %s name "%s"' % (network_id, network_name)] - cmds.extend( - 'segment %s type %s id %d %s' % ( - seg['id'], seg['network_type'], seg['segmentation_id'], - ('dynamic' if seg.get('is_dynamic', False) else 'static' - if self.hpb_supported() else '')) - for seg in segments) - self._run_openstack_cmds(cmds) - - def delete_network_segments(self, tenant_id, segments): - if not segments: - return - cmds = ['tenant %s' % tenant_id] - for segment in segments: - cmds.append('network id %s' % segment['network_id']) - cmds.append('no segment %s' % segment['id']) - - self._run_openstack_cmds(cmds) - - def delete_network_bulk(self, tenant_id, network_id_list, sync=False): - cmds = ['tenant %s' % tenant_id] - for counter, network_id in enumerate(network_id_list, 1): - cmds.append('no network id %s' % network_id) - if self._heartbeat_required(sync, counter): - cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) - - if self._heartbeat_required(sync): - cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) - self._run_openstack_cmds(cmds, sync=sync) - - def delete_vm_bulk(self, tenant_id, vm_id_list, sync=False): - cmds = ['tenant %s' % tenant_id] - counter = 0 - for vm_id in vm_id_list: - counter += 1 - cmds.append('no vm id %s' % vm_id) - if self._heartbeat_required(sync, counter): - cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) - - if self._heartbeat_required(sync): - cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) - self._run_openstack_cmds(cmds, sync=sync) - - def delete_instance_bulk(self, tenant_id, instance_id_list, instance_type, - sync=False): - cmds = ['tenant %s' % tenant_id] - counter = 0 - for instance in instance_id_list: - counter += 1 - cmds.append('no instance id %s' % instance) - if self._heartbeat_required(sync, counter): - cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) - - if self._heartbeat_required(sync): - cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) - self._run_openstack_cmds(cmds, sync=sync) - - def create_instance_bulk(self, tenant_id, neutron_ports, vms, - port_profiles, sync=False): - cmds = ['tenant %s' % tenant_id] - # Create a reference to function to 
avoid name lookups in the loop - append_cmd = cmds.append - counter = 0 - for vm in vms.values(): - counter += 1 - - for v_port in vm['ports']: - port_id = v_port['portId'] - if not v_port['hosts']: - # Skip all the ports that have no host associsted with them - continue - - if port_id not in neutron_ports.keys(): - continue - neutron_port = neutron_ports[port_id] - - port_name = '' - if 'name' in neutron_port: - port_name = 'name "%s"' % neutron_port['name'] - - device_owner = neutron_port['device_owner'] - vnic_type = port_profiles[port_id]['vnic_type'] - network_id = neutron_port['network_id'] - segments = [] - if self.hpb_supported(): - segments = self._ndb.get_all_network_segments(network_id) - if device_owner == n_const.DEVICE_OWNER_DHCP: - append_cmd('network id %s' % neutron_port['network_id']) - append_cmd('dhcp id %s hostid %s port-id %s %s' % - (vm['vmId'], v_port['hosts'][0], - neutron_port['id'], port_name)) - cmds.extend( - 'segment level %d id %s' % (level, segment['id']) - for level, segment in enumerate(segments)) - elif (device_owner.startswith('compute') or - device_owner.startswith('baremetal') or - device_owner.startswith('trunk')): - if vnic_type == 'baremetal': - append_cmd('instance id %s hostid %s type baremetal' % - (vm['vmId'], v_port['hosts'][0])) - profile = port_profiles[neutron_port['id']] - profile = json.loads(profile['profile']) - for binding in profile['local_link_information']: - if not binding or not isinstance(binding, dict): - # skip all empty entries - continue - if device_owner.startswith('trunk'): - vlan_type = 'allowed' - else: - vlan_type = 'native' - # Ensure that profile contains local link info - if binding['switch_id'] and binding['port_id']: - if port_name: - cmds.append('port id %s name "%s" ' - 'network-id %s type %s ' - 'switch-id %s switchport %s' % - (port_id, port_name, - network_id, vlan_type, - binding['switch_id'], - binding['port_id'])) - else: - cmds.append('port id %s network-id %s ' - 'type %s ' - 'switch-id %s switchport %s' % - (port_id, network_id, - vlan_type, - binding['switch_id'], - binding['port_id'])) - cmds.extend('segment level %d id %s' % ( - level, segment['id']) - for level, segment in enumerate(segments)) - - else: - append_cmd('vm id %s hostid %s' % (vm['vmId'], - v_port['hosts'][0])) - append_cmd('port id %s %s network-id %s' % - (neutron_port['id'], port_name, - neutron_port['network_id'])) - cmds.extend('segment level %d id %s' % (level, - segment['id']) - for level, segment in enumerate(segments)) - elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: - if not self.bm_and_dvr_supported(): - LOG.info(const.ERR_DVR_NOT_SUPPORTED) - continue - append_cmd('instance id %s type router' % ( - neutron_port['device_id'])) - for host in v_port['hosts']: - append_cmd('port id %s network-id %s hostid %s' % ( - neutron_port['id'], - neutron_port['network_id'], host)) - cmds.extend('segment level %d id %s' % (level, - segment['id']) - for level, segment in enumerate(segments)) - else: - LOG.warning(_LW("Unknown device owner: %s"), - neutron_port['device_owner']) - - if self._heartbeat_required(sync, counter): - append_cmd(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) - - if self._heartbeat_required(sync): - append_cmd(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) - - self._run_openstack_cmds(cmds, sync=sync) - - def delete_tenant_bulk(self, tenant_list, sync=False): - cmds = [] - for tenant in tenant_list: - cmds.append('no tenant %s' % tenant) - if self._heartbeat_required(sync): - 
cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) - self._run_openstack_cmds(cmds, sync=sync) - - def delete_this_region(self): - cmds = ['enable', - 'configure', - 'cvx', - 'service openstack', - 'no region %s' % self.region, - ] - self._run_eos_cmds(cmds) - - def register_with_eos(self, sync=False): - self._run_openstack_cmds(['sync interval %d' % self.sync_interval], - sync=sync) - - def get_region_updated_time(self): - timestamp_cmd = self.cli_commands['timestamp'] - if timestamp_cmd: - try: - return self._run_eos_cmds(commands=timestamp_cmd)[0] - except IndexError: - # EAPI request failed and so return none - msg = "Failed to get last sync timestamp; trigger full sync" - LOG.info(msg) - return None - - def _check_sync_lock(self, client): - """Check if the lock is owned by this client. - - :param client: Returns true only if the lock owner matches the expected - client. - """ - cmds = ['show sync lock'] - ret = self._run_openstack_cmds(cmds, sync=True) - for r in ret: - if 'owner' in r: - lock_owner = r['owner'] - LOG.info(_LI('Lock requested by: %s'), client) - LOG.info(_LI('Lock owner: %s'), lock_owner) - return lock_owner == client - return False - - def sync_supported(self): - return self.cli_commands[const.CMD_REGION_SYNC] - - def hpb_supported(self): - return 'hierarchical-port-binding' in self.cli_commands['features'] - - def sync_start(self): - try: - cmds = [] - if self.sync_supported(): - # Locking the region during sync is supported. - client_id = socket.gethostname().split('.')[0] - request_id = self._get_random_name() - cmds = ['sync lock %s %s' % (client_id, request_id)] - self._run_openstack_cmds(cmds) - # Check whether the lock was acquired. - return self._check_sync_lock(client_id) - else: - cmds = ['sync start'] - self._run_openstack_cmds(cmds) - return True - except arista_exc.AristaRpcError: - return False - - def sync_end(self): - try: - # 'sync end' can be sent only when the region has been entered in - # the sync mode - self._run_openstack_cmds(['sync end'], sync=True) - return True - except arista_exc.AristaRpcError: - return False - def _run_eos_cmds(self, commands, commands_to_log=None): """Execute/sends a CAPI (Command API) command to EOS. @@ -830,7 +222,7 @@ class AristaRPCWrapperEapi(AristaRPCWrapperBase): """ region_cmd = 'region %s' % self.region - if sync and self.sync_supported(): + if sync: region_cmd = self.cli_commands[const.CMD_REGION_SYNC] full_command = [ diff --git a/networking_arista/ml2/rpc/arista_json.py b/networking_arista/ml2/rpc/arista_json.py index ca789a44..f252e985 100644 --- a/networking_arista/ml2/rpc/arista_json.py +++ b/networking_arista/ml2/rpc/arista_json.py @@ -139,14 +139,6 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase): self.create_region(self.region) self._set_region_update_interval() - def check_supported_features(self): - # We don't use this function as we know the features - # that are available once using this API. 
- pass - - def bm_and_dvr_supported(self): - return True - def get_region_updated_time(self): path = 'agent/' data = self._send_api_request(path, 'GET') @@ -173,12 +165,6 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase): return region return None - def sync_supported(self): - return True - - def hpb_supported(self): - return True - def sync_start(self): try: region = self.get_region(self.region) diff --git a/networking_arista/ml2/rpc/base.py b/networking_arista/ml2/rpc/base.py index 27815b19..4741d0b4 100644 --- a/networking_arista/ml2/rpc/base.py +++ b/networking_arista/ml2/rpc/base.py @@ -123,88 +123,13 @@ class AristaRPCWrapperBase(object): return True except Exception as exc: LOG.warning(_LW('%s when getting CVX master'), exc) + LOG.warning("Failed to initialize connection with CVX. Please " + "ensure CVX is reachable and running EOS 4.18.1 " + "or greater.") self.set_cvx_unavailable() return False - def delete_tenant(self, tenant_id): - """Deletes a given tenant and all its networks and VMs from EOS. - - :param tenant_id: globally unique neutron tenant identifier - """ - self.delete_tenant_bulk([tenant_id]) - - def clear_region_updated_time(self): - # TODO(shashank): Remove this once the call is removed from the ML2 - # driver. - pass - - def create_network(self, tenant_id, network): - """Creates a single network on Arista hardware - - :param tenant_id: globally unique neutron tenant identifier - :param network: dict containing network_id, network_name and - segmentation_id - """ - self.create_network_bulk(tenant_id, [network]) - - def delete_network(self, tenant_id, network_id, network_segments): - """Deletes a specified network for a given tenant - - :param tenant_id: globally unique neutron tenant identifier - :param network_id: globally unique neutron network identifier - :param network_segments: segments associated with the network - """ - segments_info = [] - segments_info.extend({'id': segment['id'], 'network_id': network_id} - for segment in network_segments) - self.delete_network_segments(tenant_id, segments_info) - self.delete_network_bulk(tenant_id, [network_id]) - - def delete_vm(self, tenant_id, vm_id): - """Deletes a VM from EOS for a given tenant - - :param tenant_id : globally unique neutron tenant identifier - :param vm_id : id of a VM that needs to be deleted. - """ - self.delete_vm_bulk(tenant_id, [vm_id]) - - @abc.abstractmethod - def plug_port_into_network(self, device_id, host_id, port_id, - net_id, tenant_id, port_name, device_owner, - sg, orig_sg, vnic_type, segments=None, - switch_bindings=None, trunk_details=None): - """Generic routine plug a port of a VM instace into network. - - :param device_id: globally unique identifier for the device - :param host: ID of the host where the port is placed - :param port_id: globally unique port ID that connects port to network - :param network_id: globally unique neutron network identifier - :param tenant_id: globally unique neutron tenant identifier - :param port_name: Name of the port - for display purposes - :param device_owner: Device owner - e.g. 
compute or network:dhcp - :param sg: current security group for the port - :param orig_sg: original security group for the port - :param vnic_type: VNIC type for the port - :param segments: list of network segments the port is bound to - :param switch_bindings: List of switch_bindings - :param trunk_details: List of subports of a trunk port - """ - - @abc.abstractmethod - def unplug_port_from_network(self, device_id, device_owner, hostname, - port_id, network_id, tenant_id, sg, vnic_type, - switch_bindings=None, trunk_details=None): - """Removes a port from the device - - :param device_id: globally unique identifier for the device - :param host: ID of the host where the device is placed - :param port_id: globally unique port ID that connects device to network - :param network_id: globally unique neutron network identifier - :param tenant_id: globally unique neutron tenant identifier - :param trunk_details: List of subports of a trunk port - """ - def _clean_acls(self, sg, failed_switch, switches_to_clean): """This is a helper function to clean up ACLs on switches. @@ -274,155 +199,6 @@ class AristaRPCWrapperBase(object): """ self.security_group_driver.perform_sync_of_sg() - @abc.abstractmethod - def sync_supported(self): - """Whether the EOS version supports sync. - - Returns True if sync is supported, false otherwise. - """ - - @abc.abstractmethod - def bm_and_dvr_supported(self): - """Whether EOS supports Ironic and DVR. - - Returns True if supported, false otherwise. - """ - - @abc.abstractmethod - def register_with_eos(self, sync=False): - """This is the registration request with EOS. - - This the initial handshake between Neutron and EOS. - critical end-point information is registered with EOS. - - :param sync: This flags indicates that the region is being synced. - """ - - @abc.abstractmethod - def check_supported_features(self): - """Checks whether the CLI commands are valid. - - This method tries to execute the commands on EOS and if it succeedes - the command is stored. - """ - - @abc.abstractmethod - def get_region_updated_time(self): - """Return the timestamp of the last update. - - This method returns the time at which any entities in the region - were updated. - """ - - @abc.abstractmethod - def delete_this_region(self): - """Deleted the region data from EOS.""" - - @abc.abstractmethod - def sync_start(self): - """Let EOS know that a sync in being initiated.""" - - @abc.abstractmethod - def sync_end(self): - """Let EOS know that sync is complete.""" - - @abc.abstractmethod - def get_tenants(self): - """Returns dict of all tenants known by EOS. - - :returns: dictionary containing the networks per tenant - and VMs allocated per tenant - """ - - @abc.abstractmethod - def delete_tenant_bulk(self, tenant_list, sync=False): - """Sends a bulk request to delete the tenants. - - :param tenant_list: list of globaly unique neutron tenant ids which - need to be deleted. - :param sync: This flags indicates that the region is being synced. - """ - - @abc.abstractmethod - def create_network_bulk(self, tenant_id, network_list, sync=False): - """Creates a network on Arista Hardware - - :param tenant_id: globally unique neutron tenant identifier - :param network_list: list of dicts containing network_id, network_name - and segmentation_id - :param sync: This flags indicates that the region is being synced. 
- """ - - @abc.abstractmethod - def create_network_segments(self, tenant_id, network_id, - network_name, segments): - """Creates a network on Arista Hardware - - Note: This method is not used at the moment. create_network() - is used instead. This will be used once the support for - multiple segments is added in Neutron. - - :param tenant_id: globally unique neutron tenant identifier - :param network_id: globally unique neutron network identifier - :param network_name: Network name - for display purposes - :param segments: List of segments in a given network - """ - - @abc.abstractmethod - def delete_network_bulk(self, tenant_id, network_id_list, sync=False): - """Deletes the network ids specified for a tenant - - :param tenant_id: globally unique neutron tenant identifier - :param network_id_list: list of globally unique neutron network - identifiers - :param sync: This flags indicates that the region is being synced. - """ - - @abc.abstractmethod - def delete_network_segments(self, tenant_id, network_segments): - """Deletes the network segments - - :param network_segments: List of network segments to be delted. - """ - - @abc.abstractmethod - def create_instance_bulk(self, tenant_id, neutron_ports, vms, - port_profiles, sync=False): - """Sends a bulk request to create ports. - - :param tenant_id: globaly unique neutron tenant identifier - :param neutron_ports: list of ports that need to be created. - :param vms: list of vms to which the ports will be attached to. - :param sync: This flags indicates that the region is being synced. - """ - - @abc.abstractmethod - def delete_instance_bulk(self, tenant_id, instance_id_list, instance_type, - sync=False): - """Deletes instances from EOS for a given tenant - - :param tenant_id : globally unique neutron tenant identifier - :param instance_id_list : ids of instances that needs to be deleted. - :param instance_type: The type of the instance which is being deleted. - :param sync: This flags indicates that the region is being synced. - """ - - @abc.abstractmethod - def delete_vm_bulk(self, tenant_id, vm_id_list, sync=False): - """Deletes VMs from EOS for a given tenant - - :param tenant_id : globally unique neutron tenant identifier - :param vm_id_list : ids of VMs that needs to be deleted. - :param sync: This flags indicates that the region is being synced. - """ - - @abc.abstractmethod - def hpb_supported(self): - """Whether hierarchical port binding (HPB) is supported by CVX. - - Returns True if HPB is supported, False otherwise. - """ - def apply_security_group(self, security_group, switch_bindings): """Applies ACLs on switch interface. diff --git a/networking_arista/tests/unit/ml2/rpc/test_arista_eapi_rpc_wrapper.py b/networking_arista/tests/unit/ml2/rpc/test_arista_eapi_rpc_wrapper.py index 2ac1c259..10f082cf 100644 --- a/networking_arista/tests/unit/ml2/rpc/test_arista_eapi_rpc_wrapper.py +++ b/networking_arista/tests/unit/ml2/rpc/test_arista_eapi_rpc_wrapper.py @@ -14,20 +14,14 @@ # limitations under the License. 
import mock -from mock import patch -from neutron_lib import constants as n_const from oslo_config import cfg -import six from neutron.tests import base from neutron.tests.unit import testlib_api -from networking_arista.common import constants from networking_arista.common import db_lib from networking_arista.common import exceptions as arista_exc from networking_arista.ml2.rpc import arista_eapi -from networking_arista.tests.unit.ml2.test_arista_mechanism_driver import \ - FakePortBindingLevel import networking_arista.tests.unit.ml2.utils as utils @@ -39,738 +33,6 @@ def setup_valid_config(): utils.setup_arista_wrapper_config(cfg) -class PositiveRPCWrapperValidConfigTestCase(testlib_api.SqlTestCase): - """Test cases to test the RPC between Arista Driver and EOS. - - Tests all methods used to send commands between Arista Driver and EOS - """ - - def setUp(self): - super(PositiveRPCWrapperValidConfigTestCase, self).setUp() - setup_valid_config() - ndb = db_lib.NeutronNets() - self.drv = arista_eapi.AristaRPCWrapperEapi(ndb) - self.drv._server_ip = "10.11.12.13" - self.region = 'RegionOne' - - def _get_exit_mode_cmds(self, modes): - return ['exit'] * len(modes) - - def _verify_send_eapi_request_calls(self, mock_send_eapi_req, cmds, - commands_to_log=None): - calls = [] - calls.extend( - mock.call(cmds=cmd, commands_to_log=log_cmd) - for cmd, log_cmd in six.moves.zip(cmds, commands_to_log or cmds)) - mock_send_eapi_req.assert_has_calls(calls) - - def test_no_exception_on_correct_configuration(self): - self.assertIsNotNone(self.drv) - - @patch(EAPI_SEND_FUNC) - def test_plug_host_into_network(self, mock_send_eapi_req): - tenant_id = 'ten-1' - vm_id = 'vm-1' - port_id = 123 - network_id = 'net-id' - host = 'host' - port_name = '123-port' - segment_id = 'segment_id_1' - segments = [{'network_type': 'vlan', 'physical_network': 'default', - 'segmentation_id': 1234, 'id': segment_id}] - - self.drv.plug_host_into_network(vm_id, host, port_id, - network_id, tenant_id, segments, - port_name) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', 'configure', 'cvx', 'service openstack', - 'region RegionOne', - 'tenant ten-1', 'vm id vm-1 hostid host', - 'port id 123 name "123-port" network-id net-id', - ] - for level, segment in enumerate(segments): - cmd2.append('segment level %s id %s' % (level, segment['id'])) - - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_plug_dhcp_port_into_network(self, mock_send_eapi_req): - tenant_id = 'ten-1' - vm_id = 'vm-1' - port_id = 123 - network_id = 'net-id' - host = 'host' - port_name = '123-port' - segments = [] - - self.drv.plug_port_into_network(vm_id, host, port_id, network_id, - tenant_id, port_name, - n_const.DEVICE_OWNER_DHCP, None, None, - None, segments) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', 'configure', 'cvx', 'service openstack', - 'region RegionOne', - 'tenant ten-1', 'network id net-id', - 'dhcp id vm-1 hostid host port-id 123 name "123-port"', - ] - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_plug_baremetal_into_network(self, mock_send_eapi_req): - tenant_id = 'ten-1' - network_id = 'net-id-1' - bm_id = 'bm-1' - port_id = 'p1' - host = 'host' - port_name = 'name_p1' - device_owner = 'compute:None' - - segments = [{'segmentation_id': 1001, - 'id': 'segment_id_1', - 'network_type': 'vlan', - 'is_dynamic': False}] - - switch_bindings = {'local_link_information': [ - {'port_id': 'Eth1', 'switch_id': 
'switch-id-1', - 'switch_info': 'switch-1'}]} - bindings = switch_bindings['local_link_information'] - - self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True) - - self.drv.plug_baremetal_into_network(bm_id, host, port_id, - network_id, tenant_id, - segments, port_name, - device_owner, - None, None, 'baremetal', - bindings) - - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', 'configure', 'cvx', 'service openstack', - 'region RegionOne', - 'tenant ten-1', - 'instance id bm-1 hostid host type baremetal', - 'port id p1 name "name_p1" network-id net-id-1 ' - 'type native switch-id switch-id-1 switchport Eth1', - ] - for level, segment in enumerate(segments): - cmd2.append('segment level %s id %s' % (level, segment['id'])) - cmd2.append('exit') - - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_unplug_baremetal_from_network(self, mock_send_eapi_req): - tenant_id = 'ten-1' - network_id = 'net-id-1' - bm_id = 'bm-1' - port_id = 111 - host = 'host' - switch_bindings = {'local_link_information': [ - {'port_id': 'Eth1', 'switch_id': 'switch-id-1', - 'switch_info': 'switch-1'}]} - bindings = switch_bindings['local_link_information'] - self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True) - self.drv.unplug_baremetal_from_network(bm_id, host, port_id, - network_id, tenant_id, - None, 'baremetal', - bindings) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', 'configure', 'cvx', 'service openstack', - 'region RegionOne', - 'tenant ten-1', - 'instance id bm-1 hostid host type baremetal', - 'no port id 111', - ] - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_unplug_host_from_network(self, mock_send_eapi_req): - tenant_id = 'ten-1' - vm_id = 'vm-1' - port_id = 123 - network_id = 'net-id' - host = 'host' - self.drv.unplug_host_from_network(vm_id, host, port_id, - network_id, tenant_id) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', 'configure', 'cvx', 'service openstack', - 'region RegionOne', - 'tenant ten-1', 'vm id vm-1 hostid host', - 'no port id 123', - ] - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_unplug_dhcp_port_from_network(self, mock_send_eapi_req): - tenant_id = 'ten-1' - vm_id = 'vm-1' - port_id = 123 - network_id = 'net-id' - host = 'host' - - self.drv.unplug_dhcp_port_from_network(vm_id, host, port_id, - network_id, tenant_id) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', 'configure', 'cvx', 'service openstack', - 'region RegionOne', - 'tenant ten-1', 'network id net-id', - 'no dhcp id vm-1 port-id 123', - ] - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_create_network(self, mock_send_eapi_req): - tenant_id = 'ten-1' - self.drv.cli_commands['features'] = {'hierarchical-port-binding': 1} - network = { - 'network_id': 'net-id', - 'network_name': 'net-name', - 'segments': [{'segmentation_id': 123, - 'physical_network': 'default', - 'network_type': 'vlan', - 'id': 'segment_id_1'}], - 'shared': False, - } - self.drv.create_network(tenant_id, network) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', 'configure', 'cvx', 'service openstack', - 'region RegionOne', - 'tenant ten-1', 'network id net-id name "net-name"', - ] - for seg in network['segments']: - is_dynamic = seg.get('is_dynamic', False) - cmd2.append('segment %s type %s id %d %s' % (seg['id'], - 
seg['network_type'], seg['segmentation_id'], - 'dynamic' if is_dynamic else 'static')) - cmd2.append('no shared') - - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_create_shared_network(self, mock_send_eapi_req): - tenant_id = 'ten-1' - segment_id = 'abcd-cccc' - segmentation_id = 123 - network_type = 'vlan' - segments = [{'segmentation_id': segmentation_id, - 'id': segment_id, - 'network_type': network_type}] - network = { - 'network_id': 'net-id', - 'network_name': 'net-name', - 'segments': segments, - 'shared': True} - self.drv.cli_commands['features'] = {'hierarchical-port-binding': 1} - self.drv.create_network(tenant_id, network) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', 'configure', 'cvx', 'service openstack', - 'region RegionOne', - 'tenant ten-1', 'network id net-id name "net-name"', - 'segment %s type %s id %d %s' % (segment_id, network_type, - segmentation_id, 'static'), - 'shared', - ] - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_create_network_bulk(self, mock_send_eapi_req): - tenant_id = 'ten-2' - num_networks = 10 - network_type = 'vlan' - segment_id = 'abcd-eeee-%s' - self.drv.cli_commands['features'] = {'hierarchical-port-binding': 1} - networks = [{ - 'network_id': 'net-id-%d' % net_id, - 'network_name': 'net-name-%d' % net_id, - 'segments': [{'segmentation_id': net_id, - 'network_type': 'vlan', - 'id': segment_id % net_id}], - 'shared': True, - } for net_id in range(1, num_networks) - ] - - self.drv.create_network_bulk(tenant_id, networks) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', - 'configure', - 'cvx', - 'service openstack', - 'region RegionOne', - 'tenant ten-2'] - for net_id in range(1, num_networks): - cmd2.append('network id net-id-%d name "net-name-%d"' % - (net_id, net_id)) - cmd2.append('segment %s type %s id %d %s' % ( - segment_id % net_id, network_type, net_id, 'static')) - cmd2.append('shared') - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_delete_network(self, mock_send_eapi_req): - tenant_id = 'ten-1' - network_id = 'net-id' - segments = [{'segmentation_id': 101, - 'physical_network': 'default', - 'id': 'segment_id_1', - 'network_type': 'vlan'}] - self.drv.delete_network(tenant_id, network_id, segments) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', 'configure', 'cvx', 'service openstack', - 'region RegionOne', - 'tenant ten-1', - 'network id net-id', - 'no segment segment_id_1', - ] - cmd3 = ['enable', 'configure', 'cvx', 'service openstack', - 'region RegionOne', - 'tenant ten-1', - 'no network id net-id', - ] - self._verify_send_eapi_request_calls(mock_send_eapi_req, - [cmd1, cmd2, cmd1, cmd3]) - - @patch(EAPI_SEND_FUNC) - def test_delete_network_bulk(self, mock_send_eapi_req): - tenant_id = 'ten-2' - num_networks = 10 - - networks = ['net-id-%d' % net_id for net_id in range(1, num_networks)] - self.drv.delete_network_bulk(tenant_id, networks) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', - 'configure', - 'cvx', - 'service openstack', - 'region RegionOne', - 'tenant ten-2'] - for net_id in range(1, num_networks): - cmd2.append('no network id net-id-%d' % net_id) - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_delete_vm(self, mock_send_eapi_req): - tenant_id = 'ten-1' - vm_id = 'vm-id' - self.drv.delete_vm(tenant_id, vm_id) - cmd1 = ['show 
openstack agent uuid'] - cmd2 = ['enable', 'configure', 'cvx', 'service openstack', - 'region RegionOne', - 'tenant ten-1', 'no vm id vm-id', - ] - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_delete_vm_bulk(self, mock_send_eapi_req): - tenant_id = 'ten-2' - num_vms = 10 - vm_ids = ['vm-id-%d' % vm_id for vm_id in range(1, num_vms)] - self.drv.delete_vm_bulk(tenant_id, vm_ids) - - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', - 'configure', - 'cvx', - 'service openstack', - 'region RegionOne', - 'tenant ten-2'] - - for vm_id in range(1, num_vms): - cmd2.append('no vm id vm-id-%d' % vm_id) - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_create_port_bulk(self, mock_send_eapi_req): - tenant_id = 'ten-3' - num_devices = 10 - num_ports_per_device = 2 - - device_count = 0 - devices = {} - for device_id in range(1, num_devices): - device_count += 1 - dev_id = 'dev-id-%d' % device_id - devices[dev_id] = {'vmId': dev_id, - 'baremetal_instance': False, - 'ports': [] - } - for port_id in range(1, num_ports_per_device): - port_id = 'port-id-%d-%d' % (device_id, port_id) - port = { - 'device_id': 'dev-id-%d' % device_id, - 'hosts': ['host_%d' % (device_count)], - 'portId': port_id - } - devices[dev_id]['ports'].append(port) - - device_owners = [n_const.DEVICE_OWNER_DHCP, 'compute', - n_const.DEVICE_OWNER_DVR_INTERFACE] - port_list = [] - - net_count = 1 - for device_id in range(1, num_devices): - for port_id in range(1, num_ports_per_device): - port = { - 'portId': 'port-id-%d-%d' % (device_id, port_id), - 'device_id': 'dev-id-%d' % device_id, - 'device_owner': device_owners[(device_id + port_id) % 3], - 'network_id': 'network-id-%d' % net_count, - 'name': 'port-%d-%d' % (device_id, port_id), - 'tenant_id': tenant_id - } - port_list.append(port) - net_count += 1 - - create_ports = {} - port_profiles = {} - for port in port_list: - create_ports.update(utils.port_dict_representation(port)) - port_profiles[port['portId']] = {'vnic_type': 'normal'} - - self.drv.cli_commands[constants.CMD_INSTANCE] = 'instance' - self.drv.create_instance_bulk(tenant_id, create_ports, devices, - port_profiles=port_profiles) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', - 'configure', - 'cvx', - 'service openstack', - 'region RegionOne', - 'tenant ten-3'] - - for device in devices.values(): - for v_port in device['ports']: - port_id = v_port['portId'] - port = create_ports[port_id] - host = v_port['hosts'][0] - device_owner = port['device_owner'] - port_name = port['name'] - network_id = port['network_id'] - device_id = port['device_id'] - if device_owner == 'network:dhcp': - cmd2.append('network id %s' % network_id) - cmd2.append('dhcp id %s hostid %s port-id %s name "%s"' % ( - device_id, host, port_id, port_name)) - elif device_owner == 'compute': - cmd2.append('vm id %s hostid %s' % (device_id, host)) - cmd2.append('port id %s name "%s" network-id %s' % ( - port_id, port_name, network_id)) - elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: - cmd2.append('instance id %s type router' % device_id) - cmd2.append('port id %s network-id %s hostid %s' % ( - port_id, network_id, host)) - net_count += 1 - - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_delete_tenant(self, mock_send_eapi_req): - tenant_id = 'ten-1' - self.drv.delete_tenant(tenant_id) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', 
'configure', 'cvx', 'service openstack', - 'region RegionOne', 'no tenant ten-1', - ] - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_delete_tenant_bulk(self, mock_send_eapi_req): - num_tenants = 10 - tenant_list = ['ten-%d' % t_id for t_id in range(1, num_tenants)] - self.drv.delete_tenant_bulk(tenant_list) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', - 'configure', - 'cvx', - 'service openstack', - 'region RegionOne'] - for ten_id in range(1, num_tenants): - cmd2.append('no tenant ten-%d' % ten_id) - - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - def test_get_network_info_returns_none_when_no_such_net(self): - expected = [] - self.drv.get_tenants = mock.MagicMock() - self.drv.get_tenants.return_value = [] - - net_info = self.drv.get_tenants() - - self.drv.get_tenants.assert_called_once_with() - self.assertEqual(net_info, expected, ('Network info must be "None"' - 'for unknown network')) - - def test_get_network_info_returns_info_for_available_net(self): - valid_network_id = '12345' - valid_net_info = {'network_id': valid_network_id, - 'some_info': 'net info'} - known_nets = valid_net_info - - self.drv.get_tenants = mock.MagicMock() - self.drv.get_tenants.return_value = known_nets - - net_info = self.drv.get_tenants() - self.assertEqual(net_info, valid_net_info, - ('Must return network info for a valid net')) - - @patch(EAPI_SEND_FUNC) - def test_check_supported_features(self, mock_send_eapi_req): - self.drv._get_random_name = mock.MagicMock() - self.drv._get_random_name.return_value = 'RegionOne' - - self.drv.check_supported_features() - - get_eos_master_cmd = ['show openstack agent uuid'] - instance_command = ['show openstack instances'] - cmds = [get_eos_master_cmd, instance_command] - - calls = [] - calls.extend(mock.call(cmds=cmd, commands_to_log=cmd) for cmd in cmds) - mock_send_eapi_req.assert_has_calls(calls) - - @patch(EAPI_SEND_FUNC) - def test_register_with_eos(self, mock_send_eapi_req): - self.drv.register_with_eos() - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', - 'configure', - 'cvx', - 'service openstack', - 'region %s' % self.region, - 'sync interval %d' % cfg.CONF.ml2_arista.sync_interval, - ] - self._verify_send_eapi_request_calls( - mock_send_eapi_req, - [cmd1, cmd2], - commands_to_log=[cmd1, cmd2]) - - def _enable_sync_cmds(self): - self.drv.cli_commands[ - constants.CMD_REGION_SYNC] = 'region RegionOne sync' - self.drv.cli_commands[constants.CMD_SYNC_HEARTBEAT] = 'sync heartbeat' - self.drv.cli_commands['baremetal'] = '' - - @patch(EAPI_SEND_FUNC) - def test_create_network_bulk_during_sync(self, mock_send_eapi_req): - self._enable_sync_cmds() - self.drv.cli_commands['features'] = {'hierarchical-port-binding': 1} - tenant_id = 'ten-10' - num_networks = 101 - segments = [{'segmentation_id': 101, - 'physical_network': 'default', - 'id': 'segment_id_1', - 'network_type': 'vlan'}] - networks = [{ - 'network_id': 'net-id-%d' % net_id, - 'network_name': 'net-name-%d' % net_id, - 'segments': segments, - 'shared': True, - } for net_id in range(1, num_networks + 1) - ] - - self.drv.create_network_bulk(tenant_id, networks, sync=True) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', - 'configure', - 'cvx', - 'service openstack', - 'region RegionOne sync', - 'tenant ten-10'] - - # Send 100 create network commands - for net_id in range(1, 101): - cmd2.append('network id net-id-%d name "net-name-%d"' % - (net_id, net_id)) - for seg in segments: - 
is_dynamic = seg.get('is_dynamic', False) - cmd2.append('segment %s type %s id %d %s' % (seg['id'], - seg['network_type'], seg['segmentation_id'], - 'dynamic' if is_dynamic else 'static')) - cmd2.append('shared') - - # Send heartbeat - cmd2.append('sync heartbeat') - # Send the remaining network - cmd2.append('network id net-id-101 name "net-name-101"') - for seg in segments: - is_dynamic = seg.get('is_dynamic', False) - cmd2.append('segment %s type %s id %d %s' % (seg['id'], - seg['network_type'], seg['segmentation_id'], - 'dynamic' if is_dynamic else 'static')) - cmd2.append('shared') - # Send the final heartbeat - cmd2.append('sync heartbeat') - - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_delete_network_bulk_during_sync(self, mock_send_eapi_req): - self._enable_sync_cmds() - tenant_id = 'ten-10' - num_networks = 101 - networks = ['nid-%d' % net_id for net_id in range(1, num_networks + 1)] - - self.drv.delete_network_bulk(tenant_id, networks, sync=True) - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', - 'configure', - 'cvx', - 'service openstack', - 'region RegionOne sync', - 'tenant ten-10'] - - # Send 100 create network commands - for net_id in range(1, 101): - cmd2.append('no network id nid-%d' % (net_id)) - - # Send heartbeat - cmd2.append('sync heartbeat') - # Send the remaining network - cmd2.append('no network id nid-101') - # Send the final heartbeat - cmd2.append('sync heartbeat') - - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_delete_vm_bulk_during_sync(self, mock_send_eapi_req): - self._enable_sync_cmds() - tenant_id = 'ten-2' - num_vms = 101 - vm_ids = ['vm-id-%d' % vm_id for vm_id in range(1, num_vms + 1)] - self.drv.delete_vm_bulk(tenant_id, vm_ids, sync=True) - - cmd1 = ['show openstack agent uuid'] - cmd2 = ['enable', - 'configure', - 'cvx', - 'service openstack', - 'region RegionOne sync', - 'tenant ten-2'] - - for vm_id in range(1, 101): - cmd2.append('no vm id vm-id-%d' % vm_id) - - # Send heartbeat - cmd2.append('sync heartbeat') - # Send the remaining vm - cmd2.append('no vm id vm-id-101') - # Send the final heartbeat - cmd2.append('sync heartbeat') - - self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) - - @patch(EAPI_SEND_FUNC) - def test_create_port_bulk_during_sync(self, mock_send_eapi_req): - self._enable_sync_cmds() - tenant_id = 'ten-3' - num_devices = 101 - num_ports_per_device = 2 - - device_count = 0 - devices = {} - for device_id in range(1, num_devices): - device_count += 1 - dev_id = 'dev-id-%d' % device_id - devices[dev_id] = {'vmId': dev_id, - 'baremetal_instance': False, - 'ports': [] - } - for port_id in range(1, num_ports_per_device + 1): - port_id = 'port-id-%d-%d' % (device_id, port_id) - port = { - 'device_id': 'dev-id-%d' % device_id, - 'hosts': ['host_%d' % (device_count)], - 'portId': port_id - } - devices[dev_id]['ports'].append(port) - - device_owners = [n_const.DEVICE_OWNER_DHCP, 'compute', - n_const.DEVICE_OWNER_DVR_INTERFACE] - - port_list = [] - net_count = 1 - for device_id in range(1, num_devices): - for port_id in range(1, num_ports_per_device + 1): - port = { - 'portId': 'port-id-%d-%d' % (device_id, port_id), - 'device_id': 'dev-id-%d' % device_id, - 'device_owner': device_owners[(device_id + port_id) % 3], - 'network_id': 'network-id-%d' % net_count, - 'name': 'port-%d-%d' % (device_id, port_id), - 'tenant_id': tenant_id - } - port_list.append(port) - net_count += 1 - - 
-        create_ports = {}
-        port_profiles = {}
-        for port in port_list:
-            create_ports.update(utils.port_dict_representation(port))
-            port_profiles[port['portId']] = {'vnic_type': 'normal'}
-
-        self.drv.cli_commands[constants.CMD_INSTANCE] = 'instance'
-        self.drv.create_instance_bulk(tenant_id, create_ports, devices,
-                                      port_profiles=port_profiles, sync=True)
-        cmd1 = ['show openstack agent uuid']
-        cmd2 = ['enable',
-                'configure',
-                'cvx',
-                'service openstack',
-                'region RegionOne sync',
-                'tenant ten-3']
-
-        for count, device in enumerate(devices.values(), 1):
-            for v_port in device['ports']:
-                port_id = v_port['portId']
-                port = create_ports[port_id]
-                host = v_port['hosts'][0]
-                vm_id = device['vmId']
-                port_name = port['name']
-                network_id = port['network_id']
-                device_owner = port['device_owner']
-                device_id = port['device_id']
-
-                if device_owner == n_const.DEVICE_OWNER_DHCP:
-                    cmd2.append('network id %s' % network_id)
-                    cmd2.append('dhcp id %s hostid %s port-id %s name "%s"' % (
-                        vm_id, host, port_id, port_name))
-                elif device_owner == 'compute':
-                    cmd2.append('vm id %s hostid %s' % (vm_id, host))
-                    cmd2.append('port id %s name "%s" network-id %s' % (
-                        port_id, port_name, network_id))
-                elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
-                    cmd2.append('instance id %s type router' % device_id)
-                    cmd2.append('port id %s network-id %s hostid %s' % (
-                        port_id, network_id, host))
-            if count == (num_devices - 1):
-                # Send heartbeat
-                cmd2.append('sync heartbeat')
-
-        # Send the final heartbeat
-        cmd2.append('sync heartbeat')
-
-        self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
-
-    @patch(EAPI_SEND_FUNC)
-    def test_delete_tenant_bulk_during_sync(self, mock_send_eapi_req):
-        self._enable_sync_cmds()
-        num_tenants = 101
-        tenant_list = ['ten-%d' % t_id for t_id in range(1, num_tenants + 1)]
-        self.drv.delete_tenant_bulk(tenant_list, sync=True)
-        cmd1 = ['show openstack agent uuid']
-        cmd2 = ['enable',
-                'configure',
-                'cvx',
-                'service openstack',
-                'region RegionOne sync']
-        for ten_id in range(1, num_tenants + 1):
-            cmd2.append('no tenant ten-%d' % ten_id)
-
-        cmd2.append('sync heartbeat')
-
-        self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
-
-
-
 class AristaRPCWrapperInvalidConfigTestCase(base.BaseTestCase):
     """Negative test cases to test the Arista Driver configuration."""
@@ -802,194 +64,6 @@ class NegativeRPCWrapperTestCase(testlib_api.SqlTestCase):
             side_effect=Exception('server error')
         )
         with mock.patch.object(arista_eapi.LOG, 'error') as log_err:
-            self.assertRaises(arista_exc.AristaRpcError, drv.get_tenants)
+            self.assertRaises(arista_exc.AristaRpcError,
+                              drv._run_openstack_cmds, [])
             log_err.assert_called_once_with(mock.ANY)
-
-
-class RPCWrapperEapiValidConfigTrunkTestCase(testlib_api.SqlTestCase):
-    """Test cases to test plug trunk port into network."""
-
-    def setUp(self):
-        super(RPCWrapperEapiValidConfigTrunkTestCase, self).setUp()
-        setup_valid_config()
-        ndb = mock.MagicMock()
-        self.drv = arista_eapi.AristaRPCWrapperEapi(ndb)
-        self.drv._server_ip = "10.11.12.13"
-        self.region = 'RegionOne'
-        arista_eapi.db_lib = mock.MagicMock()
-
-    @patch(EAPI_SEND_FUNC)
-    def test_plug_host_into_network(self, mock_send_eapi_req):
-        tenant_id = 'ten-1'
-        network_id = 'net-id-1'
-        vm_id = 'vm-1'
-        port_id = 111
-        host = 'host'
-        port_name = '111-port'
-        sub_segment_id = 'sub_segment_id_1'
-        sub_segmentation_id = 1002
-        sub_network_id = 'subnet-id'
-        subport_id = 222
-        segment_id = 'segment_id_1'
-        segments = [{'network_type': 'vlan',
-                     'physical_network': 'default',
-                     'segmentation_id': 1234, 'id': segment_id}]
-        binding_level = FakePortBindingLevel(subport_id, 0, 'vendor-1',
-                                             sub_segment_id)
-        subport_segments = [binding_level]
-        trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
-                                        'port_id': subport_id,
-                                        'segmentation_id': sub_segmentation_id,
-                                        'segmentation_type': 'vlan'}],
-                         'trunk_id': 'trunk_id'}
-        self.drv._ndb.get_network_id_from_port_id.return_value = sub_network_id
-        arista_eapi.db_lib.get_port_binding_level.return_value = \
-            subport_segments
-
-        self.drv.plug_host_into_network(vm_id, host, port_id,
-                                        network_id, tenant_id, segments,
-                                        port_name, trunk_details=trunk_details)
-
-        cmd1 = ['show openstack agent uuid']
-        cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
-                'region RegionOne',
-                'tenant ten-1', 'vm id vm-1 hostid host',
-                'port id 111 name "111-port" network-id net-id-1',
-                ]
-        for level, segment in enumerate(segments):
-            cmd2.append('segment level %s id %s' % (level, segment['id']))
-        cmd2.append('port id 222 network-id subnet-id')
-        for segment in subport_segments:
-            cmd2.append('segment level %s id %s' % (segment.level,
-                                                    segment.segment_id))
-
-        self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
-
-    @patch(EAPI_SEND_FUNC)
-    def test_plug_baremetal_into_network(self, mock_send_eapi_req):
-        tenant_id = 'ten-2'
-        network_id = 'net-id-1'
-        bm_id = 'bm-1'
-        port_id = 'p1'
-        host = 'host'
-        port_name = 'name_p1'
-        device_owner = 'compute:None'
-        subport_id = 222
-        sub_segment_id = 'sub_segment_id_1'
-
-        segments = [{'segmentation_id': 1001,
-                     'id': 'segment_id_1',
-                     'network_type': 'vlan',
-                     'is_dynamic': False}]
-
-        subport_net_id = 'net-id-2'
-        binding_level = FakePortBindingLevel(subport_id, 0, 'vendor-1',
-                                             sub_segment_id)
-        subport_segments = [binding_level]
-
-        trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
-                                        'port_id': 'p2',
-                                        'segmentation_id': 1002,
-                                        'segmentation_type': 'vlan'}],
-                         'trunk_id': 'trunk_id'}
-        switch_bindings = {'local_link_information': [
-            {'port_id': 'Eth1', 'switch_id': 'switch-id-1',
-             'switch_info': 'switch-1'}]}
-        bindings = switch_bindings['local_link_information']
-        self.drv._ndb.get_network_id_from_port_id.return_value = subport_net_id
-        arista_eapi.db_lib.get_port_binding_level.return_value = \
-            subport_segments
-
-        self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True)
-
-        self.drv.plug_baremetal_into_network(bm_id, host, port_id,
-                                             network_id, tenant_id,
-                                             segments, port_name,
-                                             device_owner,
-                                             None, None, 'baremetal',
-                                             bindings, trunk_details)
-
-        cmd1 = ['show openstack agent uuid']
-        cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
-                'region RegionOne',
-                'tenant ten-2',
-                'instance id bm-1 hostid host type baremetal',
-                'port id p1 name "name_p1" network-id net-id-1 '
-                'type native switch-id switch-id-1 switchport Eth1',
-                ]
-        for level, segment in enumerate(segments):
-            cmd2.append('segment level %s id %s' % (level, segment['id']))
-        cmd2.append('port id p2 network-id net-id-2 '
-                    'type allowed switch-id switch-id-1 switchport Eth1', )
-        for segment in subport_segments:
-            cmd2.append('segment level %s id %s' % (segment.level,
-                                                    segment.segment_id))
-        cmd2.append('exit')
-
-        self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
-
-    @patch(EAPI_SEND_FUNC)
-    def test_unplug_host_from_network(self, mock_send_eapi_req):
-        tenant_id = 'ten-2'
-        network_id = 'net-id-1'
-        vm_id = 'vm-2'
-        port_id = 111
-        host = 'host'
-        subport_id = 222
-
-        trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
-                                        'port_id': subport_id,
-                                        'segmentation_id': 123,
-                                        'segmentation_type': 'vlan'}],
-                         'trunk_id': 'trunk_id'}
-        self.drv.unplug_host_from_network(vm_id, host, port_id,
-                                          network_id, tenant_id,
-                                          trunk_details)
-        cmd1 = ['show openstack agent uuid']
-        cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
-                'region RegionOne',
-                'tenant ten-2', 'vm id vm-2 hostid host',
-                'no port id 222',
-                'no port id 111',
-                ]
-        self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
-
-    @patch(EAPI_SEND_FUNC)
-    def test_unplug_baremetal_from_network(self, mock_send_eapi_req):
-        tenant_id = 'ten-2'
-        network_id = 'net-id-1'
-        bm_id = 'bm-2'
-        port_id = 111
-        host = 'host'
-        subport_id = 222
-
-        trunk_details = {'sub_ports': [{'mac_address': 'mac_address',
-                                        'port_id': subport_id,
-                                        'segmentation_id': 123,
-                                        'segmentation_type': 'vlan'}],
-                         'trunk_id': 'trunk_id'}
-        switch_bindings = {'local_link_information': [
-            {'port_id': 'Eth1', 'switch_id': 'switch-id-1',
-             'switch_info': 'switch-1'}]}
-        bindings = switch_bindings['local_link_information']
-        self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True)
-        self.drv.unplug_baremetal_from_network(bm_id, host, port_id,
-                                               network_id, tenant_id,
-                                               None, 'baremetal',
-                                               bindings, trunk_details)
-        cmd1 = ['show openstack agent uuid']
-        cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
-                'region RegionOne',
-                'tenant ten-2',
-                'instance id bm-2 hostid host type baremetal',
-                'no port id 222',
-                'no port id 111',
-                ]
-        self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2])
-
-    def _verify_send_eapi_request_calls(self, mock_send_eapi_req, cmds,
-                                        commands_to_log=None):
-        calls = []
-        calls.extend(
-            mock.call(cmds=cmd, commands_to_log=log_cmd)
-            for cmd, log_cmd in six.moves.zip(cmds, commands_to_log or cmds))
-        mock_send_eapi_req.assert_has_calls(calls)
diff --git a/networking_arista/tests/unit/ml2/test_arista_sync.py b/networking_arista/tests/unit/ml2/test_arista_sync.py
index 8d95a064..53f6c5ba 100644
--- a/networking_arista/tests/unit/ml2/test_arista_sync.py
+++ b/networking_arista/tests/unit/ml2/test_arista_sync.py
@@ -89,7 +89,6 @@ class SyncServiceTest(testlib_api.SqlTestCase):
             mock.call.get_region_updated_time(),
             mock.call.sync_start(),
             mock.call.register_with_eos(sync=True),
-            mock.call.check_supported_features(),
             mock.call.get_tenants(),
             mock.call.create_network_bulk(
                 tenant_id,
@@ -196,7 +195,6 @@ class SyncServiceTest(testlib_api.SqlTestCase):
             mock.call.get_region_updated_time(),
             mock.call.sync_start(),
             mock.call.register_with_eos(sync=True),
-            mock.call.check_supported_features(),
             mock.call.get_tenants(),
 
             mock.call.create_network_bulk(
@@ -255,7 +253,6 @@ class SyncServiceTest(testlib_api.SqlTestCase):
             mock.call.get_region_updated_time(),
             mock.call.sync_start(),
             mock.call.register_with_eos(sync=True),
-            mock.call.check_supported_features(),
             mock.call.get_tenants(),
 
             mock.call.create_network_bulk(
diff --git a/networking_arista/tests/unit/ml2/test_mechanism_arista.py b/networking_arista/tests/unit/ml2/test_mechanism_arista.py
index a44ec2c4..20e53b36 100644
--- a/networking_arista/tests/unit/ml2/test_mechanism_arista.py
+++ b/networking_arista/tests/unit/ml2/test_mechanism_arista.py
@@ -51,7 +51,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
 
         network_id = 'net1-id'
         segmentation_id = 1001
-        self.drv.rpc.hpb_supported.return_value = True
         network_context = self._get_network_context(tenant_id,
                                                      network_id,
                                                      segmentation_id,
@@ -60,7 +59,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
 
         segment_id = network_context.network_segments[0]['id']
         expected_calls = [
-            mock.call.hpb_supported(),
             mock.call.remember_tenant(tenant_id),
             mock.call.remember_network_segment(tenant_id,
                                                network_id,
@@ -85,7 +83,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
 
         segment_id = network_context.network_segments[0]['id']
         expected_calls += [
-            mock.call.hpb_supported(),
             mock.call.remember_tenant(INTERNAL_TENANT_ID),
             mock.call.remember_network_segment(INTERNAL_TENANT_ID,
                                                network_id,
@@ -234,7 +231,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
 
         network_id = 'net1-id'
         segmentation_id = 1001
-        self.drv.rpc.hpb_supported.return_value = True
         network_context = self._get_network_context(tenant_id,
                                                      network_id,
                                                      segmentation_id,
@@ -244,7 +240,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
         mechanism_arista.db_lib.num_vms_provisioned.return_value = 0
         self.drv.delete_network_postcommit(network_context)
         expected_calls = [
-            mock.call.hpb_supported(),
             mock.call.delete_network(tenant_id, network_id,
                                      network_context.network_segments),
             mock.call.num_nets_provisioned(tenant_id),
@@ -269,7 +264,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
 
         self.drv.delete_network_postcommit(network_context)
         expected_calls += [
-            mock.call.hpb_supported(),
             mock.call.delete_network(INTERNAL_TENANT_ID, network_id,
                                      network_context.network_segments),
             mock.call.num_nets_provisioned(INTERNAL_TENANT_ID),
@@ -520,8 +514,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
         self.drv.ndb.get_network_from_net_id.return_value = [network]
         physnet = dict(physnet='default')
         self.fake_rpc.get_physical_network.return_value = physnet
-        self.drv.rpc.hpb_supported.return_value = True
-
         self.drv.delete_port_postcommit(port_context)
 
         expected_calls = [
@@ -539,7 +531,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
             mock.call.num_vms_provisioned(tenant_id),
             mock.call.forget_tenant(tenant_id),
             mock.call.delete_tenant(tenant_id),
-            mock.call.hpb_supported(),
         ]
         for binding_level in port_context.binding_levels:
             expected_calls.append(mock.call.is_network_provisioned(tenant_id,
@@ -590,7 +581,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
             mock.call.num_vms_provisioned(INTERNAL_TENANT_ID),
             mock.call.forget_tenant(INTERNAL_TENANT_ID),
             mock.call.delete_tenant(INTERNAL_TENANT_ID),
-            mock.call.hpb_supported(),
         ]
         for binding_level in port_context.binding_levels:
             expected_calls.append(mock.call.is_network_provisioned(
@@ -637,7 +627,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
         self.drv.ndb.get_network_from_net_id.return_value = [network]
         physnet = dict(physnet='default')
         self.fake_rpc.get_physical_network.return_value = physnet
-        self.drv.rpc.hpb_supported.return_value = True
 
         self.drv.delete_port_postcommit(port_context)
 
@@ -656,7 +645,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
             mock.call.num_vms_provisioned(tenant_id),
             mock.call.forget_tenant(tenant_id),
             mock.call.delete_tenant(tenant_id),
-            mock.call.hpb_supported(),
         ]
         for binding_level in port_context.binding_levels:
             expected_calls.append(mock.call.is_network_provisioned(
@@ -971,7 +959,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
         mechanism_arista.db_lib.num_nets_provisioned.return_value = 1
         mechanism_arista.db_lib.num_vms_provisioned.return_value = 1
         self.drv.ndb.get_all_network_segments.return_value = segments
-        mechanism_arista.db_lib.hpb_supported.return_value = True
 
         network = {'tenant_id': tenant_id}
         self.drv.ndb.get_network_from_net_id.return_value = [network]
@@ -986,7 +973,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
 
         profile = port['binding:profile']
         network_name = network_context.current['name']
-        self.drv.rpc.hpb_supported.return_value = True
         self.drv.update_port_postcommit(port_context)
 
         expected_calls = [
@@ -996,7 +982,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
                                              None, None),
             mock.call.is_network_provisioned(tenant_id, network_id,
                                              segmentation_id, None),
-            mock.call.hpb_supported(),
             mock.call.create_network_segments(tenant_id, network_id,
                                               network_name,
                                               segments),
@@ -1056,7 +1041,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
                                              None, None),
             mock.call.is_network_provisioned(INTERNAL_TENANT_ID, network_id,
                                              segmentation_id, None),
-            mock.call.hpb_supported(),
             mock.call.create_network_segments(INTERNAL_TENANT_ID,
                                               network_id, network_name,
                                               segments),
@@ -1117,7 +1101,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
                                              None, None),
             mock.call.is_network_provisioned(tenant_id, network_id,
                                              segmentation_id, None),
-            mock.call.hpb_supported(),
             mock.call.create_network_segments(tenant_id, network_id,
                                               network_name,
                                               segments),
@@ -1143,7 +1126,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
                                              None, None),
             mock.call.is_network_provisioned(tenant_id, network_id,
                                              segmentation_id, None),
-            mock.call.hpb_supported(),
             mock.call.create_network_segments(tenant_id, network_id,
                                               network_name,
                                               segments),
@@ -1192,7 +1174,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
         mechanism_arista.db_lib.num_nets_provisioned.return_value = 1
         mechanism_arista.db_lib.num_vms_provisioned.return_value = 1
         self.drv.ndb.get_all_network_segments.return_value = segments
-        mechanism_arista.db_lib.hpb_supported.return_value = True
 
         network = {'tenant_id': tenant_id}
         self.drv.ndb.get_network_from_net_id.return_value = [network]
@@ -1216,7 +1197,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
                                              None, None),
             mock.call.is_network_provisioned(tenant_id, network_id,
                                              segmentation_id, None),
-            mock.call.hpb_supported(),
             mock.call.create_network_segments(tenant_id, network_id,
                                               network_name,
                                               segments),
@@ -1441,8 +1421,7 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
 
         mechanism_arista.db_lib.num_nets_provisioned.return_value = 1
         mechanism_arista.db_lib.num_vms_provisioned.return_value = 1
-        self.drv.rpc.hpb_supported.return_value = False
-        self.drv.ndb.get_network_segments.return_value = segments
+        self.drv.ndb.get_all_network_segments.return_value = segments
 
         mechanism_arista.db_lib.reset_mock()
         self.drv.update_port_postcommit(context)
@@ -1454,7 +1433,9 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
             mock.call.is_network_provisioned(tenant_id, network_id,
                                              segmentation_id,
                                              None),
-            mock.call.hpb_supported(),
+            mock.call.create_network_segments(tenant_id, network_id,
+                                              network_context.current['name'],
+                                              segments),
             mock.call.is_network_provisioned(tenant_id, network_id,
                                              None, None),
             mock.call.unplug_port_from_network(old_device_id,
@@ -1529,14 +1510,16 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
             mock.call.is_network_provisioned(tenant_id, network_id,
                                              segmentation_id,
                                              None),
-            mock.call.hpb_supported(),
+            mock.call.create_network_segments(tenant_id, network_id,
+                                              network_context.current['name'],
+                                              segments),
             mock.call.plug_port_into_network(new_device_id,
                                              new_host,
                                              port_id, network_id,
                                              tenant_id, port_name,
                                              n_const.DEVICE_OWNER_DHCP,
                                              None, None, vnic_type,
-                                             segments=[],
+                                             segments=segments,
                                              switch_bindings=profile,
                                              trunk_details=None),
         ]
@@ -1562,14 +1545,16 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
             mock.call.is_network_provisioned(tenant_id, network_id,
                                              segmentation_id,
                                              None),
-            mock.call.hpb_supported(),
+            mock.call.create_network_segments(tenant_id, network_id,
+                                              network_context.current['name'],
+                                              segments),
             mock.call.plug_port_into_network(new_device_id,
                                              new_host,
                                              port_id, network_id,
                                              tenant_id, port_name,
                                              n_const.DEVICE_OWNER_DHCP,
                                              None, None, vnic_type,
-                                             segments=[],
+                                             segments=segments,
                                              switch_bindings=profile,
                                              trunk_details=None),
         ]
@@ -1595,7 +1580,6 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
             mock.call.is_network_provisioned(tenant_id, network_id,
                                              segmentation_id,
                                              None),
-            mock.call.hpb_supported(),
         ]
 
         mechanism_arista.db_lib.assert_has_calls(expected_calls)
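For reference, the removed EAPI test cases above all rely on one verification pattern: patch the low-level eAPI send function, invoke a wrapper method, and assert the exact ordered CLI batches ('show openstack agent uuid' followed by a 'configure ... cvx ... service openstack' session). The minimal standalone sketch below restates that pattern in isolation; it uses a stand-in mock for the patched send function and hypothetical helper names, and is illustrative only, not code from this tree or part of the patch.

# Illustrative sketch only, not part of this patch. It mimics how the
# removed EAPI test cases verified RPC traffic: record calls on a mock
# standing in for the patched eAPI send function, then assert the exact
# ordered CLI batches that were expected on the wire. The helper names
# below (verify_eapi_batches, _expected_calls) are hypothetical.
from unittest import mock


def _expected_calls(cmd_batches):
    # Build the ordered mock.call list the assertions compare against.
    return [mock.call(cmds=cmds, commands_to_log=cmds) for cmds in cmd_batches]


def verify_eapi_batches(mock_send, cmd_batches):
    # Same idea as the removed _verify_send_eapi_request_calls() helper:
    # the mock must have seen every batch, in order.
    mock_send.assert_has_calls(_expected_calls(cmd_batches))


if __name__ == '__main__':
    # Stand-in for the patched eAPI send function.
    mock_send = mock.MagicMock()

    # Pretend the driver issued the usual two batches.
    cmd1 = ['show openstack agent uuid']
    cmd2 = ['enable', 'configure', 'cvx', 'service openstack',
            'region RegionOne', 'no tenant ten-1']
    mock_send(cmds=cmd1, commands_to_log=cmd1)
    mock_send(cmds=cmd2, commands_to_log=cmd2)

    verify_eapi_batches(mock_send, [cmd1, cmd2])
    print('expected eAPI batches were sent in order')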