[AIM] Add extension for ERSPAN
This adds an extension to the neutron port resource in order to support creation and tear-down of ERSPAN sessions. The port resource is extended with the apic:erspan_config property, which is a list of dictionary objects. Each entry in the list specifies the parameters for an ERSPAN session:

    'dest_ip':   the ERSPAN destination IP address
    'flow_id':   the flow ID to use (1-1023)
    'direction': 'in', 'out', or 'both' (port-centric)

The neutron port UUID plus direction define a unique ERSPAN source, while the destination IP and flow ID define a unique ERSPAN destination. ERSPAN sources and destinations are associated by name using the SpanSpanlbl resource in AIM. Sources and destinations must also be applied to interface resources in AIM, providing topology to the source EPs. This means that overlapping destination IPs aren't supported. This could be extended to consider things like the VRF that the network/EPG is mapped to, but that would require a data migration of existing DB state.

This extension is only supported on ports that belong to networks of type 'opflex'. This means that hierarchical port binding (HPB) and 'vlan' type networks are not supported, nor are SVI networks. The ports must have a vnic_type of "normal" and a device owner prefix of "compute:".

The extension can be added to the port at any point in its life cycle, but the configuration is only pushed to AIM when the port is bound. Unbinding the port removes the configuration from AIM, but not the extension information in the port resource; that state must be removed explicitly by the user.

This workflow currently doesn't support live migration. Live migration may still work, as port rebinding updates the appropriate state in AIM, but this is done by first deleting the information from the source interface policy group in APIC, then adding it to the destination/target interface policy group, which will lead to some loss of traffic. This can be addressed in a future patch if needed.

Administrative privileges are required to use this extension. This restriction can be relaxed in a subsequent patch, if needed.

ERSPAN traffic is sent from the local vSwitch to the host, and the host's IP stack forwards the encapsulated traffic.

Change-Id: I3a35b060f914daebd7b34fa1fca2e289bd5f6967
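For illustration only, a port create or update request that uses the new extension would carry a body along these lines (the destination IP, flow ID, and direction values below are examples, and the request must be made with admin credentials):

    body = {'port': {'apic:erspan_config': [
        {'dest_ip': '192.168.0.10',    # ERSPAN destination IP
         'flow_id': 1023,              # flow ID, 1-1023
         'direction': 'both'}]}}       # 'in', 'out', or 'both' (default 'both')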
This commit is contained in:
parent 356d6845c4
commit 7adb9734b9
@@ -60,6 +60,7 @@

    "network_device": "field:port:device_owner=~^network:",
    "create_port": "",
    "create_port:apic:erspan_config": "rule:admin_only",
    "create_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
    "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
    "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
@@ -75,6 +76,7 @@
    "get_port:binding:host_id": "rule:admin_only",
    "get_port:binding:profile": "rule:admin_only",
    "update_port": "rule:admin_or_owner or rule:context_is_advsvc",
    "update_port:apic:erspan_config": "rule:admin_only",
    "update_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
    "update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
    "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
@@ -0,0 +1,45 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""port_erspan_extension

Revision ID: 016a678fafd4
Revises: bda3c34581e0
Create Date: 2020-11-03 00:00:00.000000

"""

# revision identifiers, used by Alembic.
revision = '016a678fafd4'
down_revision = 'bda3c34581e0'

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.create_table(
        'apic_aim_port_erspan_configurations',
        sa.Column('port_id', sa.String(36), nullable=False),
        sa.Column('dest_ip', sa.String(64), nullable=False),
        sa.Column('flow_id', sa.Integer, nullable=False),
        sa.Column('direction', sa.Enum('in', 'out', 'both'), nullable=False),
        sa.ForeignKeyConstraint(
            ['port_id'], ['ports.id'],
            name='apic_aim_port_erspan_extensions_fk_port',
            ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('port_id', 'dest_ip', 'flow_id', 'direction'))


def downgrade():
    pass
@@ -1 +1 @@
bda3c34581e0
016a678fafd4
@@ -19,6 +19,7 @@ import functools
from neutron_lib.api import converters as conv
from neutron_lib.api.definitions import address_scope as as_def
from neutron_lib.api.definitions import network as net_def
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import subnet as subnet_def
from neutron_lib.api import extensions
from neutron_lib.api import validators as valid
@@ -46,6 +47,7 @@ NESTED_DOMAIN_NODE_NETWORK_VLAN = 'apic:nested_domain_node_network_vlan'
EXTRA_PROVIDED_CONTRACTS = 'apic:extra_provided_contracts'
EXTRA_CONSUMED_CONTRACTS = 'apic:extra_consumed_contracts'
EPG_CONTRACT_MASTERS = 'apic:epg_contract_masters'
ERSPAN_CONFIG = 'apic:erspan_config'

BD = 'BridgeDomain'
EPG = 'EndpointGroup'
@@ -66,6 +68,10 @@ APIC_MIN_VLAN = 1
VLAN_RANGE_START = 'start'
VLAN_RANGE_END = 'end'

ERSPAN_DEST_IP = 'dest_ip'
ERSPAN_FLOW_ID = 'flow_id'
ERSPAN_DIRECTION = 'direction'

LOG = logging.getLogger(__name__)


@@ -102,6 +108,51 @@ def _validate_apic_vlan_range(data, key_specs=None):
    return msg

def _validate_erspan_flow_id(data, key_specs=None):
    if data is None:
        return
    msg = valid.validate_non_negative(data)
    if int(data) > 1023:
        msg = _("ERSPAN flow ID must be at most 1023 (was %s)") % data
    elif int(data) == 0:
        msg = _("ERSPAN flow ID must be greater than 0 (was %s)") % data
    return msg


def _validate_erspan_configs(data, valid_values=None):
    """Validate a list of unique ERSPAN configurations.

    :param data: The data to validate. To be valid it must be a list like
        structure of ERSPAN config dicts, each containing 'dest_ip' and
        'flow_id' key values.
    :param valid_values: Not used!
    :returns: None if data is a valid list of unique ERSPAN config dicts,
        otherwise a human readable message indicating why validation failed.
    """
    if not isinstance(data, list):
        msg = _("Invalid data format for ERSPAN config: '%s'") % data
        LOG.debug(msg)
        return msg

    expected_keys = (ERSPAN_DEST_IP, ERSPAN_FLOW_ID,)
    erspan_configs = []
    for erspan_config in data:
        msg = valid._verify_dict_keys(expected_keys, erspan_config, False)
        if msg:
            return msg
        msg = _validate_erspan_flow_id(erspan_config[ERSPAN_FLOW_ID])
        if msg:
            return msg
        msg = valid.validate_ip_address(erspan_config[ERSPAN_DEST_IP])
        if msg:
            return msg
        if erspan_config in erspan_configs:
            msg = _("Duplicate ERSPAN config '%s'") % erspan_config
            LOG.debug(msg)
            return msg
        erspan_configs.append(erspan_config)


def _validate_dict_or_string(data, key_specs=None):
    if data is None:
        return
@@ -110,7 +161,7 @@ def _validate_dict_or_string(data, key_specs=None):
        try:
            data = ast.literal_eval(data)
        except Exception:
            msg = _("Allowed VLANs %s cannot be converted to dict") % data
            msg = _("Extension %s cannot be converted to dict") % data
            return msg

    return valid.validate_dict_or_none(data, key_specs)
@@ -154,6 +205,8 @@ valid.validators['type:apic_vlan_list'] = functools.partial(
valid.validators['type:apic_vlan_range_list'] = functools.partial(
    valid._validate_list_of_items, _validate_apic_vlan_range)
valid.validators['type:dict_or_string'] = _validate_dict_or_string
valid.validators['type:apic_erspan_flow_id'] = _validate_erspan_flow_id
valid.validators['type:apic_erspan_configs'] = _validate_erspan_configs


APIC_ATTRIBUTES = {
@@ -161,6 +214,15 @@ APIC_ATTRIBUTES = {
    SYNC_STATE: {'allow_post': False, 'allow_put': False, 'is_visible': True}
}

ERSPAN_KEY_SPECS = [
    {ERSPAN_DEST_IP: {'type:ip_address': None,
                      'required': True},
     ERSPAN_FLOW_ID: {'type:apic_erspan_flow_id': None,
                      'required': True},
     ERSPAN_DIRECTION: {'type:values': ['in', 'out', 'both'],
                        'default': 'both'}},
]

EPG_CONTRACT_MASTER_KEY_SPECS = [
    # key spec for opt_name in _VALID_BLANK_EXTRA_DHCP_OPTS
    {'app_profile_name': {'type:not_empty_string': None,
@@ -169,6 +231,15 @@ EPG_CONTRACT_MASTER_KEY_SPECS = [
                          'required': True}},
]

PORT_ATTRIBUTES = {
    ERSPAN_CONFIG: {
        'allow_post': True, 'allow_put': True,
        'is_visible': True, 'default': None,
        'convert_to': conv.convert_none_to_empty_list,
        'validate': {'type:apic_erspan_configs': None},
    },
}

NET_ATTRIBUTES = {
    SVI: {
        'allow_post': True, 'allow_put': False,
@@ -318,6 +389,8 @@ ADDRESS_SCOPE_ATTRIBUTES = {


EXTENDED_ATTRIBUTES_2_0 = {
    port_def.COLLECTION_NAME: dict(
        list(APIC_ATTRIBUTES.items()) + list(PORT_ATTRIBUTES.items())),
    net_def.COLLECTION_NAME: dict(
        list(APIC_ATTRIBUTES.items()) + list(EXT_NET_ATTRIBUTES.items()) +
        list(NET_ATTRIBUTES.items())),
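Roughly, the validators registered above behave as follows on example inputs (illustrative values only, not part of the change):

    _validate_erspan_flow_id(1023)   # returns None (valid)
    _validate_erspan_flow_id(1024)   # returns an error message (exceeds 1023)
    _validate_erspan_flow_id(0)      # returns an error message (must be > 0)
    _validate_erspan_configs(
        [{'dest_ip': '192.168.0.10', 'flow_id': 1023}])  # None (valid)
    _validate_erspan_configs(
        [{'flow_id': 1023}])         # error message ('dest_ip' key missing)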
@@ -128,3 +128,19 @@ class InvalidPreexistingBdForNetwork(exceptions.BadRequest):
    message = _("The Bridge Domain specified in apic:distinguished_names "
                "either does not exist in ACI or belongs to another network "
                "in this OpenStack instance.")


class InvalidPortForErspanSession(exceptions.BadRequest):
    message = _("AIM ERSPAN extensions are only supported on ports with a "
                "device owner of 'compute:'")


class InvalidFabricPathForErspanSession(exceptions.BadRequest):
    message = _("AIM ERSPAN extensions are only supported for ports on hosts "
                "that are connected to the fabric via virtual port channels "
                "(VPCs) or port channels (PCs).")


class InvalidNetworkForErspanSession(exceptions.BadRequest):
    message = _("AIM ERSPAN extensions are not supported on ports on SVI "
                "type networks.")
@@ -32,6 +32,24 @@ BAKERY = baked.bakery(_size_alert=lambda c: LOG.warning(
    "sqlalchemy baked query cache size exceeded in %s", __name__))


class PortExtensionErspanDb(model_base.BASEV2):

    __tablename__ = 'apic_aim_port_erspan_configurations'

    port_id = sa.Column(
        sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"),
        primary_key=True)
    dest_ip = sa.Column(sa.String(64), primary_key=True)
    flow_id = sa.Column(sa.Integer, primary_key=True)
    direction = sa.Column(sa.Enum('in', 'out', 'both'),
                          default='both', primary_key=True)
    port = orm.relationship(models_v2.Port,
                            backref=orm.backref(
                                'aim_extension_erspan_configs',
                                uselist=True,
                                lazy='joined', cascade='delete'))


class NetworkExtensionDb(model_base.BASEV2):

    __tablename__ = 'apic_aim_network_extensions'
@@ -151,6 +169,59 @@ class ExtensionDbMixin(object):
            if db_attr is not None:
                res_dict[res_attr] = db_attr

    def get_port_extn_db(self, session, port_id):
        return self.get_port_extn_db_bulk(session, [port_id]).get(
            port_id, {})

    def get_port_extn_db_bulk(self, session, port_ids):
        if not port_ids:
            return {}

        query = BAKERY(lambda s: s.query(
            PortExtensionErspanDb))
        query += lambda q: q.filter(
            PortExtensionErspanDb.port_id.in_(
                sa.bindparam('port_ids', expanding=True)))
        db_erspans = query(session).params(
            port_ids=port_ids).all()

        erspans_by_port_id = {}
        for db_erspan in db_erspans:
            erspans_by_port_id.setdefault(db_erspan.port_id, []).append(
                db_erspan)

        result = {}
        for db_obj in db_erspans:
            port_id = db_obj.port_id
            result.setdefault(port_id, self.make_port_extn_db_conf_dict(
                erspans_by_port_id.get(port_id, [])))
        return result

    def make_port_extn_db_conf_dict(self, db_erspans):
        port_res = {}
        db_obj = db_erspans
        if db_obj:
            def _db_to_dict(db_obj):
                ed = {cisco_apic.ERSPAN_DEST_IP: db_obj.dest_ip,
                      cisco_apic.ERSPAN_FLOW_ID: db_obj.flow_id,
                      cisco_apic.ERSPAN_DIRECTION: db_obj.direction}
                return ed
            port_res[cisco_apic.ERSPAN_CONFIG] = [_db_to_dict(e)
                                                  for e in db_erspans]
        return port_res

    def set_port_extn_db(self, session, port_id, res_dict):
        with session.begin(subtransactions=True):
            if cisco_apic.ERSPAN_CONFIG in res_dict:
                self._update_dict_attr(
                    session, PortExtensionErspanDb,
                    (cisco_apic.ERSPAN_DEST_IP,
                     cisco_apic.ERSPAN_FLOW_ID,
                     cisco_apic.ERSPAN_DIRECTION
                    ),
                    res_dict[cisco_apic.ERSPAN_CONFIG],
                    port_id=port_id)

    def get_network_extn_db(self, session, network_id):
        return self.get_network_extn_db_bulk(session, [network_id]).get(
            network_id, {})
@@ -493,12 +564,16 @@ class ExtensionDbMixin(object):

        # remove duplicates, may change order
        new_values = [dict(t) for t in {tuple(d.items()) for d in new_values}]
        for r in rows:
            curr_obj = {key: r[key] for key in keys}
            if curr_obj in new_values:
                new_values.discard(curr_obj)
            else:
                session.delete(r)
        # Updates are deletions with additions, so to ensure that
        # the delete happens before a subsequent addition, we create
        # a subtransaction
        with session.begin(subtransactions=True):
            for r in rows:
                curr_obj = {key: r[key] for key in keys}
                if curr_obj in new_values:
                    new_values.remove(curr_obj)
                else:
                    session.delete(r)
            for v in new_values:
                v.update(filters)
                db_obj = db_model(**v)
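For reference, the per-port dict assembled by make_port_extn_db_conf_dict() (and returned by get_port_extn_db()) has this shape, using example values:

    {'apic:erspan_config': [{'dest_ip': '192.168.0.10',
                             'flow_id': 1023,
                             'direction': 'both'}]}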
@@ -59,6 +59,43 @@ class ApicExtensionDriver(api_plus.ExtensionDriver,
    def extension_alias(self):
        return "cisco-apic"

    def extend_port_dict(self, session, base_model, result):
        try:
            self._md.extend_port_dict(session, base_model, result)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                if db_api.is_retriable(e):
                    LOG.debug("APIC AIM extend_port_dict got retriable "
                              "exception: %s", type(e))
                else:
                    LOG.exception("APIC AIM extend_port_dict failed")

    def extend_port_dict_bulk(self, session, results):
        try:
            self._md.extend_port_dict_bulk(session, results)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                if db_api.is_retriable(e):
                    LOG.debug("APIC AIM extend_port_dict_bulk got retriable "
                              "exception: %s", type(e))
                else:
                    LOG.exception("APIC AIM extend_port_dict_bulk failed")

    def process_create_port(self, plugin_context, data, result):
        res_dict = {cisco_apic.ERSPAN_CONFIG:
                    data.get(cisco_apic.ERSPAN_CONFIG, [])}
        self.set_port_extn_db(plugin_context.session, result['id'],
                              res_dict)
        result.update(res_dict)

    def process_update_port(self, plugin_context, data, result):
        if cisco_apic.ERSPAN_CONFIG not in data:
            return
        res_dict = {cisco_apic.ERSPAN_CONFIG: data[cisco_apic.ERSPAN_CONFIG]}
        self.set_port_extn_db(plugin_context.session, result['id'],
                              res_dict)
        result.update(res_dict)

    def extend_network_dict(self, session, base_model, result):
        try:
            self._md.extend_network_dict(session, base_model, result)
@@ -452,6 +452,16 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
        self._ensure_any_filter(aim_ctx)
        self._setup_default_arp_dhcp_security_group_rules(aim_ctx)

        # This is required for infra resources needed by ERSPAN
        check_topology = self.aim.find(aim_ctx, aim_resource.Topology)
        if not check_topology:
            topology_aim = aim_resource.Topology()
            self.aim.create(aim_ctx, topology_aim)
        check_infra = self.aim.find(aim_ctx, aim_resource.Infra)
        if not check_infra:
            infra_aim = aim_resource.Infra()
            self.aim.create(aim_ctx, infra_aim)

    def _setup_default_arp_dhcp_security_group_rules(self, aim_ctx):
        sg_name = self._default_sg_name
        dname = aim_utils.sanitize_display_name('DefaultSecurityGroup')
@@ -1203,6 +1213,89 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
            res_dict[cisco_apic.SYNC_STATE] = (
                aim_status_track[SYNC_STATE_TMP])

    def extend_port_dict_bulk(self, session, results, single=False):
        """Extend port resource with apic_aim extensions

        Add any extensions defined by the apic_aim mechanism
        driver. This method may get called before the mechanism
        driver precommit calls.
        """
        LOG.debug("APIC AIM MD extending dict bulk for port: %s",
                  results)

        # Gather db objects
        aim_ctx = aim_context.AimContext(session)
        aim_resources_aggregate = []
        res_dict_by_aim_res_dn = {}
        # template to track the status related info
        # for each resource.
        aim_status_track_template = {
            SYNC_STATE_TMP: cisco_apic.SYNC_NOT_APPLICABLE,
            AIM_RESOURCES_CNT: 0}

        for res_dict, port_db in results:
            aim_resources = []
            # Use a tmp field to aggregate the status across mapped
            # AIM objects, we set the actual sync_state only if we
            # are able to process all the status objects for these
            # corresponding AIM resources. If any status object is not
            # available then sync_state will be 'build'. On create,
            # subnets start in 'N/A'. The tracking object is added
            # along with the res_dict on the DN based res_dict_by_aim_res_dn
            # dict which maintains the mapping from status objs to res_dict.
            aim_status_track = copy.deepcopy(aim_status_track_template)

            res_dict[cisco_apic.SYNC_STATE] = cisco_apic.SYNC_NOT_APPLICABLE
            res_dict_and_aim_status_track = (res_dict, aim_status_track)
            erspan_ext = port_db.aim_extension_erspan_configs
            if not erspan_ext and single:
                # Needed because of commit
                # d8c1e153f88952b7670399715c2f88f1ecf0a94a in Neutron that
                # put the extension call in Pike+ *before* the precommit
                # calls happen in port creation. I believe this is a bug
                # and should be discussed with the Neutron team.
                ext_dict = self.get_port_extn_db(session, port_db.id)
            else:
                ext_dict = self.make_port_extn_db_conf_dict(erspan_ext)
            if ext_dict:
                res_dict.update(ext_dict)

            # ERSPAN resources will only be valid if it's a bound port for
            # a compute instance on an opflex type network.
            cep_dn = self._map_port(session, port_db)
            resources = self._get_erspan_aim_resources_list(port_db, cep_dn)
            if resources:
                aim_resources.extend(resources)
            binding = (port_db.port_bindings[0]
                       if port_db.port_bindings else None)
            acc_name = self._get_acc_bundle_for_host(aim_ctx, binding.host)
            if resources and acc_name:
                acc_bundle = aim_resource.InfraAccBundleGroup(name=acc_name)
                aim_resources.append(acc_bundle)
                resources.append(acc_bundle)
            for resource in resources:
                res_dict_by_aim_res_dn[resource.dn] = (
                    res_dict_and_aim_status_track)

            # Track the number of AIM resources in aim_status_track,
            # decrement count each time we process a status obj related to
            # the resource. If the count hits zero then we have processed
            # the status objs for all of the associated AIM resources. Until
            # this happens, the sync_state is held as 'build' (unless it has
            # to be set to 'error').
            aim_status_track[AIM_RESOURCES_CNT] = len(aim_resources)
            aim_resources_aggregate.extend(aim_resources)

        self._merge_aim_status_bulk(aim_ctx, aim_resources_aggregate,
                                    res_dict_by_aim_res_dn)

    def extend_port_dict(self, session, port_db, result):
        if result.get(api_plus.BULK_EXTENDED):
            return
        LOG.debug("APIC AIM MD extending dict for port: %s", result)
        self.extend_port_dict_bulk(session, [(result, port_db)],
                                   single=True)

    def extend_network_dict_bulk(self, session, results, single=False):
        # Gather db objects
        aim_ctx = aim_context.AimContext(session)
@@ -2641,9 +2734,181 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
            raise exceptions.AAPNotAllowedOnDifferentActiveActiveAAPSubnet(
                subnet_ids=subnet_ids, other_subnet_ids=other_subnet_ids)

    def _get_erspan_aim_resources(self, port_id, cep_dn, erspan_config):
        dest_ip = erspan_config[cisco_apic.ERSPAN_DEST_IP]
        flow_id = erspan_config[cisco_apic.ERSPAN_FLOW_ID]
        direction = erspan_config[cisco_apic.ERSPAN_DIRECTION]
        source_group_name = self._erspan_source_group_name(port_id,
                                                           erspan_config)
        dest_group_name = self._erspan_dest_group_name(erspan_config)

        source_group = aim_resource.SpanVsourceGroup(
            name=source_group_name)
        dest_group = aim_resource.SpanVdestGroup(name=dest_group_name)
        source = aim_resource.SpanVsource(vsg_name=source_group_name,
            name=source_group_name, dir=direction, src_paths=[cep_dn])
        dest = aim_resource.SpanVdest(vdg_name=dest_group_name,
            name=dest_group_name)
        summary = aim_resource.SpanVepgSummary(vdg_name=dest_group_name,
            vd_name=dest_group_name, dst_ip=dest_ip, flow_id=flow_id)
        label = aim_resource.SpanSpanlbl(vsg_name=source_group_name,
            name=dest_group_name, tag='yellow-green')
        return (source_group, dest_group, source, dest, summary, label)

    def _get_erspan_aim_resources_list(self, port, cep_dn):
        erspan_configs = port.aim_extension_erspan_configs or []
        resources = []
        for erspan_config in erspan_configs:
            resources.extend(self._get_erspan_aim_resources(
                port['id'], cep_dn, erspan_config))
        return resources

    def _get_acc_bundle_for_host(self, aim_ctx, host_name):
        if not host_name:
            return None
        host_links = self.aim.find(aim_ctx, aim_infra.HostLink,
                                   host_name=host_name)
        # Extract the interface policy group names from the DNs.
        grpNames = []
        for host_link in host_links:
            # topology is a tuple consisting of:
            # (is_vpc, pod_id, nodes, node_paths, module/bundle, None/port)
            topology = self._get_topology_from_path(host_link.path)
            # This currently only supports opflex devices connected via
            # interface policy groups that are either port channel or
            # virtual port channel interfaces.
            if not topology[0]:
                continue
            grpNames.append(topology[4])
        # VPCs will have two entries, so remove duplicates.
        return list(set(grpNames))[0] if grpNames else None

    def _create_erspan_aim_config(self, context, cep_dn, port):
        session = context._plugin_context.session
        aim_ctx = aim_context.AimContext(db_session=session)

        agent = context.host_agents(ofcst.AGENT_TYPE_OPFLEX_OVS)
        if not agent:
            LOG.warning("Port %s is not bound to an opflex host, "
                        "so an ERSPAN session can't be established.",
                        port['id'])
            return

        acc_name = self._get_acc_bundle_for_host(aim_ctx,
                                                 port['binding:host_id'])
        if acc_name:
            acc_bundle = aim_resource.InfraAccBundleGroup(name=acc_name)
        else:
            LOG.warning("An interface policy group for port %s "
                        "could not be found - ERSPAN session "
                        "can't be established.", port['id'])
            return

        for erspan_config in port.get(cisco_apic.ERSPAN_CONFIG, []):
            resources = self._get_erspan_aim_resources(port['id'], cep_dn,
                                                       erspan_config)
            # Create ERSPAN source group and source
            if not self.aim.get(aim_ctx, resources[0]):
                self.aim.create(aim_ctx, resources[0])
            # Create ERSPAN source and destination.
            self.aim.create(aim_ctx, resources[2])

            # Create the dest group, dest, and summary
            # resources if needed.
            if not self.aim.get(aim_ctx, resources[1]):
                self.aim.create(aim_ctx, resources[1])
                self.aim.create(aim_ctx, resources[3])
                self.aim.create(aim_ctx, resources[4])

            # Update the bundle group.
            curr_bundle = self.aim.get(aim_ctx, acc_bundle)
            source_groups = curr_bundle.span_vsource_group_names
            dest_groups = curr_bundle.span_vdest_group_names
            if resources[0].name not in source_groups:
                source_groups.append(resources[0].name)
            if resources[1].name not in dest_groups:
                dest_groups.append(resources[1].name)
            self.aim.update(aim_ctx, curr_bundle,
                            span_vsource_group_names=source_groups,
                            span_vdest_group_names=dest_groups)
            # Create the ERSPAN label.
            if not self.aim.get(aim_ctx, resources[5]):
                self.aim.create(aim_ctx, resources[5])

    def _erspan_source_group_name(self, port_id, erspan_config):
        return port_id + '-' + erspan_config['direction']

    def _erspan_dest_group_name(self, erspan_config):
        return erspan_config['dest_ip'] + '-' + str(erspan_config['flow_id'])

    def _delete_erspan_aim_config(self, context, port, erspan_configs=None):
        session = context._plugin_context.session
        aim_ctx = aim_context.AimContext(db_session=session)
        acc_name = self._get_acc_bundle_for_host(aim_ctx,
                                                 port.get('binding:host_id'))
        erspan_configs = (erspan_configs if erspan_configs else
                          port.get(cisco_apic.ERSPAN_CONFIG, []))
        for erspan_config in erspan_configs:
            source_group = self._erspan_source_group_name(port['id'],
                                                          erspan_config)
            dest_group = self._erspan_dest_group_name(erspan_config)
            # The destination state can be shared, so check to see if
            # any other label sources reference it.
            labels = self.aim.find(aim_ctx, aim_resource.SpanSpanlbl,
                                   name=dest_group)
            other_sources = [label.name for label in labels
                             if label.vsg_name != source_group]
            if acc_name:
                # Remove the source from the bundle group, but only
                # remove the destination if no other sources are using it.
                curr_bg = self.aim.get(aim_ctx,
                    aim_resource.InfraAccBundleGroup(name=acc_name))
                vsource_group_names = list(
                    set(curr_bg.span_vsource_group_names) -
                    set([source_group]))
                vdest_group_names = list(set(curr_bg.span_vdest_group_names) -
                    set([dest_group]) if not other_sources
                    else curr_bg.span_vdest_group_names)
                self.aim.update(aim_ctx, aim_resource.InfraAccBundleGroup(
                    name=acc_name),
                    span_vsource_group_names=vsource_group_names,
                    span_vdest_group_names=vdest_group_names)

            # We can delete the source state and label, as those
            # are only dependent on this neutron port.
            self.aim.delete(aim_ctx, aim_resource.SpanSpanlbl(
                vsg_name=source_group, name=dest_group))

            self.aim.delete(aim_ctx, aim_resource.SpanVsource(
                vsg_name=source_group, name=source_group))
            self.aim.delete(aim_ctx, aim_resource.SpanVsourceGroup(
                name=source_group))

            # No other sources share this dest, so we can delete it.
            if not other_sources:
                self.aim.delete(aim_ctx, aim_resource.SpanVepgSummary(
                    vdg_name=dest_group, vd_name=dest_group))
                self.aim.delete(aim_ctx, aim_resource.SpanVdest(
                    vdg_name=dest_group, name=dest_group))
                self.aim.delete(aim_ctx, aim_resource.SpanVdestGroup(
                    name=dest_group))

    def _check_valid_erspan_config(self, port):
        # Currently only supported on instance ports
        if not port['device_owner'].startswith('compute:'):
            raise exceptions.InvalidPortForErspanSession()

        # Not supported on SVI networks
        ctx = nctx.get_admin_context()
        net_db = self.plugin._get_network(ctx, port['network_id'])
        if self._is_svi_db(net_db):
            raise exceptions.InvalidNetworkForErspanSession()

    def create_port_precommit(self, context):
        port = context.current
        self._check_active_active_aap(context, port)
        if port.get(cisco_apic.ERSPAN_CONFIG):
            self._check_valid_erspan_config(port)
        self._really_update_sg_rule_with_remote_group_set(
            context, port, port['security_groups'], is_delete=False)
        self._insert_provisioning_block(context)
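The AIM object names built by the helpers above combine the port UUID with the direction for sources, and the destination IP with the flow ID for destinations; for example (hypothetical port UUID):

    # _erspan_source_group_name('<port-uuid>', {'direction': 'in'})
    #     -> '<port-uuid>-in'
    # _erspan_dest_group_name({'dest_ip': '192.168.0.10', 'flow_id': 1023})
    #     -> '192.168.0.10-1023'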
@@ -2824,8 +3089,12 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
                context, static_ports, bind_context.network.current)

    def update_port_precommit(self, context):
        session = context._plugin_context.session
        orig = context.original
        port = context.current
        self._check_active_active_aap(context, port)
        if port.get(cisco_apic.ERSPAN_CONFIG):
            self._check_valid_erspan_config(port)
        if context.original_host and context.original_host != context.host:
            self.disassociate_domain(context, use_original=True)
            if self._use_static_path(context.original_bottom_bound_segment):
@@ -2833,6 +3102,10 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
                self._update_static_path(context, host=context.original_host,
                    segment=context.original_bottom_bound_segment, remove=True)
                self._release_dynamic_segment(context, use_original=True)
            # The port is either being unbound or rebound. We need the host
            # from the original port binding, so pass that as the port
            self._delete_erspan_aim_config(context, orig,
                port.get(cisco_apic.ERSPAN_CONFIG))
        if self._is_port_bound(port):
            if self._use_static_path(context.bottom_bound_segment):
                self._associate_domain(context, is_vmm=False)
@@ -2841,6 +3114,18 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
                  self._is_opflex_type(
                      context.bottom_bound_segment[api.NETWORK_TYPE])):
                self._associate_domain(context, is_vmm=True)
            # Handle changes in ERSPAN configuration.
            if (port.get(cisco_apic.ERSPAN_CONFIG) or
                    orig.get(cisco_apic.ERSPAN_CONFIG)):
                erspan_deletions = []
                for erspan in orig.get(cisco_apic.ERSPAN_CONFIG, []):
                    if erspan not in port[cisco_apic.ERSPAN_CONFIG]:
                        erspan_deletions.append(erspan)
                cep_dn = self._map_port(session, port)
                self._delete_erspan_aim_config(context, port,
                                               erspan_deletions)
                self._create_erspan_aim_config(context, cep_dn, port)

        self._update_sg_rule_with_remote_group_set(context, port)
        self._check_allowed_address_pairs(context, port)
        self._insert_provisioning_block(context)
@@ -2900,6 +3185,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
                  self._is_opflex_type(
                      context.bottom_bound_segment[api.NETWORK_TYPE])):
                self.disassociate_domain(context)
            self._delete_erspan_aim_config(context, port)
            self._really_update_sg_rule_with_remote_group_set(
                context, port, port['security_groups'], is_delete=True)
@@ -4070,6 +4356,25 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
        return query(session).params(
            scope_id=scope_id).one_or_none()

    def _map_port(self, session, port):
        tenant_aname = self.name_mapper.project(session,
                                                port['project_id'])
        # REVISIT: GBP workflow isn't supported in this release. If
        # we do add support for GBP, getting the EPG requires
        # determining which workflow was used, and getting the
        # EPG accordingly.
        mapping = self._get_network_mapping(session, port['network_id'])
        # If it's not a network we support (e.g. SVI), we can't look
        # up the CEP
        if not mapping or not mapping.epg_name:
            return None
        epg = self._get_network_epg(mapping)
        # The ERSPAN source configuration requires the DN
        # for the EP, so we construct it.
        return ('uni/tn-' + tenant_aname +
                '/ap-OpenStack/epg-' + epg.name +
                '/cep-' + port['mac_address'].upper())

    def _map_network(self, session, network, vrf=None, preexisting_bd_dn=None):
        tenant_aname = (vrf.tenant_name if vrf and vrf.tenant_name != 'common'
                        else self.name_mapper.project(
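The client endpoint (CEP) DN returned by _map_port() follows the pattern exercised by the tests below; with the default name mapper it looks like this (hypothetical project, network, and MAC values):

    # 'uni/tn-prj_<project-id>/ap-OpenStack/epg-net_<network-id>/cep-FA:16:3E:00:11:22'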
@@ -6062,6 +6367,13 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
        mgr.register_aim_resource_class(aim_resource.Tenant)
        mgr.register_aim_resource_class(aim_resource.VMMDomain)
        mgr.register_aim_resource_class(aim_resource.VRF)
        mgr.register_aim_resource_class(aim_resource.SpanVsourceGroup)
        mgr.register_aim_resource_class(aim_resource.SpanVdestGroup)
        mgr.register_aim_resource_class(aim_resource.SpanVsource)
        mgr.register_aim_resource_class(aim_resource.SpanVdest)
        mgr.register_aim_resource_class(aim_resource.SpanVepgSummary)
        mgr.register_aim_resource_class(aim_resource.SpanSpanlbl)
        mgr.register_aim_resource_class(aim_resource.InfraAccBundleGroup)

        # Copy common Tenant from actual to expected AIM store.
        for tenant in mgr.aim_mgr.find(
@@ -6784,6 +7096,32 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
            query += lambda q: q.distinct()
        for project_id, in query(mgr.actual_session):
            self._expect_project(mgr, project_id)
        query = BAKERY(lambda s: s.query(
            models_v2.Port))
        for port_db in query(mgr.actual_session):
            # We can only validate AIM resources on bound ports.
            if port_db.aim_extension_erspan_configs and port_db.port_bindings:
                cep_dn = self._map_port(mgr.actual_session, port_db)
                resources = self._get_erspan_aim_resources_list(port_db,
                                                                cep_dn)
                # Copy the bundle group pre-existing resources, if they
                # are monitored, from the actual AIM store to the validation
                # AIM store, so that the resource behaves as expected
                # during validation. Make sure not to overwrite any
                # pre-existing resources that have already been copied.
                acc_name = self._get_acc_bundle_for_host(mgr.actual_aim_ctx,
                    port_db.port_bindings[0].host)
                if acc_name:
                    acc_bundle = aim_resource.InfraAccBundleGroup(
                        name=acc_name)
                    actual_bg = mgr.aim_mgr.get(mgr.actual_aim_ctx, acc_bundle)
                    if actual_bg and actual_bg.monitored:
                        if not mgr.aim_mgr.get(mgr.expected_aim_ctx,
                                               actual_bg):
                            mgr.aim_mgr.create(mgr.expected_aim_ctx,
                                               actual_bg)
                for resource in resources:
                    mgr.expect_aim_resource(resource)

    def _validate_subnetpools(self, mgr):
        query = BAKERY(lambda s: s.query(
@@ -4115,6 +4115,93 @@ class TestSyncState(ApicAimTestCase):
    def test_unmanaged_external_subnet(self):
        self._test_external_subnet('N/A')

    def _test_erspan_sync(self, expected_state, with_erspan=True):

        aim_ctx = aim_context.AimContext(
            db_session=db_api.get_writer_session())
        self._register_agent('host1', AGENT_CONF_OPFLEX)
        self._register_agent('host2', AGENT_CONF_OPFLEX)
        # Host 1: VPC host
        host1_pg = 'pg-ostack-pt-1-17'
        host1_dn = 'topology/pod-1/protpaths-101-102/pathep-[%s]' % host1_pg
        self.hlink1 = aim_infra.HostLink(
            host_name='host1', interface_name='eth0', path=host1_dn)
        self.aim_mgr.create(aim_ctx, self.hlink1)
        # Add topology for this
        acc_bundle = aim_resource.InfraAccBundleGroup(name=host1_pg)
        self.aim_mgr.create(aim_ctx, acc_bundle)
        # Host 2: non-VPC host
        host2_pg = 'eth1/17'
        host2_dn = 'topology/pod-1/paths-101/pathep-[%s]' % host2_pg
        self.hlink1 = aim_infra.HostLink(
            host_name='host2', interface_name='eth0', path=host2_dn)
        self.aim_mgr.create(aim_ctx, self.hlink1)

        # Create the test network, subnet, and port.
        net = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(
            self.fmt, net, '10.0.0.1', '10.0.0.0/24')['subnet']
        arg_list = None
        erspan_config = {}
        if with_erspan:
            erspan_config = {'apic:erspan_config':
                             [{'dest_ip': '192.168.0.10',
                               'direction': 'in',
                               'flow_id': 1023}]}
            arg_list = ('apic:erspan_config',)
        p1 = self._make_port(self.fmt, net['network']['id'],
                             device_owner='compute:',
                             arg_list=arg_list,
                             **erspan_config)['port']

        # Bind the port to host1, and verify the AIM configuration
        self._bind_port_to_host(p1['id'], 'host1')
        port = self._show('ports', p1['id'])['port']
        self.assertEqual(expected_state, port['apic:synchronization_state'])

    def test_erspan_no_status(self):
        def get_status(self, context, resource, create_if_absent=True):
            return None

        with mock.patch('aim.aim_manager.AimManager.get_status', get_status):
            with mock.patch('aim.aim_manager.AimManager.get_statuses',
                            TestSyncState._mocked_get_statuses):
                self._test_erspan_sync('N/A')

    def test_erspan_sync(self):
        with mock.patch('aim.aim_manager.AimManager.get_status',
                        TestSyncState._get_synced_status):
            with mock.patch('aim.aim_manager.AimManager.get_statuses',
                            TestSyncState._mocked_get_statuses):
                self._test_erspan_sync('synced')

    def test_erspan_build(self):
        def get_status(self, context, resource, create_if_absent=True):
            return TestSyncState._get_pending_status_for_type(
                context, resource, aim_resource.InfraAccBundleGroup)

        with mock.patch('aim.aim_manager.AimManager.get_status', get_status):
            with mock.patch('aim.aim_manager.AimManager.get_statuses',
                            TestSyncState._mocked_get_statuses):
                self._test_erspan_sync('build')

    def test_erspan_error(self):
        def get_status(self, context, resource, create_if_absent=True):
            return TestSyncState._get_failed_status_for_type(
                context, resource, aim_resource.InfraAccBundleGroup)

        with mock.patch('aim.aim_manager.AimManager.get_status', get_status):
            with mock.patch('aim.aim_manager.AimManager.get_statuses',
                            TestSyncState._mocked_get_statuses):
                self._test_erspan_sync('error')

    def test_no_erspan(self):
        with mock.patch('aim.aim_manager.AimManager.get_status',
                        TestSyncState._get_synced_status):
            with mock.patch('aim.aim_manager.AimManager.get_statuses',
                            TestSyncState._mocked_get_statuses):
                self._test_erspan_sync('N/A', with_erspan=False)


class TestTopology(ApicAimTestCase):
    def test_network_subnets_on_same_router(self):
@@ -6881,6 +6968,332 @@ class TestExtensionAttributes(ApicAimTestCase):
        self.assertIn('is already in use by address-scope',
                      resp['NeutronError']['message'])

    def test_erspan_extension(self):
        net = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(
            self.fmt, net, '10.0.0.1', '10.0.0.0/24')['subnet']
        p1 = self._make_port(self.fmt, net['network']['id'],
                             device_owner='compute:')['port']

        # Update the port with just the destination IP, which should fail
        data = {'port': {'apic:erspan_config': [{'dest_ip': '192.168.0.10'}]}}
        req = self.new_update_request('ports', data, p1['id'], self.fmt)
        resp = req.get_response(self.api)
        self.assertEqual(resp.status_int, webob.exc.HTTPClientError.code)

        # Update the port with just the flow ID, which should fail
        data = {'port': {'apic:erspan_config': [{'flow_id': 1023}]}}
        req = self.new_update_request('ports', data, p1['id'], self.fmt)
        resp = req.get_response(self.api)
        self.assertEqual(resp.status_int, webob.exc.HTTPClientError.code)

        # Update the port with a destination IP but an invalid flow ID
        data = {'port': {'apic:erspan_config': [{'dest_ip': '192.168.0.10',
                                                 'flow_id': 1024}]}}
        req = self.new_update_request('ports', data, p1['id'], self.fmt)
        resp = req.get_response(self.api)
        self.assertEqual(resp.status_int, webob.exc.HTTPClientError.code)

        # Update the port with a valid destination IP and flow ID. Also
        # verify that flow ID can be a string (in addition to an int).
        data = {'port': {'apic:erspan_config': [{'dest_ip': '192.168.0.10',
                                                 'flow_id': '1023'}]}}
        req = self.new_update_request('ports', data, p1['id'], self.fmt)
        resp = req.get_response(self.api)
        self.assertEqual(resp.status_int, webob.exc.HTTPOk.code)
        port_data = self.deserialize(self.fmt, resp)['port']
        data['port']['apic:erspan_config'][0]['direction'] = 'both'
        data['port']['apic:erspan_config'][0]['flow_id'] = int(
            data['port']['apic:erspan_config'][0]['flow_id'])
        self.assertEqual(data['port']['apic:erspan_config'],
                         port_data.get('apic:erspan_config'))

        # Update the port with a valid destination IP and
        # flow ID, but change the direction to "out"
        data = {'port': {'apic:erspan_config': [{'dest_ip': '192.168.0.10',
                                                 'flow_id': 1023,
                                                 'direction': 'out'}]}}
        req = self.new_update_request('ports', data, p1['id'], self.fmt)
        resp = req.get_response(self.api)
        self.assertEqual(resp.status_int, webob.exc.HTTPOk.code)
        port_data = self.deserialize(self.fmt, resp)['port']
        self.assertEqual(data['port']['apic:erspan_config'],
                         port_data.get('apic:erspan_config'))
    def test_erspan_exceptions(self):
        net1 = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(
            self.fmt, net1, '10.0.0.1', '10.0.0.0/24')['subnet']
        # Make a port with ERSPAN config that isn't an instance port,
        # which should fail.
        data = {'port': {'network_id': net1['network']['id'],
                         'apic:erspan_config': [{'dest_ip': '192.168.0.10',
                                                 'flow_id': 1023}],
                         'project_id': 'tenant1'}}
        req = self.new_create_request('ports', data, self.fmt)
        resp = req.get_response(self.api)
        result = self.deserialize(self.fmt, resp)
        self.assertEqual(
            'InvalidPortForErspanSession',
            result['NeutronError']['type'])

        # Make another port, which isn't an instance port, but without
        # ERSPAN configuration.
        p1 = self._make_port(self.fmt, net1['network']['id'])['port']
        # Update the port with ERSPAN config, which should fail.
        data = {'port': {'apic:erspan_config': [{'dest_ip': '192.168.0.10',
                                                 'flow_id': 1023}]}}
        req = self.new_update_request('ports', data, p1['id'], self.fmt)
        resp = req.get_response(self.api)
        result = self.deserialize(self.fmt, resp)
        self.assertEqual(
            'InvalidPortForErspanSession',
            result['NeutronError']['type'])

        # Make an SVI type network, and a port with ERSPAN configuration,
        # which should fail.
        net2 = self._make_network(self.fmt, 'net2', True,
                                  arg_list=('provider:physical_network',
                                            'provider:network_type', SVI),
                                  **{'provider:physical_network': 'physnet3',
                                     'provider:network_type': 'vlan',
                                     'apic:svi': 'True'})
        self._make_subnet(
            self.fmt, net2, '20.0.0.1', '20.0.0.0/24')['subnet']
        data = {'port': {'network_id': net2['network']['id'],
                         'apic:erspan_config': [{'dest_ip': '192.168.0.10',
                                                 'flow_id': 1023}],
                         'device_owner': 'compute:',
                         'project_id': 'tenant1'}}
        req = self.new_create_request('ports', data, self.fmt)
        resp = req.get_response(self.api)
        result = self.deserialize(self.fmt, resp)
        self.assertEqual(
            'InvalidNetworkForErspanSession',
            result['NeutronError']['type'])
        # Verify that attempting to update a port on an SVI network with
        # ERSPAN state fails.
        p2 = self._make_port(self.fmt, net2['network']['id'])['port']
        data = {'port': {'apic:erspan_config': [{'dest_ip': '192.168.0.10',
                                                 'flow_id': 1023}],
                         'device_owner': 'compute:'}}
        req = self.new_update_request('ports', data, p2['id'], self.fmt)
        resp = req.get_response(self.api)
        result = self.deserialize(self.fmt, resp)
        self.assertEqual(
            'InvalidNetworkForErspanSession',
            result['NeutronError']['type'])
    def test_erspan_aim_config(self, network_type='opflex'):
        aim_ctx = aim_context.AimContext(
            db_session=db_api.get_writer_session())
        self._register_agent('host1', AGENT_CONF_OPFLEX)
        self._register_agent('host2', AGENT_CONF_OPFLEX)
        # Host 1: VPC host
        host1_pg = 'pg-ostack-pt-1-17'
        host1_dn = 'topology/pod-1/protpaths-101-102/pathep-[%s]' % host1_pg
        self.hlink1 = aim_infra.HostLink(
            host_name='host1', interface_name='eth0', path=host1_dn)
        self.aim_mgr.create(aim_ctx, self.hlink1)
        # Add topology for this
        acc_bundle = aim_resource.InfraAccBundleGroup(name=host1_pg)
        self.aim_mgr.create(aim_ctx, acc_bundle)
        # Host 2: non-VPC host
        host2_pg = 'eth1/17'
        host2_dn = 'topology/pod-1/paths-101/pathep-[%s]' % host2_pg
        self.hlink1 = aim_infra.HostLink(
            host_name='host2', interface_name='eth0', path=host2_dn)
        self.aim_mgr.create(aim_ctx, self.hlink1)

        # Create the test network, subnet, and port.
        net = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(
            self.fmt, net, '10.0.0.1', '10.0.0.0/24')['subnet']
        erspan_config = {'apic:erspan_config':
                         [{'dest_ip': '192.168.0.10',
                           'direction': 'in',
                           'flow_id': 1023}]}
        p1 = self._make_port(self.fmt, net['network']['id'],
                             device_owner='compute:',
                             arg_list=('apic:erspan_config',),
                             **erspan_config)['port']
        self.assertEqual(erspan_config.get('apic:erspan_config'),
                         p1['apic:erspan_config'])

        def check_erspan_config(aim_ctx, source_resources, dest_resources):
            # Convert AIM resources to dicts, removing any
            # non-user_attributes (e.g. "epoch"), and sorting
            # any lists so that comparisons will work.
            def normalize_resources(resources):
                new_resources = []
                for res in resources:
                    res_dict = {}
                    for k, v in res.members.items():
                        if k in res.user_attributes():
                            if isinstance(v, list):
                                v = v.sort() or []
                            res_dict[k] = v
                    new_resources.append(res_dict)
                return new_resources

            def items_equal(actual, expected):
                for res in expected:
                    self.assertIn(res, actual)

            expected = normalize_resources(source_resources[0])
            actual = normalize_resources(
                self.aim_mgr.find(aim_ctx, aim_resource.SpanVsourceGroup))
            items_equal(actual, expected)
            expected = normalize_resources(dest_resources[0])
            actual = normalize_resources(
                self.aim_mgr.find(aim_ctx, aim_resource.SpanVdestGroup))
            items_equal(actual, expected)
            expected = normalize_resources(source_resources[1])
            actual = normalize_resources(
                self.aim_mgr.find(aim_ctx, aim_resource.SpanVsource))
            items_equal(actual, expected)
            expected = normalize_resources(dest_resources[1])
            actual = normalize_resources(
                self.aim_mgr.find(aim_ctx, aim_resource.SpanVdest))
            items_equal(actual, expected)
            expected = normalize_resources(dest_resources[2])
            actual = normalize_resources(
                self.aim_mgr.find(aim_ctx, aim_resource.SpanVepgSummary))
            items_equal(actual, expected)
            expected = normalize_resources(source_resources[2])
            actual = normalize_resources(
                self.aim_mgr.find(aim_ctx, aim_resource.SpanSpanlbl))
            items_equal(actual, expected)
            expected = normalize_resources(dest_resources[3])
            actual = normalize_resources(
                self.aim_mgr.find(aim_ctx, aim_resource.InfraAccBundleGroup))
            items_equal(actual, expected)

        # Verify that there is no information in AIM for
        # the unbound port.
        source_resources = [[], [], []]
        dest_resources = [
            [], [], [], [aim_resource.InfraAccBundleGroup(name=host1_pg)]]
        check_erspan_config(aim_ctx, source_resources, dest_resources)

        # Bind the port to host1, and verify the AIM configuration
        self._bind_port_to_host(p1['id'], 'host1')
        source_name = p1['id'] + '-in'
        dest_name = '192.168.0.10' + '-' + '1023'
        source_resources = [[aim_resource.SpanVsourceGroup(
            name=source_name)],
            [aim_resource.SpanVsource(vsg_name=source_name,
                name=source_name, dir='in',
                src_paths=['uni/tn-prj_' + p1['project_id'] +
                    '/ap-OpenStack/epg-net_' + net['network']['id'] +
                    '/cep-' + p1['mac_address'].upper()])],
            [aim_resource.SpanSpanlbl(vsg_name=source_name,
                name=dest_name, tag='yellow-green')]]
        dest_resources = [[aim_resource.SpanVdestGroup(
            name=dest_name)],
            [aim_resource.SpanVdest(vdg_name=dest_name,
                name=dest_name)],
            [aim_resource.SpanVepgSummary(vdg_name=dest_name,
                vd_name=dest_name, dst_ip='192.168.0.10',
                flow_id='1023')]]
        dest_resources.append([
            aim_resource.InfraAccBundleGroup(name=host1_pg,
                span_vsource_group_names=[source_resources[0][0].name],
                span_vdest_group_names=[dest_resources[0][0].name])])
        check_erspan_config(aim_ctx, source_resources, dest_resources)

        # Update the port to add another ERSPAN session, using a
        # different direction, destination, and flow ID.
        erspan_config['apic:erspan_config'].append(
            {'dest_ip': '192.168.0.11',
             'direction': 'both',
             'flow_id': '1022'})
        data = {'port': erspan_config}
        p1 = self._update('ports', p1['id'], data)['port']
        source_name = p1['id'] + '-both'
        dest_name = '192.168.0.11' + '-' + '1022'
        source_resources[0].append(aim_resource.SpanVsourceGroup(
            name=source_name))
        source_resources[1].append(aim_resource.SpanVsource(
            vsg_name=source_name, name=source_name, dir='both',
            src_paths=['uni/tn-prj_' + p1['project_id'] +
                '/ap-OpenStack/epg-net_' + net['network']['id'] +
                '/cep-' + p1['mac_address'].upper()]))
        source_resources[2].append(aim_resource.SpanSpanlbl(
            vsg_name=source_name, name=dest_name, tag='yellow-green'))
        dest_resources[0].append(aim_resource.SpanVdestGroup(
            name=dest_name))
        dest_resources[1].append(aim_resource.SpanVdest(vdg_name=dest_name,
            name=dest_name))
        dest_resources[2].append(aim_resource.SpanVepgSummary(
            vdg_name=dest_name, vd_name=dest_name, dst_ip='192.168.0.11',
            flow_id='1022'))
        dest_resources[3] = [aim_resource.InfraAccBundleGroup(
            name=host1_pg,
            span_vsource_group_names=[res.name for res in source_resources[0]],
            span_vdest_group_names=[res.name for res in dest_resources[0]])]
        check_erspan_config(aim_ctx, source_resources, dest_resources)

        # Create a new port and bind it, using the same destination and
        # flow ID used for the first port. This should create a new set
        # of source resources, but not destination resources in AIM.
        erspan_config = {'apic:erspan_config':
                         [{'dest_ip': '192.168.0.10',
                           'direction': 'both',
                           'flow_id': 1023}]}
        p2 = self._make_port(self.fmt, net['network']['id'],
                             device_owner='compute:',
                             arg_list=('apic:erspan_config',),
                             **erspan_config)['port']
        self.assertEqual(erspan_config.get('apic:erspan_config'),
                         p2['apic:erspan_config'])
        self._bind_port_to_host(p2['id'], 'host1')
        source_name = p2['id'] + '-both'
        dest_name = '192.168.0.10' + '-' + '1023'
        source_resources[0].append(aim_resource.SpanVsourceGroup(
            name=source_name))
        source_resources[1].append(aim_resource.SpanVsource(
            vsg_name=source_name, name=source_name, dir='both',
            src_paths=['uni/tn-prj_' + p2['project_id'] +
                '/ap-OpenStack/epg-net_' + net['network']['id'] +
                '/cep-' + p2['mac_address'].upper()]))
        source_resources[2].append(aim_resource.SpanSpanlbl(
            vsg_name=source_name, name=dest_name, tag='yellow-green'))
        dest_resources[3] = [aim_resource.InfraAccBundleGroup(
            name=host1_pg,
            span_vsource_group_names=[res.name for res in source_resources[0]],
            span_vdest_group_names=[res.name for res in dest_resources[0]])]
        check_erspan_config(aim_ctx, source_resources, dest_resources)

        # Rebind the first port to host2, which doesn't have
        # a VPC. Verify that the port's resources are removed.
        self._bind_port_to_host(p1['id'], 'host2')
        source_resources = [[aim_resource.SpanVsourceGroup(
            name=source_name)],
            [aim_resource.SpanVsource(
                vsg_name=source_name, name=source_name, dir='both',
                src_paths=['uni/tn-prj_' + p2['project_id'] +
                    '/ap-OpenStack/epg-net_' + net['network']['id'] +
                    '/cep-' + p2['mac_address'].upper()])],
            [aim_resource.SpanSpanlbl(
                vsg_name=source_name, name=dest_name, tag='yellow-green')]]
        dest_resources = [[aim_resource.SpanVdestGroup(
            name=dest_name)],
            [aim_resource.SpanVdest(vdg_name=dest_name,
                name=dest_name)],
            [aim_resource.SpanVepgSummary(vdg_name=dest_name,
                vd_name=dest_name, dst_ip='192.168.0.10',
                flow_id='1023')]]
        dest_resources.append([aim_resource.InfraAccBundleGroup(
            name=host1_pg,
            span_vsource_group_names=[res.name for res in source_resources[0]],
            span_vdest_group_names=[res.name for res in dest_resources[0]])])
        check_erspan_config(aim_ctx, source_resources, dest_resources)
        # Unbind the second port, and verify that no resources are left.
        self._bind_port_to_host(p2['id'], '')
        source_resources = [[], [], []]
        dest_resources = [
            [], [], [], [aim_resource.InfraAccBundleGroup(name=host1_pg)]]
        check_erspan_config(aim_ctx, source_resources, dest_resources)


class CallRecordWrapper(object):
    # Instrument all method calls in a class to record the call in a mock
@@ -1029,6 +1029,45 @@ class TestNeutronMapping(AimValidationTestCase):
            port_id=port['id']).update({'host': 'yyy'})
        self._validate_fails_binding_ports()

    def test_erspan_ports(self):
        # Create network, subnet, and bound port.
        net_resp = self._make_network(self.fmt, 'net1', True)
        net = net_resp['network']
        subnet = self._make_subnet(
            self.fmt, net_resp, None, '10.0.0.0/24')['subnet']
        fixed_ips = [{'subnet_id': subnet['id'], 'ip_address': '10.0.0.100'}]
        port = self._make_port(
            self.fmt, net['id'], fixed_ips=fixed_ips)['port']
        port = self._bind_port_to_host(port['id'], 'host1')['port']
        self._validate()

        # Create host links DB info, along with pre-existing
        # VPC interface policy group.
        host1_pg = 'pg-ostack-pt-1-17'
        host1_dn = 'topology/pod-1/protpaths-101-102/pathep-[%s]' % host1_pg
        self.hlink1 = aim_infra.HostLink(
            host_name='host1', interface_name='eth0', path=host1_dn)
        self.aim_mgr.create(self.aim_ctx, self.hlink1)
        acc_bundle = aim_resource.InfraAccBundleGroup(name=host1_pg,
                                                      monitored=True)
        self.aim_mgr.create(self.aim_ctx, acc_bundle)

        # Add ERSPAN session and verify that it validates.
        erspan_config = {'apic:erspan_config': [
            {'dest_ip': '192.168.0.11',
             'direction': 'both',
             'flow_id': 1022}]}
        data = {'port': erspan_config}
        port = self._update('ports', port['id'], data)['port']
        self._validate()

        # Delete source group from AIM, and verify that it
        # can be repaired.
        source_groups = self.aim_mgr.find(self.aim_ctx,
                                          aim_resource.SpanVsourceGroup)
        self.aim_mgr.delete(self.aim_ctx, source_groups[0])
        self._validate_repair_validate()


class TestGbpMapping(AimValidationTestCase):