
Cleanup synchronization

Tailor the sync logic to the json_api. Since the GET/DELETE/POST
logic is so similar for all resource types, a great deal of code
can be shared. Each resource type simply defines a neutron db get
helper, a CVX endpoint and a conversion helper from neutron
format to CVX format.

Change-Id: I91273d3da3ddbcee970ab10dbb51d726a417fae9
changes/07/535507/13
Mitchell Jameson 4 years ago
parent commit 3529a93680
  1. 397 networking_arista/common/db_lib.py
  2. 40 networking_arista/common/utils.py
  3. 395 networking_arista/ml2/arista_resources.py
  4. 215 networking_arista/ml2/arista_sync.py
  5. 1 networking_arista/ml2/mechanism_arista.py
  6. 82 networking_arista/ml2/rpc/arista_json.py
  7. 462 networking_arista/tests/unit/common/test_db_lib.py
  8. 2 networking_arista/tests/unit/ml2/rpc/test_arista_eapi_rpc_wrapper.py
  9. 2 networking_arista/tests/unit/ml2/rpc/test_arista_json_rpc_wrapper.py
  10. 1063 networking_arista/tests/unit/ml2/test_arista_resources.py
  11. 289 networking_arista/tests/unit/ml2/test_arista_sync.py
  12. 9 networking_arista/tests/unit/ml2/test_mechanism_arista.py
  13. 427 networking_arista/tests/unit/utils.py
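
The pattern the commit message describes is almost entirely declarative: a new resource type supplies only a formatter, an endpoint and a db helper, and inherits all GET/DELETE/POST handling from AristaResourcesBase (introduced in arista_resources.py below). A minimal sketch; the FloatingIps class and the db_lib.get_floating_ips helper are hypothetical, invented for illustration:

# Hypothetical sketch only: FloatingIps and db_lib.get_floating_ips are not
# part of this change; they show the three things a resource type defines.
class FloatingIps(AristaResourcesBase):
    # 1. conversion helpers from neutron keys/values to CVX keys/values
    formatter = [AttributeFormatter('id', 'id'),
                 AttributeFormatter('project_id', 'tenantId'),
                 AttributeFormatter('floating_ip_address', 'ipAddress')]
    # 2. the CVX endpoint for this resource type
    endpoint = 'region/%(region)s/floatingip'
    # 3. the neutron db get helper returning all resources of this type
    get_db_resources = staticmethod(db_lib.get_floating_ips)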

397 networking_arista/common/db_lib.py

@@ -13,6 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from oslo_config import cfg
from sqlalchemy import and_, or_
from sqlalchemy.orm import Query, aliased
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_const
from neutron_lib import context as nctx
from neutron_lib.plugins.ml2 import api as driver_api
@@ -28,128 +35,330 @@ from neutron.services.trunk import models as trunk_models
from networking_arista.common import utils
VLAN_SEGMENTATION = 'vlan'
def join_if_necessary(query, *args, **kwargs):
table = args[0]
if table in [t.entity for t in query._join_entities]:
return query
elif table in query._primary_entity.entities:
return query
return query.join(*args, **kwargs)
def outerjoin_if_necessary(query, *args, **kwargs):
table = args[0]
if table in [t.entity for t in query._join_entities]:
return query
elif table in query._primary_entity.entities:
return query
return query.outerjoin(*args, **kwargs)
def filter_network_type(query):
"""Filter unsupported segment types"""
segment_model = segment_models.NetworkSegment
query = (query
.filter(
segment_model.network_type.in_(
utils.SUPPORTED_NETWORK_TYPES)))
return query
def filter_unbound_ports(query):
"""Filter ports not bound to a host or network"""
# hack for pep8 E711: comparison to None should be
# 'if cond is not None'
none = None
port_model = models_v2.Port
binding_level_model = ml2_models.PortBindingLevel
query = (query
.join_if_necessary(port_model)
.join_if_necessary(binding_level_model)
.filter(
binding_level_model.host != '',
port_model.device_id != none,
port_model.network_id != none))
return query
def filter_by_device_owner(query, device_owners=None):
"""Filter ports by device_owner
Either filter using specified device_owner or using the list of all
device_owners supported and unsupported by the arista ML2 plugin
"""
port_model = models_v2.Port
if not device_owners:
device_owners = utils.SUPPORTED_DEVICE_OWNERS
supported_device_owner_filter = [
port_model.device_owner.ilike('%s%%' % owner)
for owner in device_owners]
unsupported_device_owner_filter = [
port_model.device_owner.notilike('%s%%' % owner)
for owner in utils.UNSUPPORTED_DEVICE_OWNERS]
query = (query
.filter(
and_(*unsupported_device_owner_filter),
or_(*supported_device_owner_filter)))
return query
def filter_by_device_id(query):
"""Filter ports attached to devices we don't care about
Currently used to filter DHCP_RESERVED ports
"""
port_model = models_v2.Port
unsupported_device_id_filter = [
port_model.device_id.notilike('%s%%' % id)
for id in utils.UNSUPPORTED_DEVICE_IDS]
query = (query
.filter(and_(*unsupported_device_id_filter)))
return query
def filter_by_vnic_type(query, vnic_type):
"""Filter ports by vnic_type (currently only used for baremetals)"""
port_model = models_v2.Port
binding_model = ml2_models.PortBinding
dst_binding_model = ml2_models.DistributedPortBinding
query = (query
.outerjoin_if_necessary(
binding_model,
port_model.id == binding_model.port_id)
.outerjoin_if_necessary(
dst_binding_model,
port_model.id == dst_binding_model.port_id)
.filter(
(binding_model.vnic_type == vnic_type) |
(dst_binding_model.vnic_type == vnic_type)))
return query
def filter_unmanaged_physnets(query):
"""Filter ports managed by other ML2 plugins """
config = cfg.CONF.ml2_arista
managed_physnets = config['managed_physnets']
# Filter out ports bound to segments on physnets that we're not
# managing
segment_model = segment_models.NetworkSegment
if managed_physnets:
query = (query
.join_if_necessary(segment_model)
.filter(segment_model.physical_network.in_(
managed_physnets)))
return query
def filter_inactive_ports(query):
"""Filter ports that aren't in active status """
port_model = models_v2.Port
query = (query
.filter(port_model.status == n_const.PORT_STATUS_ACTIVE))
return query
def filter_unnecessary_ports(query, device_owners=None, vnic_type=None,
active=True):
"""Filter out all ports are not needed on CVX """
query = (query
.filter_unbound_ports()
.filter_by_device_owner(device_owners)
.filter_by_device_id()
.filter_unmanaged_physnets())
if active:
query = query.filter_inactive_ports()
if vnic_type:
query = query.filter_by_vnic_type(vnic_type)
return query
Query.join_if_necessary = join_if_necessary
Query.outerjoin_if_necessary = outerjoin_if_necessary
Query.filter_network_type = filter_network_type
Query.filter_unbound_ports = filter_unbound_ports
Query.filter_by_device_owner = filter_by_device_owner
Query.filter_by_device_id = filter_by_device_id
Query.filter_by_vnic_type = filter_by_vnic_type
Query.filter_unmanaged_physnets = filter_unmanaged_physnets
Query.filter_inactive_ports = filter_inactive_ports
Query.filter_unnecessary_ports = filter_unnecessary_ports
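Once these helpers are patched onto Query, the filters compose as chained method calls, and join_if_necessary/outerjoin_if_necessary keep the chain from joining the same table twice. A rough usage sketch mirroring the get_dhcp_ports helper defined later in this file:

# Sketch only: compose the filters above into a single query.
session = db.get_reader_session()
with session.begin():
    # join_if_necessary inside the chained filters avoids duplicate
    # joins when several filters touch the same tables
    dhcp_ports = (session
                  .query(models_v2.Port)
                  .filter_unnecessary_ports(
                      device_owners=[n_const.DEVICE_OWNER_DHCP])
                  .all())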
def get_instance_ports(tenant_id, manage_fabric=True, managed_physnets=None):
"""Returns all instance ports for a given tenant."""
def get_tenants():
"""Returns list of all project/tenant ids that may be relevant on CVX"""
session = db.get_reader_session()
project_ids = set()
with session.begin():
# hack for pep8 E711: comparison to None should be
# 'if cond is not None'
none = None
network_model = models_v2.Network
project_ids |= set(pid[0] for pid in
session.query(network_model.project_id).distinct())
port_model = models_v2.Port
binding_level_model = ml2_models.PortBindingLevel
segment_model = segment_models.NetworkSegment
all_ports = (session
.query(port_model, binding_level_model, segment_model)
.join(binding_level_model)
.join(segment_model)
.filter(port_model.tenant_id == tenant_id,
binding_level_model.host != none,
port_model.device_id != none,
port_model.network_id != none))
if not manage_fabric:
all_ports = all_ports.filter(
segment_model.physical_network != none)
if managed_physnets is not None:
managed_physnets.append(None)
all_ports = all_ports.filter(segment_model.physical_network.in_(
managed_physnets))
def eos_port_representation(port):
return {u'portId': port.id,
u'deviceId': port.device_id,
u'hosts': set([bl.host for bl in port.binding_levels]),
u'networkId': port.network_id}
ports = {}
for port in all_ports:
if not utils.supported_device_owner(port.Port.device_owner):
continue
ports[port.Port.id] = eos_port_representation(port.Port)
vm_dict = dict()
def eos_vm_representation(port):
return {u'vmId': port['deviceId'],
u'baremetal_instance': False,
u'ports': {port['portId']: port}}
for port in ports.values():
deviceId = port['deviceId']
if deviceId in vm_dict:
vm_dict[deviceId]['ports'][port['portId']] = port
else:
vm_dict[deviceId] = eos_vm_representation(port)
return vm_dict
def get_instances(tenant):
"""Returns set of all instance ids that may be relevant on CVX."""
project_ids |= set(pid[0] for pid in
session.query(port_model.project_id).distinct())
return [{'project_id': project_id} for project_id in project_ids]
def get_networks():
"""Returns list of all networks that may be relevant on CVX"""
session = db.get_reader_session()
with session.begin():
port_model = models_v2.Port
return set(device_id[0] for device_id in
session.query(port_model.device_id).
filter(port_model.tenant_id == tenant).distinct())
model = models_v2.Network
networks = (session.query(model)).all()
return networks
def tenant_provisioned(tid):
"""Returns true if any networks or ports exist for a tenant."""
def get_segments():
"""Returns list of all network segments that may be relevant on CVX"""
session = db.get_reader_session()
with session.begin():
network_model = models_v2.Network
port_model = models_v2.Port
res = bool(
session.query(network_model).filter_by(tenant_id=tid).count() or
session.query(port_model).filter_by(tenant_id=tid).count()
)
return res
model = segment_models.NetworkSegment
segments = session.query(model).filter_network_type()
return segments
def get_tenants():
"""Returns list of all project/tenant ids that may be relevant on CVX."""
def get_instances(device_owners=None, vnic_type=None):
"""Returns filtered list of all instances in the neutron db"""
session = db.get_reader_session()
project_ids = set()
with session.begin():
network_model = models_v2.Network
project_ids |= set(pid[0] for pid in
session.query(network_model.project_id).distinct())
port_model = models_v2.Port
project_ids |= set(pid[0] for pid in
session.query(port_model.project_id).distinct())
return project_ids
binding_model = ml2_models.PortBinding
instances = (session
.query(port_model,
binding_model)
.outerjoin(
binding_model,
port_model.id == binding_model.port_id)
.distinct(port_model.device_id)
.group_by(port_model.device_id)
.filter_unnecessary_ports(device_owners, vnic_type))
return instances.all()
def get_dhcp_instances():
"""Returns filtered list of DHCP instances that may be relevant on CVX"""
return get_instances(device_owners=[n_const.DEVICE_OWNER_DHCP])
def get_router_instances():
"""Returns filtered list of routers that may be relevant on CVX"""
return get_instances(device_owners=[n_const.DEVICE_OWNER_DVR_INTERFACE])
def _make_port_dict(record):
"""Make a dict from the BM profile DB record."""
return {'port_id': record.port_id,
'host_id': record.host,
'vnic_type': record.vnic_type,
'profile': record.profile}
def get_vm_instances():
"""Returns filtered list of vms that may be relevant on CVX"""
return get_instances(device_owners=[n_const.DEVICE_OWNER_COMPUTE_PREFIX],
vnic_type=portbindings.VNIC_NORMAL)
def get_all_baremetal_ports():
"""Returns a list of all ports that belong to baremetal hosts."""
def get_baremetal_instances():
"""Returns filtered list of baremetals that may be relevant on CVX"""
return get_instances(vnic_type=portbindings.VNIC_BAREMETAL)
def get_ports(device_owners=None, vnic_type=None, active=True):
"""Returns list of all ports in neutron the db"""
session = db.get_reader_session()
with session.begin():
query = session.query(ml2_models.PortBinding)
bm_ports = query.filter_by(vnic_type='baremetal').all()
port_model = models_v2.Port
ports = (session
.query(port_model)
.filter_unnecessary_ports(device_owners, vnic_type, active))
return ports.all()
def get_dhcp_ports():
"""Returns filtered list of DHCP instances that may be relevant on CVX"""
return get_ports(device_owners=[n_const.DEVICE_OWNER_DHCP])
def get_router_ports():
"""Returns filtered list of routers that may be relevant on CVX"""
return get_ports(device_owners=[n_const.DEVICE_OWNER_DVR_INTERFACE])
def get_vm_ports():
"""Returns filtered list of vms that may be relevant on CVX"""
return get_ports(device_owners=[n_const.DEVICE_OWNER_COMPUTE_PREFIX],
vnic_type=portbindings.VNIC_NORMAL)
def get_baremetal_ports():
"""Returns filtered list of baremetals that may be relevant on CVX"""
return get_ports(vnic_type=portbindings.VNIC_BAREMETAL)
return {bm_port.port_id: _make_port_dict(bm_port)
for bm_port in bm_ports}
def get_all_portbindings():
"""Returns a list of all port bindings."""
def get_port_bindings():
"""Returns filtered list of port bindings that may be relevant on CVX
This query is a little complex as we need all binding levels for any
binding that has a single managed physnet, but we need to filter bindings
that have no managed physnets. In order to achieve this, we join to the
binding_level_model once to filter bindings with no managed levels,
then a second time to get all levels for the remaining bindings.
The loop at the end is a convenience to associate levels with bindings
as a list. This would ideally be done through the use of an orm.relation,
but due to some sqlalchemy limitations imposed to make OVO work, we can't
add relations to existing models.
"""
session = db.get_reader_session()
with session.begin():
query = session.query(ml2_models.PortBinding)
ports = query.all()
binding_level_model = ml2_models.PortBindingLevel
aliased_blm = aliased(ml2_models.PortBindingLevel)
port_binding_model = ml2_models.PortBinding
dist_binding_model = ml2_models.DistributedPortBinding
bindings = (session.query(port_binding_model, aliased_blm)
.join(binding_level_model,
and_(
port_binding_model.port_id ==
binding_level_model.port_id,
port_binding_model.host ==
binding_level_model.host))
.filter_unnecessary_ports()
.join(aliased_blm,
and_(port_binding_model.port_id ==
aliased_blm.port_id,
port_binding_model.host ==
aliased_blm.host)))
dist_bindings = (session.query(dist_binding_model, aliased_blm)
.join(
binding_level_model,
and_(dist_binding_model.port_id ==
binding_level_model.port_id,
dist_binding_model.host ==
binding_level_model.host))
.filter_unnecessary_ports()
.join(aliased_blm,
and_(dist_binding_model.port_id ==
aliased_blm.port_id,
dist_binding_model.host ==
aliased_blm.host)))
binding_levels = collections.defaultdict(list)
for binding, level in bindings.all() + dist_bindings.all():
binding_levels[binding].append(level)
bindings_with_levels = list()
for binding, levels in binding_levels.items():
binding.levels = levels
bindings_with_levels.append(binding)
return bindings_with_levels
# # # BEGIN LEGACY DB LIBS # # #
return {port.port_id: _make_port_dict(port)
for port in ports}
def tenant_provisioned(tid):
"""Returns true if any networks or ports exist for a tenant."""
session = db.get_reader_session()
with session.begin():
network_model = models_v2.Network
port_model = models_v2.Port
res = bool(
session.query(network_model).filter_by(tenant_id=tid).count() or
session.query(port_model).filter_by(tenant_id=tid).count()
)
return res
def get_port_binding_level(filters):

40 networking_arista/common/utils.py

@@ -13,23 +13,47 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.services.trunk import constants as t_const
from neutron_lib import constants as n_const
from oslo_config import cfg
from oslo_log import log as logging
SUPPORTED_NETWORK_TYPES = [
n_const.TYPE_VLAN,
n_const.TYPE_VXLAN]
SUPPORTED_DEVICE_OWNERS = [
n_const.DEVICE_OWNER_COMPUTE_PREFIX,
n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
n_const.DEVICE_OWNER_DHCP,
n_const.DEVICE_OWNER_DVR_INTERFACE,
t_const.TRUNK_SUBPORT_OWNER]
UNSUPPORTED_DEVICE_OWNERS = [
n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'probe']
UNSUPPORTED_DEVICE_IDS = [
n_const.DEVICE_ID_RESERVED_DHCP_PORT]
LOG = logging.getLogger(__name__)
def supported_device_owner(device_owner):
supported_device_owner = [n_const.DEVICE_OWNER_DHCP,
n_const.DEVICE_OWNER_DVR_INTERFACE]
if any([device_owner in supported_device_owner,
device_owner.startswith('compute') and
device_owner != 'compute:probe',
device_owner.startswith('baremetal'),
device_owner.startswith('trunk')]):
if (any([device_owner.startswith(supported_owner) for
supported_owner in SUPPORTED_DEVICE_OWNERS]) and
not any([device_owner.startswith(unsupported_owner) for
unsupported_owner in UNSUPPORTED_DEVICE_OWNERS])):
return True
LOG.debug('Unsupported device owner: %s', device_owner)
return False
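Illustrative calls against the rewritten check, using owner strings derived from the constants above (a sketch, not tests from this change):

supported_device_owner('compute:nova')   # True: matches the compute prefix
supported_device_owner('compute:probe')  # False: matches an unsupported prefix
supported_device_owner('network:dhcp')   # True: DEVICE_OWNER_DHCP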
def hostname(hostname):
fqdns_used = cfg.CONF.ml2_arista['use_fqdn']
return hostname if fqdns_used else hostname.split('.')[0]

395 networking_arista/ml2/arista_resources.py

@@ -0,0 +1,395 @@
# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from neutron_lib import constants as n_const
from networking_arista.common import db_lib
from networking_arista.common import utils
class AttributeFormatter(object):
"""Formats a single attribute of the CVX model based on the neutron model
There are 4 elements to an AttributeFormatter:
1. neutron_key - name of the key in the neutron model
2. cvx_key - name of the key in the cvx model
3. format (optional) - function to alter the value to be CVX compatible
4. submodel (optional) - If the get_db_resources function queries multiple
models, the name of the model that contains the neutron_key must be
specified
"""
def __init__(self, neutron_key, cvx_key, format=None, submodel=None):
self.neutron_key = neutron_key
self.cvx_key = cvx_key
self.format = format or (lambda arg: arg)
self.submodel = submodel
def transform(self, resource):
if self.submodel:
resource = getattr(resource, self.submodel)
return (self.cvx_key, self.format(resource[self.neutron_key]))
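A quick illustration of transform with invented values: the neutron attribute is looked up, optionally reformatted, and returned under its CVX key:

fmt = AttributeFormatter('project_id', 'tenantId')
fmt.transform({'id': 'net-1', 'project_id': 'proj-1'})
# -> ('tenantId', 'proj-1')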
class AristaResourcesBase(object):
"""Tracks state of resources of one resource type on neutron and CVX
An AristaResources class is responsible for:
- tracking resources that have been provisioned in neutron
- tracking resources that have been provisioned on CVX
- creating and deleting resources on CVX to bring it in line with neutron
- formatting neutron resources to be compatible with CVX's API
- tracking the correct endpoint for CVX API calls
In order to facilitate this each resource type should define:
1. formatter - a list of AttributeFormatters to convert neutron attributes
to models compatible with CVX's API
2. id_key - the key in the CVX model that uniquely identifies the resource
3. endpoint - format string for region resource endpoint
4. get_db_resources - function that queries the neutron db for all
resources of the resource type in question
"""
formatter = [AttributeFormatter('id', 'id')]
id_key = 'id'
endpoint = 'region/%(region)s'
def __init__(self, rpc):
self.region = rpc.region
self.rpc = rpc
self.cvx_ids = set()
self.neutron_resources = dict()
def clear_cvx_data(self):
self.cvx_ids = set()
def clear_neutron_data(self):
self.neutron_resources = dict()
def clear_all_data(self):
self.clear_cvx_data()
self.clear_neutron_data()
def add_neutron_resource(self, resource):
formatted_resource = self.format_for_create(resource)
self.neutron_resources.update(formatted_resource)
def delete_neutron_resource(self, id):
del self.neutron_resources[id]
def get_endpoint(self):
return self.endpoint % {'region': self.region}
@classmethod
def get_resource_ids(cls, resource):
return set([resource[cls.id_key]])
def get_cvx_ids(self):
if not self.cvx_ids:
cvx_data = self.rpc.send_api_request(self.get_endpoint(), 'GET')
for resource in cvx_data:
self.cvx_ids |= self.get_resource_ids(resource)
return self.cvx_ids
@staticmethod
def get_db_resources():
raise NotImplementedError
def get_neutron_ids(self):
if not self.neutron_resources:
self.get_neutron_resources()
return set(self.neutron_resources.keys())
def get_neutron_resources(self):
if not self.neutron_resources:
for resource in self.get_db_resources():
self.add_neutron_resource(resource)
return self.neutron_resources
def resource_ids_to_delete(self):
cvx_resource_ids = self.get_cvx_ids()
neutron_resource_ids = self.get_neutron_ids()
return (cvx_resource_ids - neutron_resource_ids)
def resource_ids_to_create(self):
cvx_resource_ids = self.get_cvx_ids()
neutron_resource_ids = self.get_neutron_ids()
return (neutron_resource_ids - cvx_resource_ids)
@classmethod
def format_for_create(cls, neutron_resource):
cvx_resource = dict()
cvx_resource.update(
attr.transform(neutron_resource) for attr in cls.formatter)
return {cvx_resource[cls.id_key]: cvx_resource}
@classmethod
def format_for_delete(cls, id):
return {cls.id_key: id}
def create_cvx_resources(self):
resource_ids_to_create = self.resource_ids_to_create()
neutron_resources = self.get_neutron_resources()
resources_to_create = list(neutron_resources[resource_id] for
resource_id in resource_ids_to_create)
if resources_to_create:
self.rpc.send_api_request(self.get_endpoint(), 'POST',
resources_to_create)
self.cvx_ids.update(resource_ids_to_create)
return resources_to_create
def delete_cvx_resources(self):
resource_ids_to_delete = self.resource_ids_to_delete()
resources_to_delete = list(self.format_for_delete(id) for id in
resource_ids_to_delete)
if resources_to_delete:
self.rpc.send_api_request(self.get_endpoint(), 'DELETE',
resources_to_delete)
self.cvx_ids -= resource_ids_to_delete
return resources_to_delete
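The create/delete bookkeeping above is plain set arithmetic over resource ids; a sketch with invented ids:

cvx_ids = {'a', 'b'}        # ids currently known to CVX
neutron_ids = {'b', 'c'}    # ids currently in the neutron db
assert cvx_ids - neutron_ids == {'a'}  # stale on CVX -> DELETE
assert neutron_ids - cvx_ids == {'c'}  # missing on CVX -> POST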
class Tenants(AristaResourcesBase):
endpoint = 'region/%(region)s/tenant'
formatter = [AttributeFormatter('project_id', 'id')]
get_db_resources = staticmethod(db_lib.get_tenants)
class Networks(AristaResourcesBase):
def _is_shared(rbac_entries):
for entry in rbac_entries:
if (entry.action == 'access_as_shared' and
entry.target_tenant == '*'):
return True
return False
formatter = [AttributeFormatter('id', 'id'),
AttributeFormatter('project_id', 'tenantId'),
AttributeFormatter('name', 'networkName'),
AttributeFormatter('rbac_entries', 'shared', _is_shared)]
endpoint = 'region/%(region)s/network'
get_db_resources = staticmethod(db_lib.get_networks)
class Segments(AristaResourcesBase):
formatter = [AttributeFormatter('id', 'id'),
AttributeFormatter('network_type', 'type'),
AttributeFormatter('segmentation_id', 'segmentationId'),
AttributeFormatter('network_id', 'networkId'),
AttributeFormatter('is_dynamic', 'segmentType',
lambda x: 'dynamic' if x else 'static')]
endpoint = 'region/%(region)s/segment'
get_db_resources = staticmethod(db_lib.get_segments)
class Dhcps(AristaResourcesBase):
formatter = [AttributeFormatter('device_id', 'dhcpInstanceId',
submodel='Port'),
AttributeFormatter('host', 'dhcpHostId',
utils.hostname,
submodel='PortBinding'),
AttributeFormatter('project_id', 'tenantId',
submodel='Port')]
id_key = 'dhcpInstanceId'
endpoint = 'region/%(region)s/dhcp'
get_db_resources = staticmethod(db_lib.get_dhcp_instances)
class Routers(AristaResourcesBase):
formatter = [AttributeFormatter('device_id', 'routerInstanceId',
submodel='Port'),
AttributeFormatter('device_owner', 'routerHostId',
lambda *args: 'distributed',
submodel='Port'),
AttributeFormatter('project_id', 'tenantId',
submodel='Port')]
id_key = 'routerInstanceId'
endpoint = 'region/%(region)s/router'
get_db_resources = staticmethod(db_lib.get_router_instances)
class Vms(AristaResourcesBase):
formatter = [AttributeFormatter('device_id', 'vmInstanceId',
submodel='Port'),
AttributeFormatter('host', 'vmHostId',
utils.hostname,
submodel='PortBinding'),
AttributeFormatter('project_id', 'tenantId',
submodel='Port')]
id_key = 'vmInstanceId'
endpoint = 'region/%(region)s/vm'
get_db_resources = staticmethod(db_lib.get_vm_instances)
class Baremetals(AristaResourcesBase):
formatter = [AttributeFormatter('device_id', 'baremetalInstanceId',
submodel='Port'),
AttributeFormatter('host', 'baremetalHostId',
submodel='PortBinding'),
AttributeFormatter('project_id', 'tenantId',
submodel='Port')]
id_key = 'baremetalInstanceId'
endpoint = 'region/%(region)s/baremetal'
get_db_resources = staticmethod(db_lib.get_baremetal_instances)
class PortResourcesBase(AristaResourcesBase):
id_key = 'id'
class DhcpPorts(PortResourcesBase):
endpoint = 'region/%(region)s/port?type=dhcp'
formatter = [AttributeFormatter('id', 'id'),
AttributeFormatter('name', 'portName'),
AttributeFormatter('device_owner', 'vlanType',
lambda *args: 'allowed'),
AttributeFormatter('network_id', 'networkId'),
AttributeFormatter('device_id', 'instanceId'),
AttributeFormatter('device_owner', 'instanceType',
lambda *args: 'dhcp'),
AttributeFormatter('project_id', 'tenantId')]
get_db_resources = staticmethod(db_lib.get_dhcp_ports)
class RouterPorts(PortResourcesBase):
endpoint = 'region/%(region)s/port?type=router'
formatter = [AttributeFormatter('id', 'id'),
AttributeFormatter('name', 'portName'),
AttributeFormatter('device_owner', 'vlanType',
lambda *args: 'allowed'),
AttributeFormatter('network_id', 'networkId'),
AttributeFormatter('device_id', 'instanceId'),
AttributeFormatter('device_owner', 'instanceType',
lambda *args: 'router'),
AttributeFormatter('project_id', 'tenantId')]
get_db_resources = staticmethod(db_lib.get_router_ports)
class VmPorts(PortResourcesBase):
endpoint = 'region/%(region)s/port?type=vm'
formatter = [AttributeFormatter('id', 'id'),
AttributeFormatter('name', 'portName'),
AttributeFormatter('device_owner', 'vlanType',
lambda *args: 'allowed'),
AttributeFormatter('network_id', 'networkId'),
AttributeFormatter('device_id', 'instanceId'),
AttributeFormatter('device_owner', 'instanceType',
lambda *args: 'vm'),
AttributeFormatter('project_id', 'tenantId')]
get_db_resources = staticmethod(db_lib.get_vm_ports)
class BaremetalPorts(PortResourcesBase):
def _get_vlan_type(device_owner):
if (device_owner.startswith(n_const.DEVICE_OWNER_BAREMETAL_PREFIX) or
device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX)):
return 'native'
else:
return 'allowed'
endpoint = 'region/%(region)s/port?type=baremetal'
formatter = [AttributeFormatter('id', 'id'),
AttributeFormatter('name', 'portName'),
AttributeFormatter('device_owner', 'vlanType',
_get_vlan_type),
AttributeFormatter('network_id', 'networkId'),
AttributeFormatter('device_id', 'instanceId'),
AttributeFormatter('device_owner', 'instanceType',
lambda *args: 'baremetal'),
AttributeFormatter('project_id', 'tenantId')]
get_db_resources = staticmethod(db_lib.get_baremetal_ports)
class PortBindings(AristaResourcesBase):
endpoint = 'region/%(region)s/portbinding'
get_db_resources = staticmethod(db_lib.get_port_bindings)
@classmethod
def get_resource_ids(cls, resource):
resource_ids = set()
port_id = resource['portId']
for host_binding in resource.get('hostBinding', []):
resource_ids.add((port_id, host_binding['host']))
for switch_binding in resource.get('switchBinding', []):
resource_ids.add((port_id, (switch_binding['switch'],
switch_binding['interface'])))
return resource_ids
@classmethod
def format_for_delete(cls, id):
model = dict()
port_id, binding = id
model['portId'] = port_id
if type(binding) == tuple:
switch, interface = binding
model['switchBinding'] = [{'switch': switch,
'interface': interface}]
else:
host = binding
model['hostBinding'] = [{'host': host}]
return model
@classmethod
def format_for_create(cls, binding):
cvx_resources = {}
# First build the list of segments to which the port is bound
# binding levels are in order from 0 -> highest
# which is typically vxlan -> vlan
# The Arista JSON API depends on this ordering being present in
# the segments list
segments = []
for binding_level in (sorted(binding['levels'],
key=lambda bl: bl.level)):
segments.append({'id': binding_level.segment_id})
# Determine if this is a switch or host binding and populate
# the appropriate model attribute accordingly
host = utils.hostname(binding['host'])
port_id = binding['port_id']
# If the binding profile isn't valid json, this is a host binding
try:
profile = json.loads(binding.profile)
except ValueError:
profile = {}
if profile.get('local_link_information'):
for link in profile['local_link_information']:
switch_binding = {'host': host,
'switch': link['switch_id'],
'interface': link['port_id'],
'segment': segments}
binding_key = (link['switch_id'], link['port_id'])
cvx_resources[(port_id, binding_key)] = {
'portId': port_id,
'hostBinding': [],
'switchBinding': [switch_binding]}
else:
cvx_resources[(port_id, host)] = {
'portId': port_id,
'hostBinding': [{'host': host, 'segment': segments}],
'switchBinding': []}
return cvx_resources
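Because one neutron binding can expand to several CVX bindings, PortBindings keys its resources on (port, binding) tuples, and format_for_delete reverses the two key shapes. A sketch with invented ids, shapes per the code above:

PortBindings.format_for_delete(('port-1', 'host-1'))
# -> {'portId': 'port-1', 'hostBinding': [{'host': 'host-1'}]}
PortBindings.format_for_delete(('port-1', ('switch-1', 'Ethernet1')))
# -> {'portId': 'port-1',
#     'switchBinding': [{'switch': 'switch-1', 'interface': 'Ethernet1'}]}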

215 networking_arista/ml2/arista_sync.py

@@ -22,8 +22,8 @@ from oslo_service import loopingcall
from networking_arista._i18n import _LI
from networking_arista.common import constants
from networking_arista.common import db_lib
from networking_arista.common import exceptions as arista_exc
from networking_arista.ml2 import arista_resources as resources
LOG = logging.getLogger(__name__)
@@ -84,6 +84,22 @@ class SyncService(object):
self._manage_fabric = manage_fabric
self._managed_physnets = managed_physnets
# Sync order is important because of entity dependencies:
# PortBinding -> Port -> Instance -> Tenant
# -> Segment -> Network -> Tenant
self.sync_order = [resources.Tenants(self._rpc),
resources.Networks(self._rpc),
resources.Segments(self._rpc),
resources.Dhcps(self._rpc),
resources.Routers(self._rpc),
resources.Vms(self._rpc),
resources.Baremetals(self._rpc),
resources.DhcpPorts(self._rpc),
resources.RouterPorts(self._rpc),
resources.VmPorts(self._rpc),
resources.BaremetalPorts(self._rpc),
resources.PortBindings(self._rpc)]
def force_sync(self):
"""Sets the force_sync flag."""
self._force_sync = True
@@ -95,6 +111,7 @@ class SyncService(object):
send it down to EOS.
"""
# Perform sync of Security Groups unconditionally
# TODO(mitchell): Move security group sync to a separate worker
try:
self._rpc.perform_sync_of_sg()
except Exception as e:
@@ -113,7 +130,7 @@
# Send 'sync start' marker.
if not self._rpc.sync_start():
LOG.info(_LI('Not starting sync, setting force'))
self._force_sync = True
self.force_sync()
return
# Perform the actual synchronization.
@@ -122,149 +139,46 @@
# Send 'sync end' marker.
if not self._rpc.sync_end():
LOG.info(_LI('Sync end failed, setting force'))
self._force_sync = True
self.force_sync()
return
self._set_region_updated_time()
def synchronize(self):
"""Sends data to EOS which differs from neutron DB."""
"""Sends data to EOS which differs from neutron DB.
We need to compute resources to sync in reverse sync order
in order to avoid missing dependencies on creation
E.g. if we query in sync order:
1. Query Instances -> I1 isn't there
2. Query Port table -> Port P1 is there, connected to I1
3. We send P1 to CVX without sending I1 -> Error raised
But if we query P1 first:
1. Query Ports P1 -> P1 is not there
2. Query Instances -> find I1
3. We create I1, not P1 -> harmless, mech driver creates P1
Missing dependencies on deletion are harmless and simply result in
the dependent resource not being created:
1. Query Ports -> P1 is found
2. Query Instances -> I1 not found
3. Creating P1 fails on CVX, leaving neither resource behind
"""
LOG.info(_LI('Syncing Neutron <-> EOS'))
try:
# Register with EOS to ensure that it has correct credentials
self._rpc.register_with_eos(sync=True)
eos_tenants = self._rpc.get_tenants()
except arista_exc.AristaRpcError:
LOG.warning(constants.EOS_UNREACHABLE_MSG)
self._force_sync = True
return
db_tenants = db_lib.get_tenants()
# Delete tenants that are in EOS, but not in the database
tenants_to_delete = frozenset(eos_tenants.keys()).difference(
db_tenants)
if tenants_to_delete:
try:
self._rpc.delete_tenant_bulk(tenants_to_delete, sync=True)
except arista_exc.AristaRpcError:
LOG.warning(constants.EOS_UNREACHABLE_MSG)
self._force_sync = True
return
# None of the commands have failed till now. But if subsequent
# operations fail, then force_sync is set to true
self._force_sync = False
# Get Baremetal port switch_bindings, if any
port_profiles = db_lib.get_all_portbindings()
# To support shared networks, split the sync loop in two parts:
# In first loop, delete unwanted VMs and networks and update networks
# In second loop, update VMs. This is done to ensure that networks for
# all tenants are updated before VMs are updated
instances_to_update = {}
for tenant in db_tenants:
db_nets = {n['id']: n
for n in self._ndb.get_all_networks_for_tenant(tenant)}
db_instances = db_lib.get_instances(tenant)
eos_nets = self._get_eos_networks(eos_tenants, tenant)
eos_vms, eos_bms, eos_routers = self._get_eos_vms(eos_tenants,
tenant)
db_nets_key_set = frozenset(db_nets.keys())
db_instances_key_set = frozenset(db_instances)
eos_nets_key_set = frozenset(eos_nets.keys())
eos_vms_key_set = frozenset(eos_vms.keys())
eos_routers_key_set = frozenset(eos_routers.keys())
eos_bms_key_set = frozenset(eos_bms.keys())
# Create a candidate list by incorporating all instances
eos_instances_key_set = (eos_vms_key_set | eos_routers_key_set |
eos_bms_key_set)
# Find the networks that are present on EOS, but not in Neutron DB
nets_to_delete = eos_nets_key_set.difference(db_nets_key_set)
# Find the VMs that are present on EOS, but not in Neutron DB
instances_to_delete = eos_instances_key_set.difference(
db_instances_key_set)
vms_to_delete = [
vm for vm in eos_vms_key_set if vm in instances_to_delete]
routers_to_delete = [
r for r in eos_routers_key_set if r in instances_to_delete]
bms_to_delete = [
b for b in eos_bms_key_set if b in instances_to_delete]
# Find the Networks that are present in Neutron DB, but not on EOS
nets_to_update = db_nets_key_set.difference(eos_nets_key_set)
# Find the VMs that are present in Neutron DB, but not on EOS
instances_to_update[tenant] = db_instances_key_set.difference(
eos_instances_key_set)
try:
if vms_to_delete:
self._rpc.delete_vm_bulk(tenant, vms_to_delete, sync=True)
if routers_to_delete:
self._rpc.delete_instance_bulk(
tenant,
routers_to_delete,
constants.InstanceType.ROUTER,
sync=True)
if bms_to_delete:
self._rpc.delete_instance_bulk(
tenant,
bms_to_delete,
constants.InstanceType.BAREMETAL,
sync=True)
if nets_to_delete:
self._rpc.delete_network_bulk(tenant, nets_to_delete,
sync=True)
if nets_to_update:
networks = [{
'network_id': net_id,
'network_name':
db_nets.get(net_id, {'name': ''})['name'],
'shared':
db_nets.get(net_id,
{'shared': False})['shared'],
'segments': self._ndb.get_all_network_segments(net_id),
}
for net_id in nets_to_update
]
self._rpc.create_network_bulk(tenant, networks, sync=True)
except arista_exc.AristaRpcError:
LOG.warning(constants.EOS_UNREACHABLE_MSG)
self._force_sync = True
# Now update the instances
for tenant in instances_to_update:
if not instances_to_update[tenant]:
continue
try:
# Filter the ports to only the vms that we are interested
# in.
ports_of_interest = {}
for port in self._ndb.get_all_ports_for_tenant(tenant):
ports_of_interest.update(
self._port_dict_representation(port))
if ports_of_interest:
instance_ports = db_lib.get_instance_ports(
tenant, self._manage_fabric, self._managed_physnets)
if instance_ports:
self._rpc.create_instance_bulk(tenant,
ports_of_interest,
instance_ports,
port_profiles,
sync=True)
except arista_exc.AristaRpcError:
LOG.warning(constants.EOS_UNREACHABLE_MSG)
self._force_sync = True
# Compute resources to sync
for resource_type in reversed(self.sync_order):
# Clear all resources for now, once resource passing from
# mech driver is implemented, we'll be more selective
# and do so only when a full sync is required
resource_type.clear_all_data()
resource_type.get_cvx_ids()
resource_type.get_neutron_resources()
# Sync any necessary resources
for resource_type in self.sync_order:
resource_type.delete_cvx_resources()
resource_type.create_cvx_resources()
def _region_in_sync(self):
"""Checks if the region is in sync with EOS.
@@ -302,30 +216,3 @@ class SyncService(object):
except arista_exc.AristaRpcError:
# Force an update in case of an error.
self._force_sync = True
def _get_eos_networks(self, eos_tenants, tenant):
networks = {}
if eos_tenants and tenant in eos_tenants:
networks = eos_tenants[tenant]['tenantNetworks']
return networks
def _get_eos_vms(self, eos_tenants, tenant):
vms = {}
bms = {}
routers = {}
if eos_tenants and tenant in eos_tenants:
vms = eos_tenants[tenant]['tenantVmInstances']
if 'tenantBaremetalInstances' in eos_tenants[tenant]:
# Check if baremetal service is supported
bms = eos_tenants[tenant]['tenantBaremetalInstances']
if 'tenantRouterInstances' in eos_tenants[tenant]:
routers = eos_tenants[tenant]['tenantRouterInstances']
return vms, bms, routers
def _port_dict_representation(self, port):
return {port['id']: {'device_owner': port['device_owner'],
'device_id': port['device_id'],
'name': port['name'],
'id': port['id'],
'tenant_id': port['tenant_id'],
'network_id': port['network_id']}}

1 networking_arista/ml2/mechanism_arista.py

@@ -60,7 +60,6 @@ class AristaDriver(driver_api.MechanismDriver):
self.ndb = db_lib.NeutronNets()
confg = cfg.CONF.ml2_arista
self.segmentation_type = db_lib.VLAN_SEGMENTATION
self.timer = None
self.managed_physnets = confg['managed_physnets']
self.manage_fabric = confg['manage_fabric']

82 networking_arista/ml2/rpc/arista_json.py

@@ -123,7 +123,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
return self._server_ip
return None
def _send_api_request(self, path, method, data=None, sanitized_data=None):
def send_api_request(self, path, method, data=None, sanitized_data=None):
host = self._get_eos_master()
if not host:
msg = six.text_type("Could not find CVX leader")
@@ -139,7 +139,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
'name': self.region,
'syncInterval': self.sync_interval
}
self._send_api_request(path, 'PUT', [data])
self.send_api_request(path, 'PUT', [data])
def register_with_eos(self, sync=False):
self.create_region(self.region)
@@ -148,7 +148,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
def get_region_updated_time(self):
path = 'agent/'
try:
data = self._send_api_request(path, 'GET')
data = self.send_api_request(path, 'GET')
return {'regionTimestamp': data.get('uuid', '')}
except arista_exc.AristaRpcError:
return {'regionTimestamp': ''}
@@ -156,12 +156,12 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
def create_region(self, region):
path = 'region/'
data = {'name': region}
return self._send_api_request(path, 'POST', [data])
return self.send_api_request(path, 'POST', [data])
def delete_region(self, region):
path = 'region/'
data = {'name': region}
return self._send_api_request(path, 'DELETE', [data])
return self.send_api_request(path, 'DELETE', [data])
def delete_this_region(self):
return self.delete_region(self.region)
@@ -169,7 +169,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
def get_region(self, name):
path = 'region/%s' % name
try:
regions = self._send_api_request(path, 'GET')
regions = self.send_api_request(path, 'GET')
for region in regions:
if region['name'] == name:
return region
@@ -197,7 +197,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
'requestId': req_id
}
path = 'region/' + self.region + '/sync'
self._send_api_request(path, 'POST', data)
self.send_api_request(path, 'POST', data)
self.current_sync_name = req_id
return True
except (KeyError, arista_exc.AristaRpcError):
@@ -210,7 +210,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
LOG.info('Attempting to end sync')
try:
path = 'region/' + self.region + '/sync'
self._send_api_request(path, 'DELETE')
self.send_api_request(path, 'DELETE')
self.current_sync_name = None
return True
except arista_exc.AristaRpcError:
@@ -219,28 +219,28 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
def get_vms_for_tenant(self, tenant):
path = 'region/' + self.region + '/vm?tenantId=' + tenant
return self._send_api_request(path, 'GET')
return self.send_api_request(path, 'GET')
def get_dhcps_for_tenant(self, tenant):
path = 'region/' + self.region + '/dhcp?tenantId=' + tenant
return self._send_api_request(path, 'GET')
return self.send_api_request(path, 'GET')
def get_baremetals_for_tenant(self, tenant):
path = 'region/' + self.region + '/baremetal?tenantId=' + tenant
return self._send_api_request(path, 'GET')
return self.send_api_request(path, 'GET')
def get_routers_for_tenant(self, tenant):
path = 'region/' + self.region + '/router?tenantId=' + tenant
return self._send_api_request(path, 'GET')
return self.send_api_request(path, 'GET')
def get_ports_for_tenant(self, tenant, pType):
path = 'region/%s/port?tenantId=%s&type=%s' % (self.region,
tenant, pType)
return self._send_api_request(path, 'GET')
return self.send_api_request(path, 'GET')
def get_tenants(self):
path = 'region/' + self.region + '/tenant'
tenants = self._send_api_request(path, 'GET')
tenants = self.send_api_request(path, 'GET')
d = {}
for ten in tenants:
ten['tenantId'] = ten.pop('id')
@@ -276,11 +276,11 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
def delete_tenant_bulk(self, tenant_list, sync=False):
path = 'region/' + self.region + '/tenant'
data = [{'id': t} for t in tenant_list]
return self._send_api_request(path, 'DELETE', data)
return self.send_api_request(path, 'DELETE', data)
def get_networks(self, tenant):
path = 'region/' + self.region + '/network?tenantId=' + tenant
return self._send_api_request(path, 'GET')
return self.send_api_request(path, 'GET')
def create_network_bulk(self, tenant_id, network_list, sync=False):
self._create_tenant_if_needed(tenant_id)
@@ -317,11 +317,11 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
if networks:
path = 'region/' + self.region + '/network'
self._send_api_request(path, 'POST', networks)
self.send_api_request(path, 'POST', networks)
if segments:
path = 'region/' + self.region + '/segment'
self._send_api_request(path, 'POST', segments)
self.send_api_request(path, 'POST', segments)
def create_network_segments(self, tenant_id, network_id,
network_name, segments):
@@ -340,7 +340,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
})
path = 'region/' + self.region + '/segment'
self._send_api_request(path, 'POST', segment_data)
self.send_api_request(path, 'POST', segment_data)
def delete_network_segments(self, tenant_id, segments):
segment_data = []
@@ -349,12 +349,12 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
'id': segment['id'],
})
path = 'region/' + self.region + '/segment'
self._send_api_request(path, 'DELETE', segment_data)
self.send_api_request(path, 'DELETE', segment_data)
def delete_network_bulk(self, tenant_id, network_id_list, sync=False):
path = 'region/' + self.region + '/network'
data = [{'id': n, 'tenantId': tenant_id} for n in network_id_list]
return self._send_api_request(path, 'DELETE', data)
return self.send_api_request(path, 'DELETE', data)
def _create_instance_data(self, vm_id, host_id):
return {
@@ -389,7 +389,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
def get_tenant(self, tenant_id):
path = 'region/' + self.region + '/tenant?tenantId=' + tenant_id
tenants = self._send_api_request(path, 'GET')
tenants = self.send_api_request(path, 'GET')
if tenants:
try:
return tenants[0]
@@ -400,7 +400,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
def create_tenant_bulk(self, tenant_ids):
path = 'region/' + self.region + '/tenant'
data = [{'id': tid} for tid in tenant_ids]
return self._send_api_request(path, 'POST', data)
return self.send_api_request(path, 'POST', data)
def create_instance_bulk(self, tenant_id, neutron_ports, vms,
port_profiles, sync=False):
@@ -490,25 +490,25 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
# create instances first
if vmInst:
path = 'region/' + self.region + '/vm?tenantId=' + tenant_id
self._send_api_request(path, 'POST', list(vmInst.values()))
self.send_api_request(path, 'POST', list(vmInst.values()))
if dhcpInst:
path = 'region/' + self.region + '/dhcp?tenantId=' + tenant_id
self._send_api_request(path, 'POST', list(dhcpInst.values()))
self.send_api_request(path, 'POST', list(dhcpInst.values()))
if baremetalInst:
path = 'region/' + self.region + '/baremetal?tenantId=' + tenant_id
self._send_api_request(path, 'POST', list(baremetalInst.values()))
self.send_api_request(path, 'POST', list(baremetalInst.values()))
if routerInst:
path = 'region/' + self.region + '/router?tenantId=' + tenant_id
self._send_api_request(path, 'POST', list(routerInst.values()))
self.send_api_request(path, 'POST', list(routerInst.values()))
# now create ports for the instances
path = 'region/' + self.region + '/port'
self._send_api_request(path, 'POST', portInst)
self.send_api_request(path, 'POST', portInst)
# TODO(shashank): Optimize this
for port_id, bindings in portBindings.items():
url = 'region/' + self.region + '/port/' + port_id + '/binding'
self._send_api_request(url, 'POST', bindings)
self.send_api_request(url, 'POST', bindings)
def delete_instance_bulk(self, tenant_id, instance_id_list, instance_type,
sync=False):
@@ -517,7 +517,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
'type': instance_type}
data = [{'id': i} for i in instance_id_list]
return self._send_api_request(path, 'DELETE', data)
return self.send_api_request(path, 'DELETE', data)
def delete_vm_bulk(self, tenant_id, vm_id_list, sync=False):
self.delete_instance_bulk(tenant_id, vm_id_list, const.InstanceType.VM)
@@ -533,12 +533,12 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
port = self._create_port_data(port_id, None, None, instance_id,
None, instance_type, None,
device_owner)
return self._send_api_request(path, 'DELETE', [port])
return self.send_api_request(path, 'DELETE', [port])
def get_instance_ports(self, instance_id, instance_type):
path = ('region/%s/port?id=%s&type=%s' %
(self.region, instance_id, instance_type))
return self._send_api_request(path, 'GET')
return self.send_api_request(path, 'GET')
def plug_port_into_network(self, device_id, host_id, port_id,
net_id, tenant_id, port_name, device_owner,
@@ -570,9 +570,9 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
'device_type': device_type,
'tenant_id': tenant_id,
}
self._send_api_request(url, 'POST', [instance])
self._send_api_request('region/' + self.region + '/port', 'POST',
[port])
self.send_api_request(url, 'POST', [instance])
self.send_api_request('region/' + self.region + '/port', 'POST',
[port])
if trunk_details and trunk_details.get('sub_ports'):
for subport in trunk_details['sub_ports']:
subport_id = subport['port_id']
@@ -585,8 +585,8 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
subport_name, device_type,
[host_id], sub_device_owner)
self._send_api_request('region/' + self.region + '/port',
'POST', [port])
self.send_api_request('region/' + self.region + '/port',
'POST', [port])
if device_type in const.InstanceType.VIRTUAL_INSTANCE_TYPES:
self.bind_port_to_host(port_id, host_id, net_id, segments)
if trunk_details and trunk_details.get('sub_ports'):
@@ -694,7 +694,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
url = 'region/' + self.region + '/port/' + port_id + '/binding'
bindings = self._get_host_bindings(port_id, host, network_id,
segments)
self._send_api_request(url, 'POST', bindings)
self.send_api_request(url, 'POST', bindings)
def unbind_port_from_host(self, port_id, host):
url = 'region/' + self.region + '/port/' + port_id + '/binding'
@@ -702,7 +702,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
'hostBinding': [{
'host': host,
}]}