Browse Source

Merge "Remove use of the arista_provisioned_vms table"

changes/78/517378/6
Zuul 4 years ago committed by Gerrit Code Review
parent
commit
2b30b92acc
  1. 49
      networking_arista/common/db.py
  2. 228
      networking_arista/common/db_lib.py
  3. 35
      networking_arista/common/utils.py
  4. 2
      networking_arista/db/migration/alembic_migrations/versions/CONTRACT_HEAD
  5. 34
      networking_arista/db/migration/alembic_migrations/versions/queens/contract/941bad5630c1_drop_aristaprovisionedvms.py
  6. 37
      networking_arista/ml2/arista_sync.py
  7. 204
      networking_arista/ml2/mechanism_arista.py
  8. 5
      networking_arista/ml2/rpc/arista_json.py
  9. 316
      networking_arista/tests/unit/common/test_db_lib.py
  10. 4
      networking_arista/tests/unit/ml2/rpc/test_arista_json_rpc_wrapper.py
  11. 177
      networking_arista/tests/unit/ml2/test_arista_mechanism_driver.py
  12. 45
      networking_arista/tests/unit/ml2/test_arista_sync.py
  13. 354
      networking_arista/tests/unit/ml2/test_mechanism_arista.py
  14. 90
      networking_arista/tests/unit/utils.py

49
networking_arista/common/db.py

@ -1,49 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.db import constants as db_const
from neutron_lib.db import model_base
import sqlalchemy as sa
UUID_LEN = 36
STR_LEN = 255
class HasTenant(object):
    """Tenant mixin; adds an indexed tenant_id column to subclasses."""

    # Sized to neutron's project-id field so any neutron tenant id fits.
    tenant_id = sa.Column(sa.String(db_const.PROJECT_ID_FIELD_SIZE),
                          index=True)
class AristaProvisionedVms(model_base.BASEV2, model_base.HasId,
                           HasTenant):
    """Stores VMs provisioned on Arista EOS.

    All VMs launched on physical hosts connected to Arista
    Switches are remembered.
    """
    __tablename__ = 'arista_provisioned_vms'

    # Device id of the instance owning the port; exposed to EOS as deviceId.
    vm_id = sa.Column(sa.String(STR_LEN))
    # Host the instance is placed on; exposed to EOS in the hosts list.
    host_id = sa.Column(sa.String(STR_LEN))
    # Neutron port UUID connecting the instance to the network.
    port_id = sa.Column(sa.String(UUID_LEN))
    # Neutron network UUID the port is attached to.
    network_id = sa.Column(sa.String(UUID_LEN))

    def eos_port_representation(self):
        """Return this row as the port dict format expected by EOS."""
        return {u'portId': self.port_id,
                u'deviceId': self.vm_id,
                u'hosts': [self.host_id],
                u'networkId': self.network_id}

228
networking_arista/common/db_lib.py

@ -26,223 +26,73 @@ from neutron.db import segments_db
from neutron.plugins.ml2 import models as ml2_models
from neutron.services.trunk import models as trunk_models
from networking_arista.common import db as db_models
from networking_arista.common import utils
VLAN_SEGMENTATION = 'vlan'
def remember_vm(vm_id, host_id, port_id, network_id, tenant_id):
"""Stores all relevant information about a VM in repository.
:param vm_id: globally unique identifier for VM instance
:param host_id: ID of the host where the VM is placed
:param port_id: globally unique port ID that connects VM to network
:param network_id: globally unique neutron network identifier
:param tenant_id: globally unique neutron tenant identifier
"""
session = db.get_writer_session()
with session.begin():
vm = db_models.AristaProvisionedVms(
vm_id=vm_id,
host_id=host_id,
port_id=port_id,
network_id=network_id,
tenant_id=tenant_id)
session.add(vm)
def forget_all_ports_for_network(net_id):
"""Removes all ports for a given network fron repository.
:param net_id: globally unique network ID
"""
session = db.get_writer_session()
with session.begin():
(session.query(db_models.AristaProvisionedVms).
filter_by(network_id=net_id).delete())
def update_port(vm_id, host_id, port_id, network_id, tenant_id):
"""Updates the port details in the database.
:param vm_id: globally unique identifier for VM instance
:param host_id: ID of the new host where the VM is placed
:param port_id: globally unique port ID that connects VM to network
:param network_id: globally unique neutron network identifier
:param tenant_id: globally unique neutron tenant identifier
"""
session = db.get_writer_session()
with session.begin():
port = session.query(db_models.AristaProvisionedVms).filter_by(
port_id=port_id).first()
if port:
# Update the VM's host id
port.host_id = host_id
port.vm_id = vm_id
port.network_id = network_id
port.tenant_id = tenant_id
def forget_port(port_id, host_id):
"""Deletes the port from the database
:param port_id: globally unique port ID that connects VM to network
:param host_id: host to which the port is bound to
"""
session = db.get_writer_session()
with session.begin():
session.query(db_models.AristaProvisionedVms).filter_by(
port_id=port_id,
host_id=host_id).delete()
def is_vm_provisioned(vm_id, host_id, port_id,
network_id, tenant_id):
"""Checks if a VM is already known to EOS
:returns: True, if yes; False otherwise.
:param vm_id: globally unique identifier for VM instance
:param host_id: ID of the host where the VM is placed
:param port_id: globally unique port ID that connects VM to network
:param network_id: globally unique neutron network identifier
:param tenant_id: globally unique neutron tenant identifier
"""
session = db.get_reader_session()
with session.begin():
num_vm = (session.query(db_models.AristaProvisionedVms).
filter_by(tenant_id=tenant_id,
vm_id=vm_id,
port_id=port_id,
network_id=network_id,
host_id=host_id).count())
return num_vm > 0
def is_port_provisioned(port_id, host_id=None):
"""Checks if a port is already known to EOS
:returns: True, if yes; False otherwise.
:param port_id: globally unique port ID that connects VM to network
:param host_id: host to which the port is bound to
"""
filters = {
'port_id': port_id
}
if host_id:
filters['host_id'] = host_id
def get_instance_ports(tenant_id, manage_fabric=True, managed_physnets=None):
"""Returns all instance ports for a given tenant."""
session = db.get_reader_session()
with session.begin():
num_ports = (session.query(db_models.AristaProvisionedVms).
filter_by(**filters).count())
return num_ports > 0
def num_vms_provisioned(tenant_id):
"""Returns number of VMs for a given tennat.
:param tenant_id: globally unique neutron tenant identifier
"""
session = db.get_reader_session()
with session.begin():
return (session.query(db_models.AristaProvisionedVms).
filter_by(tenant_id=tenant_id).count())
def get_vms(tenant_id):
"""Returns all VMs for a given tenant in EOS-compatible format.
:param tenant_id: globally unique neutron tenant identifier
"""
session = db.get_reader_session()
with session.begin():
model = db_models.AristaProvisionedVms
# hack for pep8 E711: comparison to None should be
# 'if cond is not None'
none = None
all_ports = (session.query(model).
filter(model.tenant_id == tenant_id,
model.host_id != none,
model.vm_id != none,
model.network_id != none,
model.port_id != none))
port_model = models_v2.Port
binding_level_model = ml2_models.PortBindingLevel
segment_model = segment_models.NetworkSegment
all_ports = (session
.query(port_model, binding_level_model, segment_model)
.join(binding_level_model)
.join(segment_model)
.filter(port_model.tenant_id == tenant_id,
binding_level_model.host != none,
port_model.device_id != none,
port_model.network_id != none))
if not manage_fabric:
all_ports = all_ports.filter(
segment_model.physical_network != none)
if managed_physnets is not None:
managed_physnets.append(None)
all_ports = all_ports.filter(segment_model.physical_network.in_(
managed_physnets))
def eos_port_representation(port):
return {u'portId': port.id,
u'deviceId': port.device_id,
u'hosts': set([bl.host for bl in port.binding_levels]),
u'networkId': port.network_id}
ports = {}
for port in all_ports:
if port.port_id not in ports:
ports[port.port_id] = port.eos_port_representation()
else:
ports[port.port_id]['hosts'].append(port.host_id)
if not utils.supported_device_owner(port.Port.device_owner):
continue
ports[port.Port.id] = eos_port_representation(port.Port)
vm_dict = dict()
def eos_vm_representation(port):
return {u'vmId': port['deviceId'],
u'baremetal_instance': False,
u'ports': [port]}
u'ports': {port['portId']: port}}
for port in ports.values():
deviceId = port['deviceId']
if deviceId in vm_dict:
vm_dict[deviceId]['ports'].append(port)
vm_dict[deviceId]['ports'][port['portId']] = port
else:
vm_dict[deviceId] = eos_vm_representation(port)
return vm_dict
def are_ports_attached_to_network(net_id):
"""Checks if a given network is used by any port, excluding dhcp port.
:param net_id: globally unique network ID
"""
session = db.get_reader_session()
with session.begin():
model = db_models.AristaProvisionedVms
return session.query(model).filter_by(network_id=net_id).filter(
~model.vm_id.startswith('dhcp')).count() > 0
def get_ports(tenant_id=None):
"""Returns all ports of VMs in EOS-compatible format.
:param tenant_id: globally unique neutron tenant identifier
"""
def get_instances(tenant):
"""Returns set of all instance ids that may be relevant on CVX."""
session = db.get_reader_session()
with session.begin():
model = db_models.AristaProvisionedVms
# hack for pep8 E711: comparison to None should be
# 'if cond is not None'
none = None
if tenant_id:
all_ports = (session.query(model).
filter(model.tenant_id == tenant_id,
model.host_id != none,
model.vm_id != none,
model.network_id != none,
model.port_id != none))
else:
all_ports = (session.query(model).
filter(model.tenant_id != none,
model.host_id != none,
model.vm_id != none,
model.network_id != none,
model.port_id != none))
ports = {}
for port in all_ports:
if port.port_id not in ports:
ports[port.port_id] = port.eos_port_representation()
ports[port.port_id]['hosts'].append(port.host_id)
return ports
def get_all_anet_nets():
"""Return the set of networks in the arista_provisioned_vms table."""
session = db.get_reader_session()
with session.begin():
model = db_models.AristaProvisionedVms
return set(nid[0] for nid in
session.query(model.network_id).distinct())
port_model = models_v2.Port
return set(device_id[0] for device_id in
session.query(port_model.device_id).
filter(port_model.tenant_id == tenant).distinct())
def tenant_provisioned(tid):

35
networking_arista/common/utils.py

@ -0,0 +1,35 @@
# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import constants as n_const
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def supported_device_owner(device_owner):
    """Return True if ports with this device_owner are managed by the driver.

    Supported owners are DHCP ports, DVR interface ports, compute ports
    (except 'compute:probe'), baremetal ports and trunk ports.  Any other
    owner is logged at debug level and rejected.

    :param device_owner: the port's device_owner string
    :returns: True if the device owner is supported, False otherwise
    """
    # Fixed: the original local list was also named 'supported_device_owner',
    # shadowing the function itself; 'any([...])' over an eagerly-built list
    # is replaced with a plain boolean expression.
    supported_owners = (n_const.DEVICE_OWNER_DHCP,
                        n_const.DEVICE_OWNER_DVR_INTERFACE)
    if (device_owner in supported_owners or
            (device_owner.startswith('compute') and
             device_owner != 'compute:probe') or
            device_owner.startswith(('baremetal', 'trunk'))):
        return True
    LOG.debug('Unsupported device owner: %s', device_owner)
    return False

2
networking_arista/db/migration/alembic_migrations/versions/CONTRACT_HEAD

@ -1 +1 @@
39c2eeb67116
941bad5630c1

34
networking_arista/db/migration/alembic_migrations/versions/queens/contract/941bad5630c1_drop_aristaprovisionedvms.py

@ -0,0 +1,34 @@
# Copyright 2017 Arista Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Drop AristaProvisionedVms
Revision ID: 941bad5630c1
Revises: 39c2eeb67116
Create Date: 2017-08-31 17:44:21.334780
"""
# revision identifiers, used by Alembic.
revision = '941bad5630c1'
down_revision = '39c2eeb67116'
branch_labels = None
depends_on = None
from alembic import op
def upgrade():
    """Drop the arista_provisioned_vms table, which is no longer used."""
    op.drop_table('arista_provisioned_vms')

37
networking_arista/ml2/arista_sync.py

@ -29,11 +29,12 @@ LOG = logging.getLogger(__name__)
class AristaSyncWorker(worker.BaseWorker):
def __init__(self, rpc, ndb):
def __init__(self, rpc, ndb, manage_fabric, managed_physnets):
super(AristaSyncWorker, self).__init__(worker_process_count=0)
self.ndb = ndb
self.rpc = rpc
self.sync_service = SyncService(rpc, ndb)
self.sync_service = SyncService(rpc, ndb, manage_fabric,
managed_physnets)
rpc.sync_service = self.sync_service
self._loop = None
@ -43,7 +44,6 @@ class AristaSyncWorker(worker.BaseWorker):
self._sync_running = True
self._sync_event = threading.Event()
self._cleanup_db()
# Registering with EOS updates self.rpc.region_updated_time. Clear it
# to force an initial sync
self.rpc.clear_region_updated_time()
@ -67,19 +67,6 @@ class AristaSyncWorker(worker.BaseWorker):
self.wait()
self.start()
def _cleanup_db(self):
"""Clean up any unnecessary entries in our DB."""
LOG.info('Arista Sync: DB Cleanup')
neutron_nets = self.ndb.get_all_networks()
arista_db_nets = db_lib.get_all_anet_nets()
neutron_net_ids = set([net['id'] for net in neutron_nets])
# Remove networks from the Arista DB if the network does not exist in
# Neutron DB
for net_id in arista_db_nets.difference(neutron_net_ids):
db_lib.forget_all_ports_for_network(net_id)
class SyncService(object):
"""Synchronization of information between Neutron and EOS
@ -88,11 +75,14 @@ class SyncService(object):
ensures that Networks and VMs configured on EOS/Arista HW
are always in sync with Neutron DB.
"""
def __init__(self, rpc_wrapper, neutron_db):
def __init__(self, rpc_wrapper, neutron_db, manage_fabric=True,
managed_physnets=None):
self._rpc = rpc_wrapper
self._ndb = neutron_db
self._force_sync = True
self._region_updated_time = None
self._manage_fabric = manage_fabric
self._managed_physnets = managed_physnets
def force_sync(self):
"""Sets the force_sync flag."""
@ -178,14 +168,14 @@ class SyncService(object):
for tenant in db_tenants:
db_nets = {n['id']: n
for n in self._ndb.get_all_networks_for_tenant(tenant)}
db_instances = db_lib.get_vms(tenant)
db_instances = db_lib.get_instances(tenant)
eos_nets = self._get_eos_networks(eos_tenants, tenant)
eos_vms, eos_bms, eos_routers = self._get_eos_vms(eos_tenants,
tenant)
db_nets_key_set = frozenset(db_nets.keys())
db_instances_key_set = frozenset(db_instances.keys())
db_instances_key_set = frozenset(db_instances)
eos_nets_key_set = frozenset(eos_nets.keys())
eos_vms_key_set = frozenset(eos_vms.keys())
eos_routers_key_set = frozenset(eos_routers.keys())
@ -251,7 +241,7 @@ class SyncService(object):
LOG.warning(constants.EOS_UNREACHABLE_MSG)
self._force_sync = True
# Now update the VMs
# Now update the instances
for tenant in instances_to_update:
if not instances_to_update[tenant]:
continue
@ -264,11 +254,12 @@ class SyncService(object):
self._port_dict_representation(port))
if ports_of_interest:
db_vms = db_lib.get_vms(tenant)
if db_vms:
instance_ports = db_lib.get_instance_ports(
tenant, self._manage_fabric, self._managed_physnets)
if instance_ports:
self._rpc.create_instance_bulk(tenant,
ports_of_interest,
db_vms,
instance_ports,
port_profiles,
sync=True)
except arista_exc.AristaRpcError:

204
networking_arista/ml2/mechanism_arista.py

@ -29,9 +29,9 @@ from neutron.services.trunk import constants as trunk_consts
from networking_arista._i18n import _, _LI, _LE
from networking_arista.common import constants
from networking_arista.common import db
from networking_arista.common import db_lib
from networking_arista.common import exceptions as arista_exc
from networking_arista.common import utils
from networking_arista.ml2 import arista_sync
from networking_arista.ml2.rpc.arista_eapi import AristaRPCWrapperEapi
from networking_arista.ml2.rpc.arista_json import AristaRPCWrapperJSON
@ -58,7 +58,6 @@ class AristaDriver(driver_api.MechanismDriver):
def __init__(self, rpc=None):
self.ndb = db_lib.NeutronNets()
self.db_vms = db.AristaProvisionedVms()
confg = cfg.CONF.ml2_arista
self.segmentation_type = db_lib.VLAN_SEGMENTATION
@ -88,7 +87,9 @@ class AristaDriver(driver_api.MechanismDriver):
trunk_consts.SUBPORTS, events.AFTER_DELETE)
def get_workers(self):
return [arista_sync.AristaSyncWorker(self.rpc, self.ndb)]
return [arista_sync.AristaSyncWorker(self.rpc, self.ndb,
self.manage_fabric,
self.managed_physnets)]
def create_network_postcommit(self, context):
"""Provision the network on the Arista Hardware."""
@ -153,16 +154,6 @@ class AristaDriver(driver_api.MechanismDriver):
'Reason: %(err)s'),
{'name': network_name, 'err': err})
def delete_network_precommit(self, context):
"""Delete the network information from the DB."""
network = context.current
network_id = network['id']
with self.eos_sync_lock:
if db_lib.are_ports_attached_to_network(network_id):
db_lib.forget_all_ports_for_network(network_id)
LOG.info(_LI('Deleting all ports on network %s'),
network_id)
def delete_network_postcommit(self, context):
"""Send network delete request to Arista HW."""
network = context.current
@ -182,20 +173,6 @@ class AristaDriver(driver_api.MechanismDriver):
'network %(network_id)s. Reason: %(err)s'),
{'network_id': network_id, 'err': err})
def create_port_precommit(self, context):
"""Remember the information about a VM and its ports
A VM information, along with the physical host information
is saved.
"""
# Returning from here, since the update_port_precommit is performing
# same operation, and also need of port binding information to decide
# whether to react to a port create event which is not available when
# this method is called.
return
def _get_physnet_from_link_info(self, port, physnet_info):
binding_profile = port.get(portbindings.PROFILE)
@ -304,33 +281,6 @@ class AristaDriver(driver_api.MechanismDriver):
# The network_type is vlan, try binding process for baremetal.
self._bind_port_to_baremetal(context, segment)
def create_port_postcommit(self, context):
"""Plug a physical host into a network.
Send provisioning request to Arista Hardware to plug a host
into appropriate network.
"""
# Returning from here, since the update_port_postcommit is performing
# same operation, and also need of port binding information to decide
# whether to react to a port create event which is not available when
# this method is called.
return
def _supported_device_owner(self, device_owner):
supported_device_owner = [n_const.DEVICE_OWNER_DHCP,
n_const.DEVICE_OWNER_DVR_INTERFACE]
if any([device_owner in supported_device_owner,
device_owner.startswith('compute') and
device_owner != 'compute:probe',
device_owner.startswith('baremetal'),
device_owner.startswith('trunk')]):
return True
LOG.debug('Unsupported device owner: %s', device_owner)
def _network_owner_tenant(self, context, network_id, tenant_id):
tid = tenant_id
if network_id and tenant_id:
@ -372,34 +322,6 @@ class AristaDriver(driver_api.MechanismDriver):
bound_segments.append(bound_segment)
return bound_segments
def _handle_port_migration_precommit(self, context):
"""Handles port migration in precommit
It updates the port's new host in the DB
"""
orig_port = context.original
orig_host = context.original_host
new_host = context.host
new_port = context.current
port_id = orig_port['id']
if new_host and orig_host and new_host != orig_host:
LOG.debug("Handling port migration for: %s " % orig_port)
network_id = orig_port['network_id']
tenant_id = orig_port['tenant_id'] or constants.INTERNAL_TENANT_ID
# Ensure that we use tenant Id for the network owner
tenant_id = self._network_owner_tenant(context, network_id,
tenant_id)
device_id = new_port['device_id']
with self.eos_sync_lock:
port_provisioned = db_lib.is_port_provisioned(port_id,
orig_host)
if port_provisioned:
db_lib.update_port(device_id, new_host, port_id,
network_id, tenant_id)
return True
def _handle_port_migration_postcommit(self, context):
"""Handles port migration in postcommit
@ -447,95 +369,6 @@ class AristaDriver(driver_api.MechanismDriver):
return True
def update_port_precommit(self, context):
"""Update the name of a given port.
At the moment we only support port name change.
Any other change to port is not supported at this time.
We do not store the port names, therefore, no DB store
action is performed here.
"""
new_port = context.current
orig_port = context.original
if new_port['name'] != orig_port['name']:
LOG.info(_LI('Port name changed to %s'), new_port['name'])
device_id = new_port['device_id']
host = context.host
pretty_log("update_port_precommit: new", new_port)
pretty_log("update_port_precommit: orig", orig_port)
if not self._supported_device_owner(new_port['device_owner']):
return
# Check if it is port migration case
if self._handle_port_migration_precommit(context):
return
# Check if the port is part of managed physical network
seg_info = self._bound_segments(context)
if not seg_info:
# Ignoring the update as the port is not managed by
# arista mechanism driver.
return
# device_id and device_owner are set on VM boot
port_id = new_port['id']
network_id = new_port['network_id']
tenant_id = new_port['tenant_id'] or constants.INTERNAL_TENANT_ID
# Ensure that we use tenant Id for the network owner
tenant_id = self._network_owner_tenant(context, network_id, tenant_id)
with self.eos_sync_lock:
port_down = False
if(new_port['device_owner'] ==
n_const.DEVICE_OWNER_DVR_INTERFACE):
# We care about port status only for DVR ports because
# for DVR, a single port exists on multiple hosts. If a port
# is no longer needed on a host then the driver gets a
# port_update notification for that <port, host> with the
# port status as PORT_STATUS_DOWN.
port_down = context.status == n_const.PORT_STATUS_DOWN
if host and not port_down:
port_host_filter = None
if(new_port['device_owner'] ==
n_const.DEVICE_OWNER_DVR_INTERFACE):
# <port, host> uniquely identifies a DVR port. Other
# ports are identified by just the port id
port_host_filter = host
port_provisioned = db_lib.is_port_provisioned(
port_id, port_host_filter)
if not port_provisioned:
LOG.info("Remembering the port")
# Create a new port in the DB
db_lib.remember_vm(device_id, host, port_id,
network_id, tenant_id)
else:
if(new_port['device_id'] != orig_port['device_id'] or
context.host != context.original_host or
new_port['network_id'] != orig_port['network_id'] or
new_port['tenant_id'] != orig_port['tenant_id']):
LOG.info("Updating the port")
# Port exists in the DB. Update it
db_lib.update_port(device_id, host, port_id,
network_id, tenant_id)
else: # Unbound or down port does not concern us
orig_host = context.original_host
LOG.info("Forgetting the port on %s" % str(orig_host))
db_lib.forget_port(port_id, orig_host)
def _port_updated(self, context):
"""Returns true if any port parameters have changed."""
new_port = context.current
orig_port = context.original
return (new_port['device_id'] != orig_port['device_id'] or
context.host != context.original_host or
new_port['network_id'] != orig_port['network_id'] or
new_port['tenant_id'] != orig_port['tenant_id'])
def update_port_postcommit(self, context):
"""Update the name of a given port in EOS.
@ -555,7 +388,7 @@ class AristaDriver(driver_api.MechanismDriver):
if not device_owner and orig_port.get('trunk_details'):
device_owner = orig_port['device_owner']
if not self._supported_device_owner(device_owner):
if not utils.supported_device_owner(device_owner):
return
vnic_type = port['binding:vnic_type']
@ -592,16 +425,6 @@ class AristaDriver(driver_api.MechanismDriver):
with self.eos_sync_lock:
hostname = self._host_name(host)
port_host_filter = None
if(port['device_owner'] ==
n_const.DEVICE_OWNER_DVR_INTERFACE):
# <port, host> uniquely identifies a DVR port. Other
# ports are identified by just the port id
port_host_filter = host
port_provisioned = db_lib.is_port_provisioned(port_id,
port_host_filter)
try:
orig_host = context.original_host
port_down = False
@ -618,8 +441,7 @@ class AristaDriver(driver_api.MechanismDriver):
# connected to the port was deleted or its in DOWN
# state. So delete the old port on the old host.
self._delete_port(orig_port, orig_host, tenant_id)
if(port_provisioned and hostname and
is_vm_boot and not port_down and
if(hostname and is_vm_boot and not port_down and
device_id != n_const.DEVICE_ID_RESERVED_DHCP_PORT):
segments = seg_info
all_segments = self.ndb.get_all_network_segments(
@ -655,18 +477,6 @@ class AristaDriver(driver_api.MechanismDriver):
'port %(port_id)s. Reason: %(err)s'),
{'port_id': port_id, 'err': err})
def delete_port_precommit(self, context):
"""Delete information about a VM and host from the DB."""
port = context.current
pretty_log("delete_port_precommit:", port)
port_id = port['id']
host_id = context.host
with self.eos_sync_lock:
if db_lib.is_port_provisioned(port_id, host_id):
db_lib.forget_port(port_id, host_id)
def delete_port_postcommit(self, context):
"""Unplug a physical host from a network.
@ -711,7 +521,7 @@ class AristaDriver(driver_api.MechanismDriver):
network_id = port['network_id']
device_owner = port['device_owner']
if not self._supported_device_owner(device_owner):
if not utils.supported_device_owner(device_owner):
return
vnic_type = port['binding:vnic_type']

5
networking_arista/ml2/rpc/arista_json.py

@ -415,8 +415,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
portBindings = {}
for vm in vms.values():
for v_port in vm['ports']:
port_id = v_port['portId']
for port_id, v_port in six.iteritems(vm['ports']):
if not v_port['hosts']:
# Skip all the ports that have no host associated with them
continue
@ -426,7 +425,7 @@ class AristaRPCWrapperJSON(AristaRPCWrapperBase):
neutron_port = neutron_ports[port_id]
inst_id = vm['vmId']
inst_host = vm['ports'][0]['hosts'][0]
inst_host = v_port['hosts'][0]
instance = self._create_instance_data(inst_id, inst_host)
device_owner = neutron_port['device_owner']

316
networking_arista/tests/unit/common/test_db_lib.py

@ -132,15 +132,6 @@ class DbLibTest(testlib_api.SqlTestCase):
self.assertEqual(tenants, set([tenant_1_id, tenant_2_id,
tenant_3_id, tenant_4_id]))
def test_get_all_anet_nets(self):
net_1_id = 'n1'
net_2_id = 'n2'
db_lib.remember_vm('vm1', 'h1', 'p1', net_1_id, 't1')
db_lib.remember_vm('vm2', 'h2', 'p2', net_2_id, 't2')
db_lib.remember_vm('vm3', 'h3', 'p3', net_2_id, 't3')
self.assertEqual(db_lib.get_all_anet_nets(),
set([net_1_id, net_2_id]))
def test_tenant_provisioned(self):
tenant_1_id = 't1'
port_1_id = 'p1'
@ -165,3 +156,310 @@ class DbLibTest(testlib_api.SqlTestCase):
utils.delete_network(n_ctx, network_id)
self.assertFalse(db_lib.tenant_provisioned(tenant_1_id))
self.assertFalse(db_lib.tenant_provisioned(tenant_2_id))
def test_get_instances(self):
    """get_instances returns the set of device ids with ports for a tenant."""
    # First check that get_instances initially returns an empty set
    tenant_1_id = 't1'
    self.assertEqual(db_lib.get_instances(tenant_1_id), set())
    # Create two ports for two instances and check that both are returned
    port_1_id = 'p1'
    network_1_id = 'n1'
    device_1_id = 'vm1'
    port_2_id = 'p2'
    network_2_id = 'n2'
    device_2_id = 'vm2'
    n1_ctx = utils.create_network(tenant_1_id, network_1_id, 11,
                                  shared=True)
    p1_ctx = utils.create_port(tenant_1_id, network_1_id, device_1_id,
                               port_1_id, n1_ctx)
    n2_ctx = utils.create_network(tenant_1_id, network_2_id, 21)
    p2_ctx = utils.create_port(tenant_1_id, network_2_id, device_2_id,
                               port_2_id, n2_ctx)
    self.assertEqual(db_lib.get_instances(tenant_1_id),
                     set([device_1_id, device_2_id]))
    # Add another port on an existing instance, instance set should not
    # change
    port_3_id = 'p3'
    p3_ctx = utils.create_port(tenant_1_id, network_1_id, device_2_id,
                               port_3_id, n1_ctx)
    self.assertEqual(db_lib.get_instances(tenant_1_id),
                     set([device_1_id, device_2_id]))
    # Add ports under another tenant, the first tenant's instances should
    # remain the same
    tenant_2_id = 't2'
    port_4_id = 'p4'
    device_3_id = 'vm3'
    p4_ctx = utils.create_port(tenant_2_id, network_1_id, device_3_id,
                               port_4_id, n1_ctx)
    self.assertEqual(db_lib.get_instances(tenant_1_id),
                     set([device_1_id, device_2_id]))
    self.assertEqual(db_lib.get_instances(tenant_2_id),
                     set([device_3_id]))
    # Delete all ports and check that an empty set is once again returned
    utils.delete_port(p1_ctx, port_1_id)
    utils.delete_port(p2_ctx, port_2_id)
    utils.delete_port(p3_ctx, port_3_id)
    utils.delete_port(p4_ctx, port_4_id)
    self.assertEqual(db_lib.get_instances(tenant_1_id), set())
    self.assertEqual(db_lib.get_instances(tenant_2_id), set())
def test_get_instance_ports(self):
    """get_instance_ports returns the EOS-format instance->ports dict."""
    # Create 3 ports on two VMs, validate the dict returned
    host = 'ubuntu1'
    tenant_1_id = 't1'
    port_1_id = 'p1'
    network_1_id = 'n1'
    device_1_id = 'vm1'
    port_2_id = 'p2'
    network_2_id = 'n2'
    device_2_id = 'vm2'
    port_3_id = 'p3'
    n1_ctx = utils.create_network(tenant_1_id, network_1_id, 11,
                                  shared=True)
    n2_ctx = utils.create_network(tenant_1_id, network_2_id, 21)
    p1_ctx = utils.create_port(tenant_1_id, network_1_id, device_1_id,
                               port_1_id, n1_ctx)
    p2_ctx = utils.create_port(tenant_1_id, network_2_id, device_2_id,
                               port_2_id, n2_ctx)
    p3_ctx = utils.create_port(tenant_1_id, network_1_id, device_2_id,
                               port_3_id, n1_ctx)
    instance_ports = db_lib.get_instance_ports(tenant_1_id)
    expected_instance_ports = {
        device_1_id: {'vmId': device_1_id,
                      'baremetal_instance': False,
                      'ports': {port_1_id: {'portId': port_1_id,
                                            'deviceId': device_1_id,
                                            'hosts': set([host]),
                                            'networkId': network_1_id}}},
        device_2_id: {'vmId': device_2_id,
                      'baremetal_instance': False,
                      'ports': {port_2_id: {'portId': port_2_id,
                                            'deviceId': device_2_id,
                                            'hosts': set([host]),
                                            'networkId': network_2_id},
                                port_3_id: {'portId': port_3_id,
                                            'deviceId': device_2_id,
                                            'hosts': set([host]),
                                            'networkId': network_1_id}}}}
    self.assertEqual(instance_ports, expected_instance_ports)
    # Add ports under another tenant, the first tenant's instances should
    # remain the same
    tenant_2_id = 't2'
    port_4_id = 'p4'
    device_3_id = 'vm3'
    p4_ctx = utils.create_port(tenant_2_id, network_1_id, device_3_id,
                               port_4_id, n1_ctx)
    instance_ports = db_lib.get_instance_ports(tenant_1_id)
    expected_instance_ports = {
        device_1_id: {'vmId': device_1_id,
                      'baremetal_instance': False,
                      'ports': {port_1_id: {'portId': port_1_id,
                                            'deviceId': device_1_id,
                                            'hosts': set([host]),
                                            'networkId': network_1_id}}},
        device_2_id: {'vmId': device_2_id,
                      'baremetal_instance': False,
                      'ports': {port_2_id: {'portId': port_2_id,
                                            'deviceId': device_2_id,
                                            'hosts': set([host]),
                                            'networkId': network_2_id},
                                port_3_id: {'portId': port_3_id,
                                            'deviceId': device_2_id,
                                            'hosts': set([host]),
                                            'networkId': network_1_id}}}}
    self.assertEqual(instance_ports, expected_instance_ports)
    instance_ports = db_lib.get_instance_ports(tenant_2_id)
    expected_instance_ports = {
        device_3_id: {'vmId': device_3_id,
                      'baremetal_instance': False,
                      'ports': {port_4_id: {'portId': port_4_id,
                                            'deviceId': device_3_id,
                                            'hosts': set([host]),
                                            'networkId': network_1_id}}}}
    self.assertEqual(instance_ports, expected_instance_ports)
    # Delete all ports and check that an empty dict is once again returned
    utils.delete_port(p1_ctx, port_1_id)
    utils.delete_port(p2_ctx, port_2_id)
    utils.delete_port(p3_ctx, port_3_id)
    utils.delete_port(p4_ctx, port_4_id)
    self.assertEqual(db_lib.get_instance_ports(tenant_1_id), dict())
    self.assertEqual(db_lib.get_instance_ports(tenant_2_id), dict())
def test_get_instance_ports_device_owner(self):
    """Ports with unsupported device owners are excluded from the result."""
    # Create a port with an unsupported device owner, check that no ports
    # are returned
    tenant_id = 'tid'
    network_id = 'nid'
    device_id = 'vm'
    port_id = 'pid'
    n_ctx = utils.create_network(tenant_id, network_id, 11)
    # 'compute:probe' is explicitly rejected by utils.supported_device_owner
    utils.create_port(tenant_id, network_id, device_id,
                      port_id, n_ctx, device_owner='compute:probe')
    self.assertEqual(db_lib.get_instance_ports(tenant_id), dict())
def test_get_instance_ports_dvr(self):
    """A port bound to multiple hosts (DVR) reports every bound host."""
    tenant = 'tid'
    network = 'nid'
    device = 'rtr'
    port = 'pid'
    hosts = ['h1', 'h2', 'h3']
    net_ctx = utils.create_network(tenant, network, 11)
    port_ctx = utils.create_port(tenant, network, device, port,
                                 net_ctx, host=hosts[0])
    utils.bind_port_to_host(port, hosts[1], net_ctx)
    utils.bind_port_to_host(port, hosts[2], net_ctx)

    def expected(host_set):
        # Shape of the dict returned by db_lib.get_instance_ports
        return {device: {'vmId': device,
                         'baremetal_instance': False,
                         'ports': {port: {'portId': port,
                                          'deviceId': device,
                                          'hosts': host_set,
                                          'networkId': network}}}}

    # All three bound hosts should be present
    self.assertEqual(db_lib.get_instance_ports(tenant),
                     expected({'h1', 'h2', 'h3'}))
    # Unbinding a host removes it from the result
    utils.unbind_port_from_host(port, hosts[0])
    self.assertEqual(db_lib.get_instance_ports(tenant),
                     expected({'h2', 'h3'}))
    # Deleting the port empties the result
    utils.delete_port(port_ctx, port)
    self.assertEqual(db_lib.get_instance_ports(tenant), {})
def test_get_instance_ports_hpb(self):
    """Ports bound on multi-segment (HPB) networks are reported."""
    host = 'ubuntu1'
    tenant = 'tid'
    network = 'nid'
    device = 'vm'
    port = 'pid'
    # VXLAN fabric segment plus a dynamically allocated VLAN segment
    net_ctx = utils.create_network(tenant, network, 10001,
                                   network_type='vxlan',
                                   physical_network=None)
    vlan_seg = utils.create_dynamic_segment(network, 11, 'vlan',
                                            'default')
    port_ctx = utils.create_port(tenant, network, device, port,
                                 net_ctx, dynamic_segment=vlan_seg)
    expected = {device: {'vmId': device,
                         'baremetal_instance': False,
                         'ports': {port: {'portId': port,
                                          'deviceId': device,
                                          'hosts': {host},
                                          'networkId': network}}}}
    self.assertEqual(db_lib.get_instance_ports(tenant), expected)
    # Deleting the port empties the result
    utils.delete_port(port_ctx, port)
    self.assertEqual(db_lib.get_instance_ports(tenant), {})
def test_get_instance_ports_manage_fabric(self):
    """With manage_fabric=False, fabric-only ports are omitted."""
    host = 'ubuntu1'
    tenant = 'tid'
    network = 'nid'
    device = 'vm'
    port = 'pid'
    net_ctx = utils.create_network(tenant, network, 10001,
                                   network_type='vxlan',
                                   physical_network=None)
    # Only the fabric (vxlan) segment exists, so nothing is reported
    # when fabric segments are not managed.
    port_ctx = utils.create_port(tenant, network, device, port, net_ctx)
    self.assertEqual(
        db_lib.get_instance_ports(tenant, manage_fabric=False), {})
    # Re-bind the port once a VLAN segment exists; it is now reported.
    utils.delete_port(port_ctx, port)
    vlan_seg = utils.create_dynamic_segment(network, 11, 'vlan',
                                            'default')
    port_ctx = utils.create_port(tenant, network, device, port,
                                 net_ctx, dynamic_segment=vlan_seg)
    expected = {device: {'vmId': device,
                         'baremetal_instance': False,
                         'ports': {port: {'portId': port,
                                          'deviceId': device,
                                          'hosts': {host},
                                          'networkId': network}}}}
    self.assertEqual(
        db_lib.get_instance_ports(tenant, manage_fabric=False),
        expected)
    # Deleting the port empties the result
    utils.delete_port(port_ctx, port)
    self.assertEqual(
        db_lib.get_instance_ports(tenant, manage_fabric=False), {})
def test_get_instance_ports_managed_physnets(self):
    """Only ports bound on managed physnets are reported."""
    unmanaged_physnet = 'physnet1'
    managed_physnet = 'physnet2'
    managed_physnets = [managed_physnet]
    tenant = 'tid'
    network = 'nid'
    net_ctx = utils.create_network(tenant, network, 10001,
                                   network_type='vxlan',
                                   physical_network=None)
    # Port bound on the unmanaged physnet -> nothing is reported
    seg_1 = utils.create_dynamic_segment(network, 11, 'vlan',
                                         unmanaged_physnet)
    utils.create_port(tenant, network, 'vm1', 'p1', net_ctx,
                      host='host1', dynamic_segment=seg_1)
    self.assertEqual(
        db_lib.get_instance_ports(tenant, manage_fabric=False,
                                  managed_physnets=managed_physnets),
        {})
    # Port bound on the managed physnet of the same network -> only
    # the managed binding appears in the result
    host_2 = 'host2'
    device_2 = 'vm2'
    port_2 = 'p2'
    seg_2 = utils.create_dynamic_segment(network, 21, 'vlan',
                                         managed_physnet)
    p2_ctx = utils.create_port(tenant, network, device_2, port_2,
                               net_ctx, host=host_2,
                               dynamic_segment=seg_2)
    expected = {device_2: {'vmId': device_2,
                           'baremetal_instance': False,
                           'ports': {port_2: {'portId': port_2,
                                              'deviceId': device_2,
                                              'hosts': {host_2},
                                              'networkId': network}}}}
    self.assertEqual(
        db_lib.get_instance_ports(tenant, manage_fabric=True,
                                  managed_physnets=managed_physnets),
        expected)
    # Deleting the port empties the result
    utils.delete_port(p2_ctx, port_2)
    self.assertEqual(
        db_lib.get_instance_ports(tenant, manage_fabric=False,
                                  managed_physnets=managed_physnets),
        {})

4
networking_arista/tests/unit/ml2/rpc/test_arista_json_rpc_wrapper.py

@ -273,7 +273,7 @@ class TestAristaJSONRPCWrapper(testlib_api.SqlTestCase):
dev_id = 'dev-id-%d' % device_id
devices[dev_id] = {'vmId': dev_id,
'baremetal_instance': False,
'ports': []
'ports': {}
}
for port_id in range(0, num_ports_per_device):
port_id = 'port-id-%d-%d' % (device_id, port_id)
@ -282,7 +282,7 @@ class TestAristaJSONRPCWrapper(testlib_api.SqlTestCase):
'hosts': ['host_%d' % (device_count)],
'portId': port_id
}
devices[dev_id]['ports'].append(port)
devices[dev_id]['ports'][port_id] = port
device_count += 1
device_owners = [n_const.DEVICE_OWNER_DHCP,

177
networking_arista/tests/unit/ml2/test_arista_mechanism_driver.py

@ -1,177 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from neutron.tests.unit import testlib_api
from networking_arista.common import db_lib
from networking_arista.ml2 import mechanism_arista
from networking_arista.tests.unit import utils
def setup_valid_config():
    """Install a valid Arista wrapper configuration into oslo cfg."""
    utils.setup_arista_wrapper_config(cfg)
class AristaProvisionedVlansStorageTestCase(testlib_api.SqlTestCase):
    """Test storing and retrieving functionality of the Arista driver.

    Tests all methods of this class by invoking them separately as well
    as a group.
    """

    def test_vm_is_remembered(self):
        # A remembered VM should show up as provisioned
        vm = 'VM-1'
        tenant = 'test'
        network = '123'
        port = 456
        host = 'ubuntu1'
        db_lib.remember_vm(vm, host, port, network, tenant)
        provisioned = db_lib.is_vm_provisioned(vm, host, port,
                                               network, tenant)
        self.assertTrue(provisioned, 'VM must be provisioned')

    def test_vm_is_removed(self):
        # Forgetting the port should remove the VM's provisioned state
        vm = 'VM-1'
        tenant = 'test'
        network = '123'
        port = 456
        host = 'ubuntu1'
        db_lib.remember_vm(vm, host, port, network, tenant)
        db_lib.forget_port(port, host)
        provisioned = db_lib.is_vm_provisioned(vm, host, port,
                                               network, tenant)
        self.assertFalse(provisioned, 'The vm should be deleted')

    def test_num_vm_is_valid(self):
        # Remember three VMs, forget two; exactly one record remains
        tenant = 'test'
        network = '123'
        prefix = 'port-id'
        host = 'ubuntu1'
        to_remember = ['vm1', 'vm2', 'vm3']
        to_forget = ['vm2', 'vm1']
        for vm in to_remember:
            db_lib.remember_vm(vm, host, prefix + vm, network, tenant)
        for vm in to_forget:
            db_lib.forget_port(prefix + vm, host)
        num_vms = len(db_lib.get_vms(tenant))
        expected = len(to_remember) - len(to_forget)
        self.assertEqual(expected, num_vms,
                         'There should be %d records, '
                         'got %d records' % (expected, num_vms))
        # clean up afterwards
        db_lib.forget_port(prefix + vm, host)
class RealNetStorageAristaDriverTestCase(testlib_api.SqlTestCase):
"""Main test cases for Arista Mechanism driver.
Tests all mechanism driver APIs supported by Arista Driver. It invokes
all the APIs as they would be invoked in real world scenarios and
verifies the functionality.
"""
def setUp(self):
    """Build an AristaDriver wired to a mocked RPC client."""
    super(RealNetStorageAristaDriverTestCase, self).setUp()
    setup_valid_config()
    # The driver talks to a mocked RPC client, so no EOS is required.
    rpc = mock.MagicMock()
    self.fake_rpc = rpc
    self.drv = mechanism_arista.AristaDriver(rpc)
def tearDown(self):
    """No driver-specific cleanup; defer to the base class."""
    super(RealNetStorageAristaDriverTestCase, self).tearDown()
def test_create_and_delete_ports(self):
tenant_id = 'ten-1'
network_id = 'net1-id'
segmentation_id = 1001
vms = ['vm1', 'vm2', 'vm3']
network_context = utils.get_network_context(tenant_id,
network_id,
segmentation_id)
self.drv.create_network_precommit(network_context)
for vm_id in vms:
port_id = '%s_%s' % (vm_id, 101)
port_context = utils.get_port_context(tenant_id,
network_id,
vm_id,
network_context,
port_id=port_id)
self.drv.update_port_precommit(port_context)
vm_list = db_lib.get_vms(tenant_id)
provisioned_vms = len(vm_list)
expected_vms = len(vms)
self.assertEqual(expected_vms, provisioned_vms,
'There should be %d '
'hosts, not %d' % (expected_vms, provisioned_vms))
# Now test the delete ports
for vm_id in vms:
port_id = '%s_%s' % (vm_id, 101)
port_context = utils.get_port_context(tenant_id,
network_id,
vm_id,
network_context,
port_id=port_id)
self.drv.delete_port_precommit(port_context)