Fix shared networks in Arista ML2 driver

When a shared network is created, it is stored
in arista_provisioned_nets under the tenant_id of
the network's owner. Later, when a different
tenant launches an instance on the shared network,
that table is searched for the network under the
requesting tenant's ID; the network is not found
there, and the request is rejected.
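
For illustration, a minimal sketch of the failing lookup, assuming the
provisioning check is keyed on (tenant_id, network_id) as the
is_network_provisioned() calls in the diff below suggest; the IDs are
hypothetical:

    # arista_provisioned_nets records a shared network only under its
    # owner's tenant_id (hypothetical data).
    provisioned_nets = {('owner-tenant', 'net-1')}

    def is_network_provisioned(tenant_id, network_id):
        # A lookup under the requesting tenant's ID misses networks
        # owned (and shared) by another tenant.
        return (tenant_id, network_id) in provisioned_nets

    print(is_network_provisioned('owner-tenant', 'net-1'))  # True
    print(is_network_provisioned('other-tenant', 'net-1'))  # False: rejected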

The fix: when a network is not found under the
requesting tenant's ID, check whether it is a
shared network. If it is, check the DB to ensure
that the owning tenant exists there; if so, the
request is valid and is not rejected. Otherwise
the request is rejected, as it specifies an
unknown tenant or network.
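
In the driver this becomes a two-step check, condensed here from the
port hunks in the diff below (tenant_id, network_id, and the rpc call
are taken from that surrounding context):

    # Try the tenant's own provisioned networks first; if that misses,
    # ask Neutron whether the network is shared. The helper returns the
    # owner's tenant_id (a truthy value) only for shared networks.
    net_provisioned = (
        db.is_network_provisioned(tenant_id, network_id) or
        self.ndb.get_shared_network_owner_id(network_id)
    )
    if vm_provisioned and net_provisioned:
        self.rpc.plug_port_into_network(device_id, ...)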

Change-Id: I8f25d8f84af844afdb3b607bd8ade32f0f5e81fb
Closes-bug: 1409176
Sukhdev 2015-01-09 17:02:53 -08:00
parent 75832ea45a
commit 141e7d19e2
3 changed files with 82 additions and 20 deletions


@@ -395,6 +395,14 @@ class NeutronNets(db_base_plugin_v2.NeutronDbPluginV2):
         return super(NeutronNets,
                      self).get_ports(self.admin_ctx, filters=filters) or []
 
+    def get_shared_network_owner_id(self, network_id):
+        filters = {'id': [network_id]}
+        nets = self.get_networks(self.admin_ctx, filters=filters) or []
+        if not nets:
+            return
+        if nets[0]['shared']:
+            return nets[0]['tenant_id']
+
     def _get_network(self, tenant_id, network_id):
         filters = {'tenant_id': [tenant_id],
                    'id': [network_id]}


@@ -243,6 +243,10 @@ class AristaRPCWrapper(object):
                 network['segmentation_id'] = DEFAULT_VLAN
             append_cmd('segment 1 type vlan id %d' %
                        network['segmentation_id'])
+            if network['shared']:
+                append_cmd('shared')
+            else:
+                append_cmd('no shared')
 
         cmds.extend(self._get_exit_mode_cmds(['segment', 'network', 'tenant']))
         self._run_openstack_cmds(cmds)
@@ -635,6 +639,11 @@ class SyncService(object):
         # operations fail, then force_sync is set to true
         self._force_sync = False
 
+        # To support shared networks, split the sync loop in two parts:
+        # In the first loop, delete unwanted VMs and networks and update
+        # networks; in the second loop, update VMs. This ensures that
+        # networks for all tenants are updated before VMs are updated.
+        vms_to_update = {}
         for tenant in db_tenants:
             db_nets = db.get_networks(tenant)
             db_vms = db.get_vms(tenant)
@@ -656,7 +665,7 @@ class SyncService(object):
             nets_to_update = db_nets_key_set.difference(eos_nets_key_set)
 
             # Find the VMs that are present in Neutron DB, but not on EOS
-            vms_to_update = db_vms_key_set.difference(eos_vms_key_set)
+            vms_to_update[tenant] = db_vms_key_set.difference(eos_vms_key_set)
 
             try:
                 if vms_to_delete:
@@ -675,18 +684,29 @@ class SyncService(object):
                          'segmentation_id':
                              db_nets[net_id]['segmentationTypeId'],
                          'network_name':
-                             neutron_nets.get(net_id, {'name': ''})['name'], }
+                             neutron_nets.get(net_id, {'name': ''})['name'],
+                         'shared':
+                             neutron_nets.get(net_id,
+                                              {'shared': False})['shared']}
                         for net_id in nets_to_update
                     ]
                     self._rpc.create_network_bulk(tenant, networks)
-                if vms_to_update:
-                    # Filter the ports to only the vms that we are interested
-                    # in.
-                    vm_ports = [
-                        port for port in self._ndb.get_all_ports_for_tenant(
-                            tenant) if port['device_id'] in vms_to_update
-                    ]
-                    self._rpc.create_vm_port_bulk(tenant, vm_ports, db_vms)
             except arista_exc.AristaRpcError:
                 LOG.warning(EOS_UNREACHABLE_MSG)
                 self._force_sync = True
+
+        # Now update the VMs
+        for tenant in vms_to_update:
+            try:
+                # Filter the ports to only the vms that we are interested
+                # in.
+                vm_ports = [
+                    port for port in self._ndb.get_all_ports_for_tenant(
+                        tenant) if port['device_id'] in vms_to_update[tenant]
+                ]
+                if vm_ports:
+                    db_vms = db.get_vms(tenant)
+                    self._rpc.create_vm_port_bulk(tenant, vm_ports, db_vms)
+            except arista_exc.AristaRpcError:
+                LOG.warning(EOS_UNREACHABLE_MSG)
+                self._force_sync = True
@@ -758,13 +778,15 @@ class AristaDriver(driver_api.MechanismDriver):
         tenant_id = network['tenant_id']
         segments = context.network_segments
         vlan_id = segments[0]['segmentation_id']
+        shared_net = network['shared']
         with self.eos_sync_lock:
             if db.is_network_provisioned(tenant_id, network_id):
                 try:
                     network_dict = {
                         'network_id': network_id,
                         'segmentation_id': vlan_id,
-                        'network_name': network_name}
+                        'network_name': network_name,
+                        'shared': shared_net}
                     self.rpc.create_network(tenant_id, network_dict)
                 except arista_exc.AristaRpcError:
                     LOG.info(EOS_UNREACHABLE_MSG)
@@ -793,18 +815,21 @@ class AristaDriver(driver_api.MechanismDriver):
         """
         new_network = context.current
         orig_network = context.original
-        if new_network['name'] != orig_network['name']:
+        if ((new_network['name'] != orig_network['name']) or
+           (new_network['shared'] != orig_network['shared'])):
             network_id = new_network['id']
             network_name = new_network['name']
             tenant_id = new_network['tenant_id']
             vlan_id = new_network['provider:segmentation_id']
+            shared_net = new_network['shared']
             with self.eos_sync_lock:
                 if db.is_network_provisioned(tenant_id, network_id):
                     try:
                         network_dict = {
                             'network_id': network_id,
                             'segmentation_id': vlan_id,
-                            'network_name': network_name}
+                            'network_name': network_name,
+                            'shared': shared_net}
                         self.rpc.create_network(tenant_id, network_dict)
                     except arista_exc.AristaRpcError:
                         LOG.info(EOS_UNREACHABLE_MSG)
@@ -858,6 +883,7 @@ class AristaDriver(driver_api.MechanismDriver):
         network_id = port['network_id']
         tenant_id = port['tenant_id']
         with self.eos_sync_lock:
+            db.remember_tenant(tenant_id)
             db.remember_vm(device_id, host, port_id,
                            network_id, tenant_id)
@@ -886,8 +912,12 @@ class AristaDriver(driver_api.MechanismDriver):
                                                   port_id,
                                                   network_id,
                                                   tenant_id)
-            net_provisioned = db.is_network_provisioned(tenant_id,
-                                                        network_id)
+            # If the network does not exist under this tenant, it may be
+            # a shared network. Get the shared network's owner Id.
+            net_provisioned = (
+                db.is_network_provisioned(tenant_id, network_id) or
+                self.ndb.get_shared_network_owner_id(network_id)
+            )
             if vm_provisioned and net_provisioned:
                 try:
                     self.rpc.plug_port_into_network(device_id,
@@ -948,9 +978,13 @@ class AristaDriver(driver_api.MechanismDriver):
                                                       port_id,
                                                       network_id,
                                                       tenant_id)
-                net_provisioned = db.is_network_provisioned(tenant_id,
-                                                            network_id,
-                                                            segmentation_id)
+                # If the network does not exist under this tenant, it may be
+                # a shared network. Get the shared network's owner Id.
+                net_provisioned = (
+                    db.is_network_provisioned(tenant_id, network_id,
+                                              segmentation_id) or
+                    self.ndb.get_shared_network_owner_id(network_id)
+                )
                 if vm_provisioned and net_provisioned:
                     try:
                         self.rpc.plug_port_into_network(device_id,


@@ -311,12 +311,30 @@ class PositiveRPCWrapperValidConfigTestCase(base.BaseTestCase):
         network = {
             'network_id': 'net-id',
             'network_name': 'net-name',
-            'segmentation_id': 123}
+            'segmentation_id': 123,
+            'shared': False}
         self.drv.create_network(tenant_id, network)
         cmds = ['enable', 'configure', 'cvx', 'service openstack',
                 'region RegionOne',
                 'tenant ten-1', 'network id net-id name "net-name"',
                 'segment 1 type vlan id 123',
+                'no shared',
                 'exit', 'exit', 'exit', 'exit', 'exit', 'exit']
         self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
 
+    def test_create_shared_network(self):
+        tenant_id = 'ten-1'
+        network = {
+            'network_id': 'net-id',
+            'network_name': 'net-name',
+            'segmentation_id': 123,
+            'shared': True}
+        self.drv.create_network(tenant_id, network)
+        cmds = ['enable', 'configure', 'cvx', 'service openstack',
+                'region RegionOne',
+                'tenant ten-1', 'network id net-id name "net-name"',
+                'segment 1 type vlan id 123',
+                'shared',
+                'exit', 'exit', 'exit', 'exit', 'exit', 'exit']
+        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
+
@@ -326,7 +344,8 @@ class PositiveRPCWrapperValidConfigTestCase(base.BaseTestCase):
         networks = [{
             'network_id': 'net-id-%d' % net_id,
             'network_name': 'net-name-%d' % net_id,
-            'segmentation_id': net_id} for net_id in range(1, num_networks)
+            'segmentation_id': net_id,
+            'shared': True} for net_id in range(1, num_networks)
         ]
 
         self.drv.create_network_bulk(tenant_id, networks)
@@ -340,6 +359,7 @@ class PositiveRPCWrapperValidConfigTestCase(base.BaseTestCase):
             cmds.append('network id net-id-%d name "net-name-%d"' %
                         (net_id, net_id))
             cmds.append('segment 1 type vlan id %d' % net_id)
+            cmds.append('shared')
 
         cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack',
                                               'cvx', 'configure', 'enable']))