Ensure non-overlapping cidrs in subnetpools without galera
_get_allocated_cidrs [1] locks only the subnets already allocated in a subnetpool (with mysql/postgresql at least). This ensures we don't allocate a cidr overlapping with existing cidrs, but nothing prevents a concurrent allocation from creating another subnet in the same subnetpool. This change replaces the lock on the subnetpool's subnets with a lock on the subnetpool itself: it prevents two subnets from being allocated concurrently in the same subnetpool and thus ensures non-overlapping cidrs within a subnetpool.

Moreover, this change fixes an issue with postgresql, which refuses to lock an empty select involving an outer join; this happens on the first subnet allocation in a subnetpool when no specific cidr is provided. Moving the lock ensures it is taken on a non-empty select.

This change does not ensure non-overlapping cidrs in subnetpools with galera, because galera does not support SELECT FOR UPDATE locks. A follow-up change will (try to?) remove the locks from subnet allocation [1] in order to ensure non-overlapping cidrs in subnetpools with galera as well.

[1] in neutron.ipam.subnet_alloc.SubnetAllocator

Closes-Bug: #1451558
Partial-Bug: #1451576
Change-Id: I73854f9863f44621ae0d89c5dc4893ccc16d07e4
parent 3425be06bf
commit 3682e3391f
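Conceptually, the patch serializes allocations by taking SELECT ... FOR UPDATE on the single subnetpool row before reading the allocated cidrs, so two allocators for the same pool cannot interleave their overlap checks. Below is a minimal, standalone sketch of that pattern, not the Neutron code: the Pool/Subnet models and the allocate() helper are illustrative assumptions, and with_for_update() is the current SQLAlchemy spelling of the with_lockmode('update') call used in the diff.

    # Illustrative sketch only (not Neutron code): serialize subnet allocation
    # per pool by locking the pool row, then checking the existing cidrs.
    import uuid

    import netaddr
    import sqlalchemy as sa
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()


    class Pool(Base):
        __tablename__ = 'pools'
        id = sa.Column(sa.String(36), primary_key=True)


    class Subnet(Base):
        __tablename__ = 'subnets'
        id = sa.Column(sa.String(36), primary_key=True)
        pool_id = sa.Column(sa.String(36), sa.ForeignKey('pools.id'))
        cidr = sa.Column(sa.String(64))


    def allocate(session, pool_id, cidr):
        """Allocate cidr in pool_id, refusing overlaps with existing subnets."""
        with session.begin():
            # SELECT ... FOR UPDATE on the pool row: a concurrent allocator for
            # the same pool blocks here until this transaction commits, so the
            # overlap check below sees every previously committed cidr.
            session.query(Pool.id).filter_by(id=pool_id).with_for_update().one()
            existing = [s.cidr for s in
                        session.query(Subnet).filter_by(pool_id=pool_id)]
            if netaddr.IPSet([cidr]) & netaddr.IPSet(existing):
                raise ValueError('%s overlaps an allocated cidr' % cidr)
            subnet = Subnet(id=str(uuid.uuid4()), pool_id=pool_id, cidr=cidr)
            session.add(subnet)
            return subnet


    # Example usage (SQLite ignores FOR UPDATE, but the flow is the same on
    # MySQL/PostgreSQL; per the commit message, Galera does not honor it):
    # engine = sa.create_engine('sqlite://')
    # Base.metadata.create_all(engine)
    # with Session(engine) as session:
    #     session.add(Pool(id='pool-1'))
    #     session.commit()
    #     allocate(session, 'pool-1', '10.0.0.0/24')

The patched _allocate_any_subnet and _allocate_specific_subnet in the diff below follow the same shape: open one transaction, lock the pool, run the quota and cidr checks, then insert.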
neutron/ipam/subnet_alloc.py

@@ -38,9 +38,20 @@ class SubnetAllocator(driver.Pool):
         super(SubnetAllocator, self).__init__(subnetpool, context)
         self._sp_helper = SubnetPoolHelper()
 
+    def _lock_subnetpool(self):
+        """Lock subnetpool associated row.
+
+        This method disallows to allocate concurrently 2 subnets in the same
+        subnetpool, it's required to ensure non-overlapping cidrs in the same
+        subnetpool.
+        """
+        # FIXME(cbrandily): not working with Galera
+        (self._context.session.query(models_v2.SubnetPool.id).
+         filter_by(id=self._subnetpool['id']).
+         with_lockmode('update').first())
+
     def _get_allocated_cidrs(self):
-        query = self._context.session.query(
-            models_v2.Subnet).with_lockmode('update')
+        query = self._context.session.query(models_v2.Subnet)
         subnets = query.filter_by(subnetpool_id=self._subnetpool['id'])
         return (x.cidr for x in subnets)
 
@@ -62,8 +73,7 @@ class SubnetAllocator(driver.Pool):
         subnetpool_id = self._subnetpool['id']
         tenant_id = self._subnetpool['tenant_id']
         with self._context.session.begin(subtransactions=True):
-            qry = self._context.session.query(
-                models_v2.Subnet).with_lockmode('update')
+            qry = self._context.session.query(models_v2.Subnet)
             allocations = qry.filter_by(subnetpool_id=subnetpool_id,
                                         tenant_id=tenant_id)
             value = 0
@@ -88,6 +98,7 @@ class SubnetAllocator(driver.Pool):
 
     def _allocate_any_subnet(self, request):
         with self._context.session.begin(subtransactions=True):
+            self._lock_subnetpool()
             self._check_subnetpool_tenant_quota(request.tenant_id,
                                                 request.prefixlen)
             prefix_pool = self._get_available_prefix_list()
@@ -111,6 +122,7 @@ class SubnetAllocator(driver.Pool):
 
     def _allocate_specific_subnet(self, request):
         with self._context.session.begin(subtransactions=True):
+            self._lock_subnetpool()
             self._check_subnetpool_tenant_quota(request.tenant_id,
                                                 request.prefixlen)
             cidr = request.subnet_cidr