Add lock to pools population

We have seen issues where the Neutron Server was not
able to respond in time during the creation of Ports
in bulk. In order to avoid this type of failure and
improve the time taken for Pods to be created, this
commit includes a lock that only allows one population
to happen at a time for a specific pool, instead of
controlling all the pools' population with the
frequency setting.

Change-Id: I76f5d08e744b7bbc64093ab1a54084a1f97d4aa7
Maysa Macedo
2021-12-03 09:24:45 +00:00
committed by Michał Dulko
parent f8c0b736c1
commit e2bfcaca2b
3 changed files with 105 additions and 121 deletions
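The diff touches three files: the ports-pool installation document, the VIF pool driver, and the driver's unit tests. The heart of the change is a per-pool lock, created on demand in a dict that is itself guarded by an outer lock, so that only one bulk port-creation request can run for a given pool at a time. A minimal standalone sketch of that pattern (all names here are illustrative, not the driver's API):

    import collections
    import threading

    POPULATE_TIMEOUT = 420  # seconds, mirroring POPULATE_POOL_TIMEOUT below

    _lock = threading.Lock()  # guards the dict of per-pool locks
    _pool_locks = collections.defaultdict(threading.Lock)

    def populate(pool_key):
        # Fetch (or lazily create) this pool's lock while holding the
        # outer lock, so first-time accesses cannot race on the dict.
        with _lock:
            lock = _pool_locks[pool_key]
        # Only one caller populates a given pool; the rest block until
        # it finishes or give up once the timeout elapses.
        if not lock.acquire(timeout=POPULATE_TIMEOUT):
            return False
        try:
            print(f"populating {pool_key}")  # bulk port creation goes here
        finally:
            lock.release()
        return True

    print(populate(('host-a', 'project-1')))  # True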


@@ -39,12 +39,8 @@ in a bulk request upon pool population, can be modified:
 Note this value should be smaller than the ports_pool_max (if the
 ports_pool_max is enabled).
 
-Finally, the interval between pools updating actions (in seconds) can be
-modified, and it should be adjusted based on your specific deployment, e.g., if
-the port creation actions are slow, it is desirable to raise it in order not to
-have overlapping actions. As a simple rule of thumbs, the frequency should be
-at least as large as the time needed to perform the bulk requests (ports
-creation, including subports attachment for the nested case):
+Finally, to define the frequency (in seconds) at which ports are recycled
+so that they can be reused by future pods, configure the following option:
 
 .. code-block:: ini
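The ini block itself sits outside this hunk's context lines. Judging from the option named in the driver docstring further down, it presumably sets ports_pool_update_frequency under the [vif_pool] group, along these lines (the value is illustrative):

    [vif_pool]
    ports_pool_update_frequency = 20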


@@ -17,6 +17,7 @@ import abc
 import collections
 import eventlet
 import os
+import threading
 import time
 
 from kuryr.lib._i18n import _
@@ -105,6 +106,7 @@ VIF_TYPE_TO_DRIVER_MAPPING = {
 }
 
 NODE_PORTS_CLEAN_FREQUENCY = 600  # seconds
+POPULATE_POOL_TIMEOUT = 420  # seconds
 
 
 class NoopVIFPool(base.VIFPoolDriver):
@@ -146,6 +148,8 @@ class BaseVIFPool(base.VIFPoolDriver, metaclass=abc.ABCMeta):
     _last_update is a dictionary with the timestamp of the last population
     action for each pool. The keys are the pool_keys and the values are the
     timestamps.
+    _populate_pool_lock is a dict with the pool_key as key and a lock as value.
+    Also, there is a _lock to control access to the _populate_pool_lock dict.
 
     The following driver configuration options exist:
     - ports_pool_max: it specifies how many ports can be kept at each pool.
@@ -157,7 +161,7 @@ class BaseVIFPool(base.VIFPoolDriver, metaclass=abc.ABCMeta):
     - ports_pool_batch: target number of ports to be created in bulk requests
       when populating pools.
     - ports_pool_update_frequency: interval in seconds between ports pool
-      updates, both for populating pools as well as for recycling ports.
+      updates for recycling ports.
     """
 
     def __init__(self):
@@ -207,24 +211,28 @@ class BaseVIFPool(base.VIFPoolDriver, metaclass=abc.ABCMeta):
pool_key = self._get_pool_key(host_addr, project_id, None, subnets) pool_key = self._get_pool_key(host_addr, project_id, None, subnets)
# NOTE(maysams): It's possible that more recent Pods will retrieve
# the Ports from the pool that older Pods were waiting for. In case
# this happens, the event will be retried.
try: try:
return self._get_port_from_pool(pool_key, pod, subnets, return self._get_port_from_pool(pool_key, pod, subnets,
tuple(sorted(security_groups))) tuple(sorted(security_groups)))
except exceptions.ResourceNotReady: except exceptions.ResourceNotReady:
LOG.debug("Ports pool does not have available ports: %s", pool_key) LOG.debug("Ports pool does not have available ports: %s", pool_key)
# NOTE(dulek): We're passing raise_not_ready=False because this if self._populate_pool(pool_key, pod, subnets,
# will be run outside of handlers thread, so raising tuple(sorted(security_groups))):
# it will only result in an ugly log from eventlet. return self._get_port_from_pool(
eventlet.spawn(self._populate_pool, pool_key, pod, subnets, pool_key, pod, subnets, tuple(sorted(security_groups)))
tuple(sorted(security_groups)),
raise_not_ready=False)
raise raise
def _get_port_from_pool(self, pool_key, pod, subnets, security_groups): def _get_port_from_pool(self, pool_key, pod, subnets, security_groups):
raise NotImplementedError() raise NotImplementedError()
def _populate_pool(self, pool_key, pod, subnets, security_groups, def _get_populate_pool_lock(self, pool_key):
raise_not_ready=True): with self._lock:
return self._populate_pool_lock[pool_key]
def _populate_pool(self, pool_key, pod, subnets, security_groups):
# REVISIT(ltomasbo): Drop the subnets parameter and get the information # REVISIT(ltomasbo): Drop the subnets parameter and get the information
# from the pool_key, which will be required when multi-network is # from the pool_key, which will be required when multi-network is
# supported # supported
@@ -232,50 +240,44 @@ class BaseVIFPool(base.VIFPoolDriver, metaclass=abc.ABCMeta):
         if not self._recovered_pools:
             LOG.debug("Kuryr-controller not yet ready to populate pools.")
-            if raise_not_ready:
-                raise exceptions.ResourceNotReady(pod)
-            else:
-                return
-        now = time.time()
-        last_update = 0
-        pool_updates = self._last_update.get(pool_key)
-        if pool_updates:
-            last_update = pool_updates.get(security_groups, 0)
-        try:
-            if (now - oslo_cfg.CONF.vif_pool.ports_pool_update_frequency <
-                    last_update):
-                LOG.debug("Not enough time since the last pool update")
-                return
-        except AttributeError:
-            LOG.debug("Kuryr-controller not yet ready to populate pools.")
-            return
-        self._last_update[pool_key] = {security_groups: now}
-
-        pool_size = self._get_pool_size(pool_key)
-        if pool_size < oslo_cfg.CONF.vif_pool.ports_pool_min:
-            num_ports = max(oslo_cfg.CONF.vif_pool.ports_pool_batch,
-                            oslo_cfg.CONF.vif_pool.ports_pool_min - pool_size)
-            try:
-                vifs = self._drv_vif.request_vifs(
-                    pod=pod,
-                    project_id=pool_key[1],
-                    subnets=subnets,
-                    security_groups=security_groups,
-                    num_ports=num_ports)
-            except os_exc.SDKException as exc:
-                kubernetes.add_event(pod, 'FailToPopulateVIFPool',
-                                     f'There was an error during populating '
-                                     f'VIF pool for pod: {exc.message}',
-                                     type_='Warning')
-                raise
-
-            for vif in vifs:
-                self._existing_vifs[vif.id] = vif
-                self._available_ports_pools.setdefault(
-                    pool_key, {}).setdefault(
-                        security_groups, []).append(vif.id)
-            if not vifs:
-                self._last_update[pool_key] = {security_groups: last_update}
+            return False
+        ports_pool_min = oslo_cfg.CONF.vif_pool.ports_pool_min
+        lock = self._get_populate_pool_lock(pool_key)
+        # NOTE(maysams): Only allow one vifs request per pool at a time,
+        # timing out if it takes more than 420 sec.
+        if lock.acquire(timeout=POPULATE_POOL_TIMEOUT):
+            pool_size = self._get_pool_size(pool_key)
+            try:
+                if pool_size < ports_pool_min:
+                    num_ports = max(oslo_cfg.CONF.vif_pool.ports_pool_batch,
+                                    ports_pool_min - pool_size)
+                    try:
+                        vifs = self._drv_vif.request_vifs(
+                            pod=pod,
+                            project_id=pool_key[1],
+                            subnets=subnets,
+                            security_groups=security_groups,
+                            num_ports=num_ports)
+                    except os_exc.SDKException as exc:
+                        kubernetes.add_event(
+                            pod, 'FailToPopulateVIFPool',
+                            f'There was an error during populating VIF pool '
+                            f'for pod: {exc.message}', type_='Warning')
+                        raise
+
+                    for vif in vifs:
+                        self._existing_vifs[vif.id] = vif
+                        self._available_ports_pools.setdefault(
+                            pool_key, {}).setdefault(
+                                security_groups, []).append(vif.id)
+                    if vifs:
+                        now = time.time()
+                        self._last_update[pool_key] = {security_groups: now}
+            finally:
+                lock.release()
+        else:
+            return False
+        return True
 
     def release_vif(self, pod, vif, project_id, security_groups,
                     host_addr=None):
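The new code leans on the timeout form of threading.Lock.acquire(), which blocks until the lock is obtained or the timeout expires and returns a boolean; that is what lets _populate_pool return False instead of hanging indefinitely behind a stalled bulk request. The standard-library behaviour, for reference:

    import threading

    lock = threading.Lock()
    lock.acquire()                    # lock is now held, as if by another caller
    print(lock.acquire(timeout=0.1))  # False: gave up after ~0.1 s
    lock.release()
    print(lock.acquire(timeout=0.1))  # True: a free lock is taken immediately
    lock.release()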
@@ -376,6 +378,8 @@ class BaseVIFPool(base.VIFPoolDriver, metaclass=abc.ABCMeta):
         self._existing_vifs = collections.defaultdict()
         self._recyclable_ports = collections.defaultdict()
         self._last_update = collections.defaultdict()
+        self._lock = threading.Lock()
+        self._populate_pool_lock = collections.defaultdict(threading.Lock)
 
     def _get_trunks_info(self):
         """Returns information about trunks and their subports.
@@ -639,11 +643,8 @@ class NeutronVIFPool(BaseVIFPool):
             os_net = clients.get_network_client()
             os_net.update_port(port_id, name=c_utils.get_port_name(pod),
                                device_id=pod['metadata']['uid'])
-        # check if the pool needs to be populated
-        if (self._get_pool_size(pool_key) <
-                oslo_cfg.CONF.vif_pool.ports_pool_min):
-            eventlet.spawn(self._populate_pool, pool_key, pod, subnets,
-                           security_groups)
+        eventlet.spawn(self._populate_pool, pool_key, pod, subnets,
+                       security_groups)
         # Add protection from port_id not in existing_vifs
         try:
             port = self._existing_vifs[port_id]
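Since the pool-size check now happens inside _populate_pool, under the pool's lock, the caller can spawn the green thread unconditionally; duplicate spawns simply queue on the lock and find the pool already filled. eventlet.spawn() schedules a callable in a cooperative green thread and returns a GreenThread whose wait() yields the result, e.g.:

    import eventlet

    def work(name):
        return f"populated {name}"

    gt = eventlet.spawn(work, 'pool-1')  # runs cooperatively, not blocking here
    print(gt.wait())                     # 'populated pool-1'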
@@ -805,6 +806,11 @@ class NeutronVIFPool(BaseVIFPool):
                     os_net.delete_port(port_id)
 
         self._available_ports_pools[pool_key] = {}
+        with self._lock:
+            try:
+                del self._populate_pool_lock[pool_key]
+            except KeyError:
+                pass
 
 
 class NestedVIFPool(BaseVIFPool):
@@ -905,11 +911,8 @@ class NestedVIFPool(BaseVIFPool):
             os_net.update_port(port_id, security_groups=list(security_groups))
         if config.CONF.kubernetes.port_debug:
             os_net.update_port(port_id, name=c_utils.get_port_name(pod))
-        # check if the pool needs to be populated
-        if (self._get_pool_size(pool_key) <
-                oslo_cfg.CONF.vif_pool.ports_pool_min):
-            eventlet.spawn(self._populate_pool, pool_key, pod, subnets,
-                           security_groups)
+        eventlet.spawn(self._populate_pool, pool_key, pod, subnets,
+                       security_groups)
         # Add protection from port_id not in existing_vifs
         try:
             port = self._existing_vifs[port_id]
@@ -1190,6 +1193,11 @@ class NestedVIFPool(BaseVIFPool):
                     os_net.delete_port(port_id)
 
         self._available_ports_pools[pool_key] = {}
+        with self._lock:
+            try:
+                del self._populate_pool_lock[pool_key]
+            except KeyError:
+                pass
 
 
 class MultiVIFPool(base.VIFPoolDriver):


@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import collections
+import threading
 from unittest import mock
 import uuid
@@ -97,8 +98,7 @@ class BaseVIFPool(test_base.TestCase):
         self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id,
                                               subnets, security_groups))
 
-    @mock.patch('eventlet.spawn')
-    def test_request_vif_empty_pool(self, m_eventlet):
+    def test_request_vif_empty_pool(self):
         cls = vif_pool.BaseVIFPool
         m_driver = mock.MagicMock(spec=cls)
@@ -123,7 +123,7 @@ class BaseVIFPool(test_base.TestCase):
         self.assertRaises(exceptions.ResourceNotReady, cls.request_vif,
                           m_driver, pod, project_id, subnets, security_groups)
-        m_eventlet.assert_called_once()
+        m_driver._populate_pool.assert_called_once()
 
     def test_request_vif_pod_without_host(self):
         cls = vif_pool.BaseVIFPool
@@ -199,9 +199,9 @@ class BaseVIFPool(test_base.TestCase):
         pool_key = (mock.sentinel.host_addr, project_id)
         m_driver._recovered_pools = False
 
-        self.assertRaises(exceptions.ResourceNotReady, cls._populate_pool,
-                          m_driver, pool_key, pod, subnets,
-                          tuple(security_groups))
+        self.assertFalse(cls._populate_pool(
+            m_driver, pool_key, pod, subnets,
+            tuple(security_groups)))
         m_driver._drv_vif.request_vifs.assert_not_called()
 
     @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@@ -224,12 +224,15 @@ class BaseVIFPool(test_base.TestCase):
         m_driver._recovered_pools = False
 
         cls._populate_pool(m_driver, pool_key, pod, subnets,
-                           tuple(security_groups), raise_not_ready=False)
+                           tuple(security_groups))
         m_driver._drv_vif.request_vifs.assert_not_called()
 
     @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
     @mock.patch('time.time', return_value=0)
-    def test__populate_pool_no_update(self, m_time, m_get_kubernetes_client):
+    @ddt.data((neutron_vif.NeutronPodVIFDriver),
+              (nested_vlan_vif.NestedVlanPodVIFDriver))
+    def test__populate_pool_no_update(self, m_vif_driver, m_time,
+                                      m_get_kubernetes_client):
         cls = vif_pool.BaseVIFPool
         m_driver = mock.MagicMock(spec=cls)
@@ -238,41 +241,17 @@ class BaseVIFPool(test_base.TestCase):
         subnets = mock.sentinel.subnets
         security_groups = 'test-sg'
         pool_key = (mock.sentinel.host_addr, project_id)
+        m_driver._get_pool_size.return_value = 4
 
-        oslo_cfg.CONF.set_override('ports_pool_update_frequency',
-                                   15,
-                                   group='vif_pool')
-        m_driver._last_update = {pool_key: {tuple(security_groups): 1}}
-        m_driver._recovered_pools = True
-
-        cls._populate_pool(m_driver, pool_key, pod, subnets,
-                           tuple(security_groups))
-        m_driver._get_pool_size.assert_not_called()
-
-    @mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
-    @mock.patch('time.time', return_value=50)
-    @ddt.data((neutron_vif.NeutronPodVIFDriver),
-              (nested_vlan_vif.NestedVlanPodVIFDriver))
-    def test__populate_pool_large_pool(self, m_vif_driver, m_time,
-                                       m_get_kubernetes_client):
-        cls = vif_pool.BaseVIFPool
-        m_driver = mock.MagicMock(spec=cls)
-
         cls_vif_driver = m_vif_driver
         vif_driver = mock.MagicMock(spec=cls_vif_driver)
         m_driver._drv_vif = vif_driver
 
-        pod = mock.sentinel.pod
-        project_id = str(uuid.uuid4())
-        subnets = mock.sentinel.subnets
-        security_groups = 'test-sg'
-        pool_key = (mock.sentinel.host_addr, project_id)
-
         oslo_cfg.CONF.set_override('ports_pool_update_frequency',
                                    15,
                                    group='vif_pool')
         oslo_cfg.CONF.set_override('ports_pool_min',
-                                   5,
+                                   3,
                                    group='vif_pool')
         m_driver._last_update = {pool_key: {tuple(security_groups): 1}}
         m_driver._get_pool_size.return_value = 10
@@ -280,7 +259,7 @@ class BaseVIFPool(test_base.TestCase):
         cls._populate_pool(m_driver, pool_key, pod, subnets,
                            tuple(security_groups))
-        m_driver._get_pool_size.assert_called_once()
+        m_driver._get_pool_size.assert_called()
         m_driver._drv_vif.request_vifs.assert_not_called()
 
     def test_release_vif(self):
@@ -480,31 +459,22 @@ class NeutronVIFPool(test_base.TestCase):
         subnets = mock.sentinel.subnets
         security_groups = 'test-sg'
+        oslo_cfg.CONF.set_override('port_debug',
+                                   True, group='kubernetes')
         pod = get_pod_obj()
+        m_get_port_name.return_value = get_pod_name(pod)
 
         m_driver._available_ports_pools = {
             pool_key: {tuple(security_groups): collections.deque([port_id])}}
         m_driver._existing_vifs = {port_id: port}
         m_get_port_name.return_value = get_pod_name(pod)
-        oslo_cfg.CONF.set_override('ports_pool_min',
-                                   5,
-                                   group='vif_pool')
-        oslo_cfg.CONF.set_override('port_debug',
-                                   True,
-                                   group='kubernetes')
-        oslo_cfg.CONF.set_override('port_debug',
-                                   True,
-                                   group='kubernetes')
-        pool_length = 5
-        m_driver._get_pool_size.return_value = pool_length
 
         self.assertEqual(port, cls._get_port_from_pool(
             m_driver, pool_key, pod, subnets, tuple(security_groups)))
 
         os_net.update_port.assert_called_once_with(
             port_id, name=get_pod_name(pod), device_id=pod['metadata']['uid'])
-        m_eventlet.assert_not_called()
+        m_eventlet.assert_called()
 
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_port_name')
     @mock.patch('eventlet.spawn')
@@ -583,8 +553,6 @@ class NeutronVIFPool(test_base.TestCase):
         oslo_cfg.CONF.set_override('port_debug',
                                    False,
                                    group='kubernetes')
-        pool_length = 5
-        m_driver._get_pool_size.return_value = pool_length
 
         m_driver._available_ports_pools = {
             pool_key: {tuple(security_groups): collections.deque([]),
@@ -598,7 +566,7 @@ class NeutronVIFPool(test_base.TestCase):
         os_net.update_port.assert_called_once_with(
             port_id, security_groups=list(security_groups))
-        m_eventlet.assert_not_called()
+        m_eventlet.assert_called()
 
     @mock.patch('eventlet.spawn')
     def test__get_port_from_pool_empty_pool_reuse_no_update_info(self,
@@ -633,7 +601,7 @@ class NeutronVIFPool(test_base.TestCase):
         os_net.update_port.assert_called_once_with(
             port_id, security_groups=list(security_groups))
-        m_eventlet.assert_not_called()
+        m_eventlet.assert_called()
 
     def test__get_port_from_pool_empty_pool_reuse_no_ports(self):
         cls = vif_pool.NeutronVIFPool
@@ -931,6 +899,9 @@ class NeutronVIFPool(test_base.TestCase):
         port_id = str(uuid.uuid4())
         m_driver._available_ports_pools = {pool_key: {
             tuple(['security_group']): [port_id]}}
+        m_driver._lock = threading.Lock()
+        m_driver._populate_pool_lock = {
+            pool_key: mock.MagicMock(spec=threading.Lock())}
         m_driver._existing_vifs = {port_id: mock.sentinel.vif}
         m_driver._recovered_pools = True
@@ -968,6 +939,9 @@ class NeutronVIFPool(test_base.TestCase):
         port_id = str(uuid.uuid4())
         m_driver._available_ports_pools = {pool_key: {
             tuple(['security_group']): [port_id]}}
+        m_driver._lock = threading.Lock()
+        m_driver._populate_pool_lock = {
+            pool_key: mock.MagicMock(spec=threading.Lock())}
         m_driver._existing_vifs = {}
         m_driver._recovered_pools = True
@@ -1053,7 +1027,7 @@ class NestedVIFPool(test_base.TestCase):
         os_net.update_port.assert_called_once_with(
             port_id, name=get_pod_name(pod))
-        m_eventlet.assert_not_called()
+        m_eventlet.assert_called()
 
     @mock.patch('kuryr_kubernetes.controller.drivers.utils.get_port_name')
     @mock.patch('eventlet.spawn')
@@ -1147,7 +1121,7 @@ class NestedVIFPool(test_base.TestCase):
         os_net.update_port.assert_called_once_with(
             port_id, security_groups=list(security_groups))
-        m_eventlet.assert_not_called()
+        m_eventlet.assert_called()
 
     @mock.patch('eventlet.spawn')
     def test__get_port_from_pool_empty_pool_reuse_no_update_info(self,
@@ -1182,7 +1156,7 @@ class NestedVIFPool(test_base.TestCase):
         os_net.update_port.assert_called_once_with(
             port_id, security_groups=list(security_groups))
-        m_eventlet.assert_not_called()
+        m_eventlet.assert_called()
 
     def test__get_port_from_pool_empty_pool_reuse_no_ports(self):
         cls = vif_pool.NestedVIFPool
@@ -1836,6 +1810,9 @@ class NestedVIFPool(test_base.TestCase):
         vif.vlan_id = vlan_id
         m_driver._available_ports_pools = {pool_key: {
             tuple(['security_group']): [port_id]}}
+        m_driver._lock = threading.Lock()
+        m_driver._populate_pool_lock = {
+            pool_key: mock.MagicMock(spec=threading.Lock())}
         m_driver._existing_vifs = {port_id: vif}
         m_driver._recovered_pools = True
@@ -1925,6 +1902,9 @@ class NestedVIFPool(test_base.TestCase):
         vif.vlan_id = vlan_id
         m_driver._available_ports_pools = {pool_key: {
             tuple(['security_group']): [port_id]}}
+        m_driver._lock = threading.Lock()
+        m_driver._populate_pool_lock = {
+            pool_key: mock.MagicMock(spec=threading.Lock())}
         m_driver._existing_vifs = {}
         m_driver._recovered_pools = True