Fix random failure of test_init_monitoring test

The existing unit test case [1] fails randomly [2]
when the test modules `test_plugin` and `test_monitoring`
run in a single worker.

Reason: the VNFMonitor class uses the singleton pattern.
In the above test modules the same VNFMonitor object is
returned from __new__, but __init__ is still called on it
each time.
Since the `_hosting_vnfs` class-level attribute is not
reinitialized in the __init__ method, when we access
`_hosting_vnfs` in the second test, `test_plugin`, it finds
two vnf objects.
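
For illustration, here is a minimal, self-contained sketch (a hypothetical
VNFMonitorSketch class, not the actual Tacker code) of how a singleton with a
class-level dict leaks state between two tests that run in the same process:

class VNFMonitorSketch(object):
    _hosting_vnfs = {}   # class-level attribute: one dict shared by all instances
    _instance = None

    def __new__(cls):
        # Singleton: every construction returns the same object.
        if cls._instance is None:
            cls._instance = super(VNFMonitorSketch, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        # __init__ still runs on every construction, but it never resets
        # _hosting_vnfs, so entries added by an earlier test survive.
        pass

    def add_hosting_vnf(self, vnf_id, vnf):
        VNFMonitorSketch._hosting_vnfs[vnf_id] = vnf


# The first test module adds a VNF ...
VNFMonitorSketch().add_hosting_vnf('vnf-1', object())
# ... the second test module adds its own and unexpectedly sees both:
VNFMonitorSketch().add_hosting_vnf('vnf-2', object())
assert len(VNFMonitorSketch._hosting_vnfs) == 2   # two vnf objects, as in the bug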

This patch fixes the issue by reinitializing the
`_hosting_vnfs` class-level attribute to an empty dict in
the test_init_monitoring unit test.
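
An alternative placement for the same reset, sketched below with plain
unittest and a stand-in _SharedState class rather than the Tacker modules:
resetting the shared attribute in setUp would isolate every test in the
class, whereas the patch below does it inline in test_init_monitoring.

import unittest


class _SharedState(object):
    # Stand-in for monitor.VNFMonitor: a class-level dict holds shared state.
    _hosting_vnfs = {}


class ResetSharedStateTest(unittest.TestCase):
    def setUp(self):
        super(ResetSharedStateTest, self).setUp()
        # Rebind the class attribute to a fresh dict so leftovers from other
        # tests in the same process cannot leak into this one.
        _SharedState._hosting_vnfs = dict()

    def test_sees_only_its_own_entries(self):
        _SharedState._hosting_vnfs['vnf-1'] = object()
        self.assertEqual(1, len(_SharedState._hosting_vnfs))


if __name__ == '__main__':
    unittest.main()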

[1]: https://github.com/openstack/tacker/blob/master/tacker/tests/unit/vnfm/test_plugin.py#L75
[2]: http://logs.openstack.org/10/609610/9/check/openstack-tox-py35/b99de12/testr_results.html.gz

Change-Id: I2a5933ed21e478e860cb12b378a4368f9402291d
Author: bhagyashris, 2019-01-16 17:35:35 +09:00 (committed by niraj singh)
parent a3f0b6d4e9
commit 4b9bcfeeef
3 changed files with 12 additions and 5 deletions

tacker/tests/unit/vnfm/test_plugin.py

@@ -30,6 +30,7 @@ from tacker.extensions import vnfm
from tacker.plugins.common import constants
from tacker.tests.unit.db import base as db_base
from tacker.tests.unit.db import utils
+from tacker.vnfm import monitor
from tacker.vnfm import plugin
@@ -111,6 +112,12 @@ class TestVNFMPluginMonitor(db_base.SqlTestCase):
}]
mock_get_vnfs.return_value = vnfs
+# NOTE(bhagyashris): The VNFMonitor class uses a singleton pattern
+# and '_hosting_vnfs' is defined as a class-level attribute.
+# If one of the unit tests adds a VNF to monitor, it will show up here,
+# provided both unit tests run in the same process.
+# Hence, you must reinitialize '_hosting_vnfs' to an empty dict.
+monitor.VNFMonitor._hosting_vnfs = dict()
vnfm_plugin = plugin.VNFMPlugin()
hosting_vnfs = vnfm_plugin._vnf_monitor._hosting_vnfs.values()
hosting_vnf = hosting_vnfs[0]['vnf']

tacker/vnfm/monitor.py

@@ -95,7 +95,7 @@ class VNFMonitor(object):
time.sleep(self._status_check_intvl)
with self._lock:
-for hosting_vnf in self._hosting_vnfs.values():
+for hosting_vnf in VNFMonitor._hosting_vnfs.values():
if hosting_vnf.get('dead', False):
LOG.debug('monitor skips dead vnf %s', hosting_vnf)
continue
@@ -124,7 +124,7 @@ class VNFMonitor(object):
'ips': new_vnf['management_ip_addresses']})
new_vnf['boot_at'] = timeutils.utcnow()
with self._lock:
-self._hosting_vnfs[new_vnf['id']] = new_vnf
+VNFMonitor._hosting_vnfs[new_vnf['id']] = new_vnf
attrib_dict = new_vnf['vnf']['attributes']
mon_policy_dict = attrib_dict['monitoring_policy']
@@ -136,7 +136,7 @@ class VNFMonitor(object):
def delete_hosting_vnf(self, vnf_id):
LOG.debug('deleting vnf_id %(vnf_id)s', {'vnf_id': vnf_id})
with self._lock:
-hosting_vnf = self._hosting_vnfs.pop(vnf_id, None)
+hosting_vnf = VNFMonitor._hosting_vnfs.pop(vnf_id, None)
if hosting_vnf:
LOG.debug('deleting vnf_id %(vnf_id)s, Mgmt IP %(ips)s',
{'vnf_id': vnf_id,
@@ -179,7 +179,7 @@ class VNFMonitor(object):
hosting_vnf['action_cb'](action)
def mark_dead(self, vnf_id):
-self._hosting_vnfs[vnf_id]['dead'] = True
+VNFMonitor._hosting_vnfs[vnf_id]['dead'] = True
def _invoke(self, driver, **kwargs):
method = inspect.stack()[1][3]

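The monitor.py hunks above also replace `self._hosting_vnfs` with
`VNFMonitor._hosting_vnfs`. The following sketch (a hypothetical Registry
class, not Tacker code) shows the Python attribute-lookup behaviour involved:
reads through `self` fall back to the class attribute anyway, but spelling
out the class name makes it explicit that the dict is shared class-level
state rather than per-instance data.

class Registry(object):
    _items = {}                      # class-level dict, shared by all instances

    def add_via_self(self, key, value):
        # Lookup of '_items' through 'self' falls back to the class
        # attribute, so this mutates the shared dict, which is easy to
        # misread as per-instance state.
        self._items[key] = value

    def add_via_class(self, key, value):
        # Referencing the class directly makes the shared state explicit.
        Registry._items[key] = value


a, b = Registry(), Registry()
a.add_via_self('x', 1)
b.add_via_class('y', 2)
print(Registry._items)               # {'x': 1, 'y': 2}: one dict for everyone
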
tox.ini

@@ -17,7 +17,7 @@ install_command =
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
commands =
-stestr run --slowest {posargs}
+stestr run --slowest --concurrency 1 {posargs}
[testenv:functional]
setenv = {[testenv]setenv}