Add VNF to monitor after restarting tacker service
If you restart the tacker service, it doesn't monitor previously
created VNFs.
This patch reads all VNFs from the db and adds them to the monitor
during initialization of the VNFM plugin.
Change-Id: I1d49f9442dbc34794e01fbefb4ef82af21171956
Closes-Bug: #1616712
(cherry picked from commit 5ae9dd03c7)
This commit is contained in:
parent
203e19e6eb
commit
4d1e0ec14b
|
@ -28,6 +28,7 @@ from tacker.extensions import vnfm
|
|||
from tacker.plugins.common import constants
|
||||
from tacker.tests.unit.db import base as db_base
|
||||
from tacker.tests.unit.db import utils
|
||||
from tacker.vnfm import monitor
|
||||
from tacker.vnfm import plugin
|
||||
|
||||
|
||||
|
@ -54,6 +55,75 @@ class FakeVimClient(mock.Mock):
|
|||
pass
|
||||
|
||||
|
||||
class TestVNFMPluginMonitor(db_base.SqlTestCase):
    """Verify VNFMPlugin re-adds existing VNFs to the monitor at start-up."""

    def setUp(self):
        super(TestVNFMPluginMonitor, self).setUp()
        self._mock_vnf_manager()

    def _mock_vnf_manager(self):
        # Wrap FakeDriverManager in a Mock so driver calls can be asserted
        # while still executing the fake behaviour.
        self._vnf_manager = mock.Mock(wraps=FakeDriverManager())
        self._vnf_manager.__contains__ = mock.Mock(
            return_value=True)
        fake_vnf_manager = mock.Mock()
        fake_vnf_manager.return_value = self._vnf_manager
        self._mock(
            'tacker.common.driver_manager.DriverManager', fake_vnf_manager)

    @mock.patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.get_vnfs')
    @mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
    def test_init_monitoring(self, mock_run, mock_get_vnfs):
        vnf_id = uuidutils.generate_uuid()
        # A single ACTIVE VNF with a ping monitoring policy, shaped like
        # the rows VNFMPluginDb.get_vnfs() returns.
        vnfs = [{
            'id': vnf_id,
            'vnf': {
                'id': vnf_id,
                'status': 'ACTIVE',
                'name': 'fake_vnf',
                'attributes': {
                    'monitoring_policy':
                        '{"vdus": '
                        '{"VDU1": {"ping": {"actions": {"failure": "respawn"},'
                        '"name": "ping", "parameters": {"count": 3,'
                        '"interval": 1, "monitoring_delay": 45, "timeout": 2},'
                        '"monitoring_params": {"count": 3, "interval": 1,'
                        '"monitoring_delay": 45, "timeout": 2}}}}}'}
            },
            'name': 'fake_vnf',
            'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
            'description': 'fake_vnf_description',
            'instance_id': 'da85ea1a-4ec4-4201-bbb2-8d9249eca7ec',
            'vnfd_id': 'eb094833-995e-49f0-a047-dfb56aaf7c4e',
            'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            'placement_attr': {'region': 'RegionOne'},
            'status': 'ACTIVE',
            'attributes': {
                'monitoring_policy':
                    '{"vdus": '
                    '{"VDU1": {"ping": {"actions": {"failure": "respawn"},'
                    '"name": "ping", "parameters": {"count": 3,'
                    '"interval": 1, "monitoring_delay": 45, "timeout": 2},'
                    '"monitoring_params": {"count": 3, "interval": 1,'
                    '"monitoring_delay": 45, "timeout": 2}}}}}'},
            'mgmt_url': '{"VDU1": "a.b.c.d"}',
            'deleted_at': datetime.min,
            'management_ip_addresses': 'a.b.c.d'
        }]

        mock_get_vnfs.return_value = vnfs
        # NOTE(bhagyashris): VNFMonitor class is using a singleton pattern
        # and '_hosting_vnfs' is defined as a class level attribute.
        # If one of the unit test adds a VNF to monitor it will show up here
        # provided both the unit tests runs in the same process.
        # Hence, you must reinitialize '_hosting_vnfs' to empty dict.
        monitor.VNFMonitor._hosting_vnfs = dict()
        vnfm_plugin = plugin.VNFMPlugin()
        hosting_vnfs = vnfm_plugin._vnf_monitor._hosting_vnfs.values()
        hosting_vnfs_list = list(hosting_vnfs)
        # Assert the count first: indexing an empty list would raise
        # IndexError and obscure the real failure.
        self.assertEqual(1, len(hosting_vnfs_list))
        hosting_vnf = hosting_vnfs_list[0]['vnf']
        self.assertEqual('{"VDU1": "a.b.c.d"}', hosting_vnf['mgmt_url'])
|
||||
|
||||
|
||||
class TestVNFMPlugin(db_base.SqlTestCase):
|
||||
def setUp(self):
|
||||
super(TestVNFMPlugin, self).setUp()
|
||||
|
|
|
@ -95,7 +95,7 @@ class VNFMonitor(object):
|
|||
time.sleep(self._status_check_intvl)
|
||||
|
||||
with self._lock:
|
||||
for hosting_vnf in self._hosting_vnfs.values():
|
||||
for hosting_vnf in VNFMonitor._hosting_vnfs.values():
|
||||
if hosting_vnf.get('dead', False):
|
||||
LOG.debug('monitor skips dead vnf %s', hosting_vnf)
|
||||
continue
|
||||
|
@ -120,7 +120,7 @@ class VNFMonitor(object):
|
|||
'ips': new_vnf['management_ip_addresses']})
|
||||
new_vnf['boot_at'] = timeutils.utcnow()
|
||||
with self._lock:
|
||||
self._hosting_vnfs[new_vnf['id']] = new_vnf
|
||||
VNFMonitor._hosting_vnfs[new_vnf['id']] = new_vnf
|
||||
|
||||
attrib_dict = new_vnf['vnf']['attributes']
|
||||
mon_policy_dict = attrib_dict['monitoring_policy']
|
||||
|
@ -132,7 +132,7 @@ class VNFMonitor(object):
|
|||
def delete_hosting_vnf(self, vnf_id):
|
||||
LOG.debug('deleting vnf_id %(vnf_id)s', {'vnf_id': vnf_id})
|
||||
with self._lock:
|
||||
hosting_vnf = self._hosting_vnfs.pop(vnf_id, None)
|
||||
hosting_vnf = VNFMonitor._hosting_vnfs.pop(vnf_id, None)
|
||||
if hosting_vnf:
|
||||
LOG.debug('deleting vnf_id %(vnf_id)s, Mgmt IP %(ips)s',
|
||||
{'vnf_id': vnf_id,
|
||||
|
@ -175,7 +175,7 @@ class VNFMonitor(object):
|
|||
hosting_vnf['action_cb'](action)
|
||||
|
||||
def mark_dead(self, vnf_id):
    """Flag the hosting VNF *vnf_id* so the monitor loop skips it."""
    # '_hosting_vnfs' is shared, class-level singleton state; address it
    # through the class rather than the instance.
    registry = VNFMonitor._hosting_vnfs
    registry[vnf_id]['dead'] = True
|
||||
|
||||
def _invoke(self, driver, **kwargs):
|
||||
method = inspect.stack()[1][3]
|
||||
|
|
|
@ -29,6 +29,7 @@ from tacker.api.v1 import attributes
|
|||
from tacker.common import driver_manager
|
||||
from tacker.common import exceptions
|
||||
from tacker.common import utils
|
||||
from tacker import context as t_context
|
||||
from tacker.db.vnfm import vnfm_db
|
||||
from tacker.extensions import vnfm
|
||||
from tacker.plugins.common import constants
|
||||
|
@ -143,6 +144,16 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
|
|||
cfg.CONF.tacker.policy_action)
|
||||
self._vnf_monitor = monitor.VNFMonitor(self.boot_wait)
|
||||
self._vnf_alarm_monitor = monitor.VNFAlarmMonitor()
|
||||
self._init_monitoring()
|
||||
|
||||
def _init_monitoring(self):
    """Re-register every VNF already in the database with the monitor.

    Runs once when the plugin starts so that VNFs created before a
    tacker service restart are monitored again.
    """
    admin_context = t_context.get_admin_context()
    for vnf in self.get_vnfs(admin_context):
        # Monitoring looks up the VIM, which is tenant scoped, so the
        # context must carry the VNF owner's tenant id.
        admin_context.tenant_id = vnf['tenant_id']
        self.add_vnf_to_monitor(admin_context, vnf)
|
||||
|
||||
def spawn_n(self, function, *args, **kwargs):
    """Schedule *function* on the plugin's worker pool without waiting."""
    pool = self._pool
    pool.spawn_n(function, *args, **kwargs)
|
||||
|
|
2
tox.ini
2
tox.ini
|
@ -13,7 +13,7 @@ deps = -r{toxinidir}/requirements.txt
|
|||
-r{toxinidir}/test-requirements.txt
|
||||
commands =
|
||||
rm -f .testrepository/times.dbm
|
||||
{toxinidir}/tools/ostestr_compat_shim.sh {posargs}
|
||||
{toxinidir}/tools/ostestr_compat_shim.sh --concurrency 1 {posargs}
|
||||
|
||||
[testenv:functional]
|
||||
setenv = OS_TEST_PATH=./tacker/tests/functional
|
||||
|
|
Loading…
Reference in New Issue