Add new policy action: vdu_autoheal

This patch adds below functionality:

- New policy action ``vdu_autoheal`` for recovering failed VDUs
  as reported by the respective monitoring driver configured in
  the monitoring policy of the VNFD template.

- Add unit/functional tests.

- Added oslo_versioned library to implement HealVnfRequest object.

Note: The implementation of vdu_autoheal policy action will support
HealVnfRequest interface as mentioned in the ETSI standard [1].

[1]: https://www.etsi.org/deliver/etsi_gs/NFV-SOL/001_099/003/02.05.01_60/gs_NFV-SOL003v020501p.pdf

Implements: blueprint vdu-auto-healing
Change-Id: If62acbdac41c92842de0ae3b7dedcda9fd1f86e6
changes/95/612595/16
bhagyashris 5 years ago
parent 4b9bcfeeef
commit 5f1e48ff46

@ -83,7 +83,13 @@ The available actions that a monitor driver can call when a particular event
occurs.
#. respawn
In case of OpenStack VIM, when any VDU monitoring fails, it will delete
the entire VNF and create a new one.
#. log
#. vdu_autoheal
In case of OpenStack VIM, when any VDU monitoring fails, it will delete
only that specific VDU resource and create a new one along with its
dependent resources, such as CPs.
How to write TOSCA template to monitor VNF entities
----------------------------------------------------
@ -134,3 +140,9 @@ Example Template
max_foo_reached: scale_up
min_foo_reached: scale_down
vdu3:
monitoring_policy:
ping:
actions:
failure: vdu_autoheal

@ -84,6 +84,7 @@ oslo.serialization==2.18.0
oslo.service==1.24.0
oslo.upgradecheck==0.1.0
oslo.utils==3.33.0
oslo.versionedobjects==1.33.3
oslotest==3.2.0
packaging==17.1
paramiko==2.0.0

@ -0,0 +1,11 @@
---
features:
- |
Added a new monitoring policy action ``vdu_autoheal`` to recover a
failed VDU. If a VNF contains one or more VDUs whose monitoring policy
action is set to ``vdu_autoheal`` and any one of those VDUs becomes
unreachable, only the resources of that particular VDU are deleted and
re-created.
The ``vdu_autoheal`` monitoring policy action is implemented only for the
OpenStack infra driver.

@ -34,6 +34,7 @@ oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
oslo.service!=1.28.1,>=1.24.0 # Apache-2.0
oslo.upgradecheck>=0.1.0 # Apache-2.0
oslo.utils>=3.33.0 # Apache-2.0
oslo.versionedobjects>=1.33.3 # Apache-2.0
openstackdocstheme>=1.18.1 # Apache-2.0
python-neutronclient>=6.7.0 # Apache-2.0
python-novaclient>=9.1.0 # Apache-2.0

@ -73,6 +73,7 @@ tacker.tacker.policy.actions =
respawn = tacker.vnfm.policy_actions.respawn.respawn:VNFActionRespawn
log = tacker.vnfm.policy_actions.log.log:VNFActionLog
log_and_kill = tacker.vnfm.policy_actions.log.log:VNFActionLogAndKill
vdu_autoheal = tacker.vnfm.policy_actions.vdu_autoheal.vdu_autoheal:VNFActionVduAutoheal
oslo.config.opts =
tacker.common.config = tacker.common.config:config_opts
tacker.wsgi = tacker.wsgi:config_opts

@ -11,7 +11,9 @@
# under the License.
from tacker.conductor import conductor_server
from tacker import objects
def main():
objects.register_all()
conductor_server.main()

@ -25,6 +25,7 @@ import oslo_i18n
from oslo_service import service as common_service
from tacker.common import config
from tacker import objects
from tacker import service
@ -34,6 +35,7 @@ oslo_i18n.install("tacker")
def main():
# the configuration will be read into the cfg.CONF global data structure
config.init(sys.argv[1:])
objects.register_all()
if not cfg.CONF.config_file:
sys.exit(_("ERROR: Unable to find configuration file via the default"
" search paths (~/.tacker/, ~/, /etc/tacker/, /etc/) and"

@ -28,6 +28,7 @@ from tacker.db.common_services import common_services_db
from tacker.db.nfvo import nfvo_db
from tacker.extensions import nfvo
from tacker import manager
from tacker import objects
from tacker.plugins.common import constants
from tacker import service as tacker_service
from tacker import version
@ -80,6 +81,7 @@ def init(args, **kwargs):
def main(manager='tacker.conductor.conductor_server.Conductor'):
init(sys.argv[1:])
objects.register_all()
logging.setup(cfg.CONF, "tacker")
oslo_messaging.set_transport_defaults(control_exchange='tacker')
logging.setup(cfg.CONF, "tacker")

@ -40,11 +40,12 @@ from tacker import manager
from tacker.plugins.common import constants
LOG = logging.getLogger(__name__)
_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE)
_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE,
constants.PENDING_HEAL)
_ACTIVE_UPDATE_ERROR_DEAD = (
constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE,
constants.PENDING_SCALE_IN, constants.PENDING_SCALE_OUT, constants.ERROR,
constants.PENDING_DELETE, constants.DEAD)
constants.PENDING_DELETE, constants.DEAD, constants.PENDING_HEAL)
CREATE_STATES = (constants.PENDING_CREATE, constants.DEAD)
@ -498,7 +499,8 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
"is not permited. Please contact your "
"Administrator.")
raise vnfm.VNFDeleteFailed(reason=error_reason)
if vnf_db.status == constants.PENDING_UPDATE:
if(vnf_db.status in [constants.PENDING_UPDATE,
constants.PENDING_HEAL]):
raise vnfm.VNFInUse(vnf_id=vnf_id)
return True
@ -522,28 +524,30 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
tstamp=timeutils.utcnow())
return updated_vnf_dict
def _update_vnf_pre(self, context, vnf_id):
def _update_vnf_pre(self, context, vnf_id, new_status):
with context.session.begin(subtransactions=True):
vnf_db = self._update_vnf_status_db(
context, vnf_id, _ACTIVE_UPDATE, constants.PENDING_UPDATE)
context, vnf_id, _ACTIVE_UPDATE, new_status)
updated_vnf_dict = self._make_vnf_dict(vnf_db)
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=updated_vnf_dict['status'],
evt_type=constants.RES_EVT_UPDATE,
tstamp=timeutils.utcnow())
if new_status in constants.VNF_STATUS_TO_EVT_TYPES:
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=updated_vnf_dict['status'],
evt_type=constants.VNF_STATUS_TO_EVT_TYPES[new_status],
tstamp=timeutils.utcnow())
return updated_vnf_dict
def _update_vnf_post(self, context, vnf_id, new_status,
new_vnf_dict):
new_vnf_dict, vnf_status, evt_type):
updated_time_stamp = timeutils.utcnow()
with context.session.begin(subtransactions=True):
(self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(VNF.status == constants.PENDING_UPDATE).
filter(VNF.status == vnf_status).
update({'status': new_status,
'updated_at': updated_time_stamp}))
'updated_at': updated_time_stamp,
'mgmt_url': new_vnf_dict['mgmt_url']}))
dev_attrs = new_vnf_dict.get('attributes', {})
(context.session.query(VNFAttribute).
@ -559,7 +563,7 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=new_status,
evt_type=constants.RES_EVT_UPDATE,
evt_type=evt_type,
tstamp=updated_time_stamp)
def _delete_vnf_pre(self, context, vnf_id):
@ -635,7 +639,8 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
# reference implementation. needs to be overrided by subclass
def update_vnf(self, context, vnf_id, vnf):
vnf_dict = self._update_vnf_pre(context, vnf_id)
new_status = constants.PENDING_UPDATE
vnf_dict = self._update_vnf_pre(context, vnf_id, new_status)
# start actual update of hosting vnf
# waiting for completion of update should be done backgroundly
# by another thread if it takes a while

@ -63,6 +63,10 @@ class VNFCreateFailed(exceptions.TackerException):
message = _('creating VNF based on %(vnfd_id)s failed')
class VNFUpdateWaitFailed(exceptions.TackerException):
message = _('%(reason)s')
class VNFCreateWaitFailed(exceptions.TackerException):
message = _('%(reason)s')
@ -79,6 +83,10 @@ class VNFDeleteFailed(exceptions.TackerException):
message = _('%(reason)s')
class VNFHealFailed(exceptions.TackerException):
message = _('VNF %(vnf_id)s failed to heal')
class VNFDNotFound(exceptions.NotFound):
message = _('VNFD %(vnfd_id)s could not be found')

@ -0,0 +1,27 @@
# Copyright 2018 NTT DATA.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(bhagyashris): You may scratch your head as you see code that imports
# this module and then accesses attributes for objects such as Instance,
# etc, yet you do not see these attributes in here. Never fear, there is
# a little bit of magic. When objects are registered, an attribute is set
# on this module automatically, pointing to the newest/latest version of
# the object.
def register_all():
    """Import every module that defines versioned objects.

    Importing a module triggers the registry decorators inside it, which
    publish the newest version of each class as an attribute of the
    ``tacker.objects`` package.
    """
    # NOTE(bhagyashris): You must make sure your object gets imported in this
    # function in order for it to be registered by services that may
    # need to receive it via RPC.
    __import__('tacker.objects.heal_vnf_request')

@ -0,0 +1,48 @@
# Copyright 2018 NTT DATA.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import versionutils
from oslo_versionedobjects import base as ovoo_base
from tacker import objects
def get_attrname(name):
    """Return the mangled name of the attribute's underlying storage."""
    # Versioned objects keep each field's value in a '_obj_'-prefixed slot.
    return '_obj_%s' % name
class TackerObjectRegistry(ovoo_base.VersionedObjectRegistry):
    """Registry that exposes registered classes on ``tacker.objects``."""

    notification_classes = []

    def registration_hook(self, cls, index):
        """Called when an object class is registered.

        Keeps ``tacker.objects.$OBJECT`` pointing at the highest-versioned
        implementation of each object name.
        """
        obj_name = cls.obj_name()
        if not hasattr(objects, obj_name):
            setattr(objects, obj_name, cls)
            return
        new_version = versionutils.convert_version_to_tuple(cls.VERSION)
        cur_version = versionutils.convert_version_to_tuple(
            getattr(objects, obj_name).VERSION)
        if new_version >= cur_version:
            setattr(objects, obj_name, cls)
class TackerObject(ovoo_base.VersionedObject):
    """Common base class for all tacker versioned objects."""

    # NOTE(bhagyashris): OBJ_PROJECT_NAMESPACE needs to be set so that nova,
    # tacker, and other objects can exist on the same bus and be distinguished
    # from one another.
    # Prefix used for keys when the object is serialized onto the wire.
    OBJ_SERIAL_NAMESPACE = 'tacker_object'
    OBJ_PROJECT_NAMESPACE = 'tacker'

@ -0,0 +1,22 @@
# Copyright 2018 NTT Data.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fields

# Import fields from oslo.versionedobjects: re-export the field types under
# the tacker.objects namespace so object definitions only need to import
# this module.
StringField = fields.StringField
ListOfObjectsField = fields.ListOfObjectsField
ListOfStringsField = fields.ListOfStringsField

@ -0,0 +1,42 @@
# Copyright 2018 NTT Data.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.objects import base
from tacker.objects import fields
@base.TackerObjectRegistry.register
class HealVnfAdditionalParams(base.TackerObject):
    """Per-resource healing detail carried inside a HealVnfRequest.

    ``parameter`` names the failed resource (e.g. a VDU name) and
    ``cause`` lists the reasons reported for its failure.
    """

    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'parameter': fields.StringField(),
        'cause': fields.ListOfStringsField()
    }
@base.TackerObjectRegistry.register
class HealVnfRequest(base.TackerObject):
    """Heal request object modeled after the ETSI NFV-SOL HealVnfRequest.

    ``cause`` gives the overall reason for healing; ``additional_params``
    holds one HealVnfAdditionalParams entry per affected resource.
    """

    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'cause': fields.StringField(),
        'additional_params': fields.ListOfObjectsField(
            'HealVnfAdditionalParams')
    }

@ -36,6 +36,7 @@ PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
PENDING_SCALE_IN = "PENDING_SCALE_IN"
PENDING_SCALE_OUT = "PENDING_SCALE_OUT"
PENDING_HEAL = "PENDING_HEAL"
DEAD = "DEAD"
ERROR = "ERROR"
@ -67,6 +68,14 @@ RES_EVT_MONITOR = "MONITOR"
RES_EVT_SCALE = "SCALE"
RES_EVT_NA_STATE = "Not Applicable"
RES_EVT_ONBOARDED = "OnBoarded"
RES_EVT_HEAL = "HEAL"
VNF_STATUS_TO_EVT_TYPES = {PENDING_CREATE: RES_EVT_CREATE,
PENDING_UPDATE: RES_EVT_UPDATE,
PENDING_DELETE: RES_EVT_DELETE,
PENDING_HEAL: RES_EVT_HEAL}
RES_EVT_CREATED_FLD = "created_at"
RES_EVT_DELETED_FLD = "deleted_at"

@ -22,3 +22,9 @@ SCALE_WINDOW_SLEEP_TIME = 120
NS_CREATE_TIMEOUT = 400
NS_DELETE_TIMEOUT = 300
NOVA_CLIENT_VERSION = 2
VDU_MARK_UNHEALTHY_TIMEOUT = 500
VDU_MARK_UNHEALTHY_SLEEP_TIME = 3
VDU_AUTOHEALING_TIMEOUT = 500
VDU_AUTOHEALING_SLEEP_TIME = 3
VNF_CIRROS_PENDING_HEAL_TIMEOUT = 300
PENDING_SLEEP_TIME = 3

@ -0,0 +1,88 @@
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0

description: Demo example

metadata:
  template_name: sample-tosca-vnfd

topology_template:
  node_templates:
    VDU1:
      type: tosca.nodes.nfv.VDU.Tacker
      capabilities:
        nfv_compute:
          properties:
            disk_size: 1 GB
            mem_size: 512 MB
            num_cpus: 1
      properties:
        image: cirros-0.4.0-x86_64-disk
        # NOTE: the original template listed 'mgmt_driver: noop' twice in
        # this mapping; duplicate keys are invalid YAML, so one was removed.
        mgmt_driver: noop
        availability_zone: nova
        monitoring_policy:
          name: ping
          parameters:
            monitoring_delay: 45
            count: 3
            interval: 1
            timeout: 2
          actions:
            failure: vdu_autoheal

    CP1:
      type: tosca.nodes.nfv.CP.Tacker
      properties:
        management: true
        anti_spoofing_protection: false
      requirements:
        - virtualLink:
            node: VL1
        - virtualBinding:
            node: VDU1

    VDU2:
      type: tosca.nodes.nfv.VDU.Tacker
      capabilities:
        nfv_compute:
          properties:
            disk_size: 1 GB
            mem_size: 512 MB
            num_cpus: 1
      properties:
        image: cirros-0.4.0-x86_64-disk
        mgmt_driver: noop
        availability_zone: nova
        monitoring_policy:
          name: ping
          parameters:
            monitoring_delay: 45
            count: 3
            interval: 1
            timeout: 2
          actions:
            failure: vdu_autoheal
        user_data_format: RAW
        user_data: |
          #!/bin/sh
          echo "my hostname is `hostname`" > /tmp/hostname
          df -h > /home/cirros/diskinfo
          sleep 90
          sudo ifdown eth0

    CP2:
      type: tosca.nodes.nfv.CP.Tacker
      properties:
        management: true
        anti_spoofing_protection: false
      requirements:
        - virtualLink:
            node: VL1
        - virtualBinding:
            node: VDU2

    VL1:
      type: tosca.nodes.nfv.VL
      properties:
        network_name: net_mgmt
        vendor: Tacker

@ -0,0 +1,55 @@
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0

description: Demo example

metadata:
  template_name: sample-tosca-vnfd

topology_template:
  node_templates:
    VDU1:
      type: tosca.nodes.nfv.VDU.Tacker
      capabilities:
        nfv_compute:
          properties:
            disk_size: 1 GB
            mem_size: 512 MB
            num_cpus: 1
      properties:
        image: cirros-0.4.0-x86_64-disk
        # NOTE: the original template listed 'mgmt_driver: noop' twice in
        # this mapping; duplicate keys are invalid YAML, so one was removed.
        mgmt_driver: noop
        availability_zone: nova
        monitoring_policy:
          name: ping
          parameters:
            monitoring_delay: 45
            count: 3
            interval: 1
            timeout: 2
          actions:
            failure: vdu_autoheal
        user_data_format: RAW
        user_data: |
          #!/bin/sh
          echo "my hostname is `hostname`" > /tmp/hostname
          df -h > /home/cirros/diskinfo
          sleep 90
          sudo ifdown eth0

    CP1:
      type: tosca.nodes.nfv.CP.Tacker
      properties:
        management: true
        anti_spoofing_protection: false
      requirements:
        - virtualLink:
            node: VL1
        - virtualBinding:
            node: VDU1

    VL1:
      type: tosca.nodes.nfv.VL
      properties:
        network_name: net_mgmt
        vendor: Tacker

@ -119,6 +119,9 @@ class BaseTackerTest(base.BaseTestCase):
auth_ses = session.Session(auth=auth, verify=verify)
return glance_client.Client(session=auth_ses)
def get_vdu_resource(self, stack_id, res_name):
return self.h_client.resources.get(stack_id, res_name)
def wait_until_vnf_status(self, vnf_id, target_status, timeout,
sleep_interval):
start_time = int(time.time())
@ -140,6 +143,17 @@ class BaseTackerTest(base.BaseTestCase):
self.wait_until_vnf_status(vnf_id, 'ACTIVE', timeout,
sleep_interval)
def verify_vnf_update(self, vnf_id):
self.wait_until_vnf_status(vnf_id, 'ACTIVE',
constants.VNF_CIRROS_CREATE_TIMEOUT,
constants.ACTIVE_SLEEP_TIME)
self.wait_until_vnf_status(vnf_id, 'PENDING_HEAL',
constants.VNF_CIRROS_PENDING_HEAL_TIMEOUT,
constants.PENDING_SLEEP_TIME)
self.wait_until_vnf_status(vnf_id, 'ACTIVE',
constants.VNF_CIRROS_CREATE_TIMEOUT,
constants.ACTIVE_SLEEP_TIME)
def wait_until_vnf_delete(self, vnf_id, timeout):
start_time = int(time.time())
while True:

@ -20,7 +20,7 @@ from tacker.tests.utils import read_file
class VnfTestPingMonitor(base.BaseTackerTest):
def _test_vnf_with_monitoring(self, vnfd_file, vnf_name):
def _vnfd_and_vnf_create(self, vnfd_file, vnf_name):
data = dict()
data['tosca'] = read_file(vnfd_file)
toscal = data['tosca']
@ -36,6 +36,15 @@ class VnfTestPingMonitor(base.BaseTackerTest):
vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name': vnf_name}}
vnf_instance = self.client.create_vnf(body=vnf_arg)
# Delete vnfd_instance
self.addCleanup(self.client.delete_vnfd, vnfd_id)
return vnfd_instance, vnf_instance
def _test_vnf_with_monitoring(self, vnfd_file, vnf_name):
vnfd_instance, vnf_instance = self._vnfd_and_vnf_create(vnfd_file,
vnf_name)
# Verify vnf goes from ACTIVE->DEAD->ACTIVE states
self.verify_vnf_restart(vnfd_instance, vnf_instance)
@ -51,8 +60,6 @@ class VnfTestPingMonitor(base.BaseTackerTest):
vnf_state_list = [evt_constants.ACTIVE, evt_constants.DEAD]
self.verify_vnf_monitor_events(vnf_id, vnf_state_list)
# Delete vnfd_instance
self.addCleanup(self.client.delete_vnfd, vnfd_id)
self.addCleanup(self.wait_until_vnf_delete, vnf_id,
constants.VNF_CIRROS_DELETE_TIMEOUT)
@ -65,3 +72,39 @@ class VnfTestPingMonitor(base.BaseTackerTest):
self._test_vnf_with_monitoring(
'sample-tosca-vnfd-multi-vdu-monitoring.yaml',
'ping monitor multi vdu vnf with tosca template')
def _test_vnf_with_monitoring_vdu_autoheal_action(
        self, vnfd_file, vnf_name):
    """Create a VNF from vnfd_file and verify the vdu_autoheal cycle.

    Waits for ACTIVE -> PENDING_HEAL -> ACTIVE (via verify_vnf_update),
    then deletes the VNF and checks the monitoring events recorded.
    """
    vnfd_instance, vnf_instance = self._vnfd_and_vnf_create(vnfd_file,
                                                            vnf_name)
    vnf_id = vnf_instance['vnf']['id']
    self.verify_vnf_update(vnf_id)

    # Delete vnf_instance with vnf_id
    try:
        self.client.delete_vnf(vnf_id)
    except Exception:
        assert False, ("Failed to delete vnf %s after the monitor test" %
                       vnf_id)
    self.addCleanup(self.wait_until_vnf_delete, vnf_id,
                    constants.VNF_CIRROS_DELETE_TIMEOUT)

    params = {'resource_id': vnf_id,
              'resource_state': 'PENDING_UPDATE',
              'event_type': evt_constants.RES_EVT_MONITOR}
    vnf_events = self.client.list_vnf_events(**params)
    # The vdu_autoheal action should emit at least 4 monitoring events.
    # NOTE(review): the original assertion had its arguments reversed
    # (assertGreaterEqual(4, len(...)) asserts 4 >= len), which passes
    # even when no events are emitted at all; fixed to len >= 4 per the
    # stated intent — confirm the expected count against the action.
    self.assertGreaterEqual(len(vnf_events['vnf_events']), 4)
def test_vnf_monitoring_with_vdu_autoheal_action_for_multi_vdu(self):
self._test_vnf_with_monitoring_vdu_autoheal_action(
'sample-tosca-vnfd-multi-vdu-monitoring-vdu-autoheal.yaml',
'ping multi vdu monitor having vdu_autoheal failure action '
'with tosca template')
def test_vnf_monitoring_with_vdu_autoheal_action_for_single_vdu(self):
self._test_vnf_with_monitoring_vdu_autoheal_action(
'sample-tosca-vnfd-single-vdu-monitoring-vdu-autoheal.yaml',
'ping vdu monitor having vdu_autoheal failure action '
'with tosca template')

@ -0,0 +1,61 @@
# Copyright 2018 NTT DATA.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tacker.objects import base
from tacker.objects import fields
from tacker.tests.unit import base as test_base
class TestString(test_base.TestCase):
    """Tests for the StringField re-export."""

    def setUp(self):
        super(TestString, self).setUp()
        self.field = fields.StringField()
        good = [('foo', 'foo'), (1, '1'), (True, 'True')]
        if six.PY2:
            good.append((int(1), '1'))
        self.coerce_good_values = good
        self.coerce_bad_values = [None]

    def test_stringify(self):
        self.assertEqual("'123'", self.field.stringify(123))
class TestListOfStrings(test_base.TestCase):
    """Tests for the ListOfStringsField re-export."""

    def setUp(self):
        super(TestListOfStrings, self).setUp()
        self.field = fields.ListOfStringsField()

    def test_list_of_string(self):
        # stringify renders the list with each element quoted.
        self.assertEqual("['abc']", self.field.stringify(['abc']))
class TestListOfObjects(test_base.TestCase):
    """Tests for the ListOfObjectsField re-export."""

    def test_list_of_obj(self):
        # Throwaway object classes; register_if(False) keeps them out of
        # the global registry.
        @base.TackerObjectRegistry.register_if(False)
        class MyObjElement(base.TackerObject):
            fields = {'foo': fields.StringField()}

            def __init__(self, foo):
                super(MyObjElement, self).__init__()
                self.foo = foo

        @base.TackerObjectRegistry.register_if(False)
        class MyList(base.TackerObject):
            fields = {'objects': fields.ListOfObjectsField('MyObjElement')}

        container = MyList()
        container.objects = [MyObjElement('a'), MyObjElement('b')]
        self.assertEqual(['a', 'b'],
                         [item.foo for item in container.objects])

@ -20,12 +20,63 @@ import os
import yaml
from tacker import context
from tacker.db.common_services import common_services_db_plugin
from tacker.extensions import vnfm
from tacker.tests.unit import base
from tacker.tests.unit.db import utils
from tacker.vnfm.infra_drivers.openstack import openstack
# Minimal VNF fixture for the heal tests: one VDU (VDU1) with a single
# port (CP1) and a flavor, plus the record-level ids (instance_id,
# vim_id, ...) that the code under test reads.
vnf_dict = {
    'attributes': {
        'heat_template': {
            'outputs': {
                'mgmt_ip-VDU1': {
                    'value': {
                        'get_attr': [
                            'CP1', 'fixed_ips', 0, 'ip_address']
                    }
                }
            },
            'description': 'Demo example\n',
            'parameters': {},
            'resources': {
                'VDU1': {
                    'type': 'OS::Nova::Server',
                    'properties': {
                        'user_data_format': 'SOFTWARE_CONFIG',
                        'availability_zone': 'nova',
                        'image': 'cirros-0.4.0-x86_64-disk',
                        'config_drive': False,
                        'flavor': {'get_resource': 'VDU1_flavor'},
                        'networks': [{'port': {'get_resource': 'CP1'}}]
                    }
                },
                'CP1': {
                    'type': 'OS::Neutron::Port',
                    'properties': {
                        'port_security_enabled': False,
                        'network': 'net_mgmt'
                    }
                },
                'VDU1_flavor': {
                    'type': 'OS::Nova::Flavor',
                    'properties': {'vcpus': 1, 'disk': 1, 'ram': 512}
                }
            }
        }
    },
    'status': 'ACTIVE',
    'vnfd_id': '576acf48-b9df-491d-a57c-342de660ec78',
    'tenant_id': '13d2ca8de70d48b2a2e0dbac2c327c0b',
    'vim_id': '3f41faa7-5630-47d2-9d4a-1216953c8887',
    'instance_id': 'd1121d3c-368b-4ac2-b39d-835aa3e4ccd8',
    'placement_attr': {'vim_name': 'openstack-vim'},
    'id': 'a27fc58e-66ae-4031-bba4-efede318c60b',
    'name': 'vnf_create_1'
}
class FakeHeatClient(mock.Mock):
class Stack(mock.Mock):
@ -59,6 +110,11 @@ class TestOpenStack(base.TestCase):
self.context = context.get_admin_context()
self.infra_driver = openstack.OpenStack()
self._mock_heat_client()
mock.patch('tacker.db.common_services.common_services_db_plugin.'
'CommonServicesPluginDb.create_event'
).start()
self._cos_db_plugin = \
common_services_db_plugin.CommonServicesPluginDb()
self.addCleanup(mock.patch.stopall)
def _mock_heat_client(self):
@ -181,6 +237,25 @@ class TestOpenStack(base.TestCase):
'config'])
self.assertEqual(expected_vnf_update, vnf_obj)
@mock.patch(
'tacker.vnfm.infra_drivers.openstack.vdu.Vdu')
def test_heal_vdu(self, mock_vdu):
self.infra_driver.heal_vdu(None, self.context, vnf_dict,
mock.ANY)
mock_vdu.assert_called_once_with(self.context, vnf_dict,
mock.ANY)
@mock.patch(
'tacker.vnfm.infra_drivers.openstack.vdu.Vdu')
@mock.patch('tacker.vnfm.infra_drivers.openstack.openstack.LOG')
def test_heal_vdu_failed(self, mock_log, mock_vdu):
mock_vdu.side_effect = Exception
self.assertRaises(vnfm.VNFHealFailed, self.infra_driver.heal_vdu,
None, self.context, vnf_dict,
mock.ANY)
mock_log.error.assert_called_with(
"VNF '%s' failed to heal", vnf_dict['id'])
def _get_expected_fields_tosca(self, template):
return {'stack_name':
'test_openwrt_eb84260e-5ff7-4332-b032-50a14d6c1123',

@ -0,0 +1,162 @@
# Copyright 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tacker import context
from tacker.db.common_services import common_services_db_plugin
from tacker.objects import heal_vnf_request
from tacker.plugins.common import constants
from tacker.tests.unit import base
from tacker.vnfm.infra_drivers.openstack import vdu
# Minimal VNF fixture for the Vdu heal tests: one VDU (VDU1) with a single
# port (CP1) and a flavor, plus the record-level ids (instance_id,
# vim_id, ...) that the code under test reads.
vnf_dict = {
    'attributes': {
        'heat_template': {
            'outputs': {
                'mgmt_ip-VDU1': {
                    'value': {
                        'get_attr': [
                            'CP1', 'fixed_ips', 0, 'ip_address']
                    }
                }
            },
            'description': 'Demo example\n',
            'parameters': {},
            'resources': {
                'VDU1': {
                    'type': 'OS::Nova::Server',
                    'properties': {
                        'user_data_format': 'SOFTWARE_CONFIG',
                        'availability_zone': 'nova',
                        'image': 'cirros-0.4.0-x86_64-disk',
                        'config_drive': False,
                        'flavor': {'get_resource': 'VDU1_flavor'},
                        'networks': [{'port': {'get_resource': 'CP1'}}]
                    }
                },
                'CP1': {
                    'type': 'OS::Neutron::Port',
                    'properties': {
                        'port_security_enabled': False,
                        'network': 'net_mgmt'
                    }
                },
                'VDU1_flavor': {
                    'type': 'OS::Nova::Flavor',
                    'properties': {'vcpus': 1, 'disk': 1, 'ram': 512}
                }
            }
        }
    },
    'status': 'ACTIVE',
    'vnfd_id': '576acf48-b9df-491d-a57c-342de660ec78',
    'tenant_id': '13d2ca8de70d48b2a2e0dbac2c327c0b',
    'vim_id': '3f41faa7-5630-47d2-9d4a-1216953c8887',
    'instance_id': 'd1121d3c-368b-4ac2-b39d-835aa3e4ccd8',
    'placement_attr': {'vim_name': 'openstack-vim'},
    'id': 'a27fc58e-66ae-4031-bba4-efede318c60b',
    'name': 'vnf_create_1'
}
class FakeHeatClient(mock.Mock):
    """Stand-in for the heat client used by the VDU heal code path."""

    class Stack(mock.Mock):
        # Mimics a heat stack object in a healthy, fully-created state.
        stack_status = 'CREATE_COMPLETE'
        outputs = [{u'output_value': u'192.168.120.31', u'description':
                    u'management ip address', u'output_key': u'mgmt_ip-vdu1'}]

    def create(self, *args, **kwargs):
        # Returns a fixed stack id, like heat's stack-create response.
        return {'stack': {'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}}

    def get(self, id):
        return self.Stack()

    def update(self, stack_id, **kwargs):
        return self.Stack()

    def resource_mark_unhealthy(self, stack_id, resource_name,
                                mark_unhealthy, resource_status_reason):
        return self.Stack()
class TestVDU(base.TestCase):
    """Unit tests for the openstack infra driver's VDU heal helper."""

    def setUp(self):
        super(TestVDU, self).setUp()
        self.context = context.get_admin_context()
        self._mock_heat_client()
        # The Vdu helper resolves the VIM on construction; stub it out.
        mock.patch('tacker.vnfm.vim_client.VimClient.get_vim').start()

        # Heal request naming VDU1 as the unreachable resource.
        self.additional_paramas_obj = heal_vnf_request.HealVnfAdditionalParams(
            parameter='VDU1',
            cause=["Unable to reach while monitoring resource: 'VDU1'"])
        self.heal_request_data_obj = heal_vnf_request.HealVnfRequest(
            cause='VNF monitoring fails.',
            additional_params=[self.additional_paramas_obj])
        self.heal_vdu = vdu.Vdu(self.context, vnf_dict,
                                self.heal_request_data_obj)

        # Mock event logging so the tests can assert on create_event calls
        # without a database.
        mock.patch('tacker.db.common_services.common_services_db_plugin.'
                   'CommonServicesPluginDb.create_event'
                   ).start()
        self._cos_db_plugin = \
            common_services_db_plugin.CommonServicesPluginDb()
        self.addCleanup(mock.patch.stopall)

    def _mock_heat_client(self):
        # Wire FakeHeatClient in place of the real heat client so stack
        # operations can be asserted on.
        self.heat_client = mock.Mock(wraps=FakeHeatClient())
        fake_heat_client = mock.Mock()
        fake_heat_client.return_value = self.heat_client
        self._mock(
            'tacker.vnfm.infra_drivers.openstack.heat_client.HeatClient',
            fake_heat_client)

    @mock.patch('tacker.vnfm.vim_client.VimClient.get_vim')
    def test_heal_vdu(self, mock_get_vim):
        """heal_vdu() triggers a stack update and records a HEAL event."""
        mock_get_vim.return_value = mock.MagicMock()
        self.heal_vdu.heal_vdu()
        self.heat_client.update.assert_called_once_with(
            stack_id=vnf_dict['instance_id'], existing=True)
        self._cos_db_plugin.create_event.assert_called_with(
            self.context, res_id=vnf_dict['id'],
            res_type=constants.RES_TYPE_VNF, res_state=vnf_dict['status'],
            evt_type=constants.RES_EVT_HEAL, tstamp=mock.ANY,
            details=("HealVnfRequest invoked to update the stack '%s'" %
                     vnf_dict['instance_id']))

    @mock.patch('tacker.vnfm.vim_client.VimClient.get_vim')
    def test_resource_mark_unhealthy(self, mock_get_vim):
        """_resource_mark_unhealthy() flags the failed resource in heat."""
        mock_get_vim.return_value = mock.MagicMock()
        self.heal_vdu._resource_mark_unhealthy()
        self.heat_client.resource_mark_unhealthy.assert_called_once_with(
            stack_id=vnf_dict['instance_id'],
            resource_name=self.additional_paramas_obj.parameter,
            mark_unhealthy=True,
            resource_status_reason=self.additional_paramas_obj.cause)
        self._cos_db_plugin.create_event.assert_called_with(
            self.context, res_id=vnf_dict['id'],
            res_type=constants.RES_TYPE_VNF, res_state=vnf_dict['status'],
            evt_type=constants.RES_EVT_HEAL, tstamp=mock.ANY,
            details="HealVnfRequest invoked to mark resource 'VDU1' "
                    "to unhealthy.")

@ -0,0 +1,147 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from datetime import datetime
import mock
from oslo_utils import uuidutils
from tacker import context
from tacker.db.nfvo import nfvo_db
from tacker.objects import heal_vnf_request
from tacker.tests.unit.db import base as db_base
from tacker.vnfm import plugin
from tacker.vnfm.policy_actions.vdu_autoheal import vdu_autoheal
# Minimal VNF fixture for the policy-action tests; mgmt_url maps the VDU
# name to its management IP, and the heat_template lists the VDU's port
# so the action can collect dependent resources.
vnf_dict = {
    'id': uuidutils.generate_uuid(),
    'mgmt_url': '{"VDU1": "a.b.c.d"}',
    'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
    'instance_id': 'a737497c-761c-11e5-89c3-9cb6541d805d',
    'attributes': {
        'heat_template': {
            'resources': {
                'VDU1': {
                    'properties': {
                        'networks': [{'port': {'get_resource': 'CP1'}}]}
                }
            }
        }
    }
}
class FakeDriverManager(mock.Mock):
    """Stand-in for tacker.common.driver_manager.DriverManager."""

    def invoke(self, *args, **kwargs):
        # 'create' calls return a fresh uuid, like a real infra driver;
        # anything not matched below implicitly returns None.
        if 'create' in args:
            return uuidutils.generate_uuid()
        if 'get_resource_info' in args:
            return {'resources': {'name': 'dummy_vnf',
                                  'type': 'dummy',
                                  'id': uuidutils.generate_uuid()}}
class FakeVNFMonitor(mock.Mock):
    """No-op stand-in for tacker.vnfm.monitor.VNFMonitor."""
    pass
class TestVNFActionVduAutoheal(db_base.SqlTestCase):
    """Unit tests for the vdu_autoheal policy action."""

    def setUp(self):
        super(TestVNFActionVduAutoheal, self).setUp()
        self.context = context.get_admin_context()
        self._mock_device_manager()
        self._mock_vnf_monitor()
        self._insert_dummy_vim()
        self.vnfm_plugin = plugin.VNFMPlugin()
        self.vdu_autoheal = vdu_autoheal.VNFActionVduAutoheal()
        self.addCleanup(mock.patch.stopall)

    def _mock_device_manager(self):
        # Replace the plugin's driver manager with FakeDriverManager so no
        # real infra driver is invoked.
        self._device_manager = mock.Mock(wraps=FakeDriverManager())
        self._device_manager.__contains__ = mock.Mock(
            return_value=True)
        fake_device_manager = mock.Mock()
        fake_device_manager.return_value = self._device_manager
        self._mock(
            'tacker.common.driver_manager.DriverManager', fake_device_manager)

    def _mock_vnf_monitor(self):
        # Monitoring itself is not under test; stub out VNFMonitor.
        self._vnf_monitor = mock.Mock(wraps=FakeVNFMonitor())
        fake_vnf_monitor = mock.Mock()
        fake_vnf_monitor.return_value = self._vnf_monitor
        self._mock(
            'tacker.vnfm.monitor.VNFMonitor', fake_vnf_monitor)

    def _insert_dummy_vim(self):
        # Seed the DB with the VIM referenced by vnf_dict['vim_id'].
        session = self.context.session
        vim_db = nfvo_db.Vim(
            id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
            name='fake_vim',
            description='fake_vim_description',
            type='test_vim',
            status='Active',
            deleted_at=datetime.min,
            placement_attr={'regions': ['RegionOne']})
        vim_auth_db = nfvo_db.VimAuth(
            vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            password='encrypted_pw',
            auth_url='http://localhost:5000',
            vim_project={'name': 'test_project'},
            auth_cred={'username': 'test_user', 'user_domain_id': 'default',
                       'project_domain_id': 'default'})
        session.add(vim_db)
        session.add(vim_auth_db)
        session.flush()

    @mock.patch('tacker.vnfm.plugin.VNFMPlugin.heal_vnf')
    @mock.patch('yaml.safe_load')
    @mock.patch('tacker.objects.HealVnfRequest')
    def test_vdu_autoheal_execute_action(self, mock_heal_vnf_request,
                                         mock_safe_load,
                                         mock_heal_vnf):
        """execute_action builds a HealVnfRequest for the VDU and its CP."""
        # NOTE: yaml.safe_load is mocked because this fixture stores
        # vnf_dict['attributes']['heat_template'] as an already-parsed
        # dict; in actual execution it is a YAML string, and passing a
        # dict to the real safe_load fails ("dict object has no read
        # attribute").
        mock_safe_load.return_value = vnf_dict['attributes']['heat_template']
        resource_list = ['VDU1', 'CP1']
        additional_params = []
        for resource in resource_list:
            additional_paramas_obj = heal_vnf_request.HealVnfAdditionalParams(
                parameter=resource,
                cause=["Unable to reach while monitoring resource: '%s'" %
                       resource])
            additional_params.append(additional_paramas_obj)
        heal_request_data_obj = heal_vnf_request.HealVnfRequest(
            cause='VNF monitoring fails.',
            additional_params=additional_params)
        mock_heal_vnf_request.return_value = heal_request_data_obj
        self.vdu_autoheal.execute_action(self.vnfm_plugin, self.context,
                                        vnf_dict, args={'vdu_name': 'VDU1'})
        mock_heal_vnf.assert_called_once_with(self.context, vnf_dict['id'],
                                              heal_request_data_obj)

    @mock.patch('tacker.vnfm.policy_actions.vdu_autoheal.'
                'vdu_autoheal.LOG')
    def test_vdu_autoheal_action_with_no_vdu_name(self, mock_log):
        """A missing 'vdu_name' arg logs an error instead of healing."""
        expected_error_msg = ("VDU resource of vnf '%s' is not present for "
                              "autoheal." % vnf_dict['id'])
        self.vdu_autoheal.execute_action(self.vnfm_plugin, self.context,
                                        vnf_dict, args={})
        mock_log.error.assert_called_with(expected_error_msg)

@ -50,6 +50,33 @@ MOCK_VNF = {
}
# Hosting-VNF fixture whose monitoring policy maps a ping failure on 'vdu1'
# to the 'vdu_autoheal' policy action; 'action_cb' records the action the
# monitor invokes so tests can assert on it.
MOCK_VNF_DEVICE_FOR_VDU_AUTOHEAL = {
    'id': MOCK_VNF_ID,
    'management_ip_addresses': {
        'vdu1': 'a.b.c.d'
    },
    'monitoring_policy': {
        'vdus': {
            'vdu1': {
                'ping': {
                    'actions': {
                        'failure': 'vdu_autoheal'
                    },
                    # Aggressive timings so the monitor loop fires
                    # immediately during the unit test.
                    'monitoring_params': {
                        'count': 1,
                        'monitoring_delay': 0,
                        'interval': 0,
                        'timeout': 2
                    }
                }
            }
        }
    },
    'boot_at': timeutils.utcnow(),
    'action_cb': mock.MagicMock()
}
class TestVNFMonitor(testtools.TestCase):
def setUp(self):
@ -112,7 +139,7 @@ class TestVNFMonitor(testtools.TestCase):
@mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
def test_run_monitor(self, mock_monitor_run):
test_hosting_vnf = MOCK_VNF
test_hosting_vnf['vnf'] = {}
test_hosting_vnf['vnf'] = {'status': 'ACTIVE'}
test_boot_wait = 30
mock_kwargs = {
'count': 1,
@ -125,6 +152,55 @@ class TestVNFMonitor(testtools.TestCase):
self.mock_monitor_manager.invoke = mock.MagicMock()
test_vnfmonitor._monitor_manager = self.mock_monitor_manager
test_vnfmonitor.run_monitor(test_hosting_vnf)
self.mock_monitor_manager\
.invoke.assert_called_once_with('ping', 'monitor_call', vnf={},
self.mock_monitor_manager \
.invoke.assert_called_once_with('ping', 'monitor_call',
vnf={'status': 'ACTIVE'},
kwargs=mock_kwargs)
@mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
@mock.patch('tacker.vnfm.monitor.VNFMonitor.monitor_call')
def test_vdu_autoheal_action(self, mock_monitor_call, mock_monitor_run):
test_hosting_vnf = MOCK_VNF_DEVICE_FOR_VDU_AUTOHEAL
test_boot_wait = 30
test_device_dict = {
'status': 'ACTIVE',
'id': MOCK_VNF_ID,
'mgmt_url': '{"vdu1": "a.b.c.d"}',
'attributes': {
'monitoring_policy': json.dumps(
MOCK_VNF_DEVICE_FOR_VDU_AUTOHEAL['monitoring_policy'])
}
}
test_hosting_vnf['vnf'] = test_device_dict
mock_monitor_call.return_value = 'failure'
test_vnfmonitor = monitor.VNFMonitor(test_boot_wait)
test_vnfmonitor._monitor_manager = self.mock_monitor_manager
test_vnfmonitor.run_monitor(test_hosting_vnf)
test_hosting_vnf['action_cb'].assert_called_once_with(
'vdu_autoheal', vdu_name='vdu1')
@mock.patch('tacker.vnfm.monitor.VNFMonitor.__run__')
def test_update_hosting_vnf(self, mock_monitor_run):
test_boot_wait = 30
test_vnfmonitor = monitor.VNFMonitor(test_boot_wait)
vnf_dict = {
'id': MOCK_VNF_ID,
'mgmt_url': '{"vdu1": "a.b.c.d"}',
'management_ip_addresses': 'a.b.c.d',
'vnf': {
'id': MOCK_VNF_ID,
'mgmt_url': '{"vdu1": "a.b.c.d"}',
'attributes': {
'monitoring_policy': json.dumps(
MOCK_VNF['monitoring_policy'])
},
'status': 'ACTIVE',
}
}
test_vnfmonitor.add_hosting_vnf(vnf_dict)
vnf_dict['status'] = 'PENDING_HEAL'
test_vnfmonitor.update_hosting_vnf(vnf_dict)
test_device_status = test_vnfmonitor._hosting_vnfs[MOCK_VNF_ID][
'vnf']['status']
self.assertEqual('PENDING_HEAL', test_device_status)

@ -27,6 +27,7 @@ from tacker.db.nfvo import nfvo_db
from tacker.db.nfvo import ns_db
from tacker.db.vnfm import vnfm_db
from tacker.extensions import vnfm
from tacker.objects import heal_vnf_request
from tacker.plugins.common import constants
from tacker.tests.unit.db import base as db_base
from tacker.tests.unit.db import utils
@ -622,3 +623,32 @@ class TestVNFMPlugin(db_base.SqlTestCase):
self.context,
uuidutils.generate_uuid(),
policy_type='invalid_policy_type')
@mock.patch('tacker.vnfm.monitor.VNFMonitor.update_hosting_vnf')
def test_heal_vnf_vdu(self, mock_update_hosting_vnf):
self._insert_dummy_vnf_template()
dummy_device_obj = self._insert_dummy_vnf()
additional_params_obj = heal_vnf_request.HealVnfAdditionalParams(
parameter='VDU1',
cause=["Unable to reach while monitoring resource: 'VDU1'"])
heal_request_data_obj = heal_vnf_request.HealVnfRequest(
cause='VNF monitoring fails.',
additional_params=[additional_params_obj])
result = self.vnfm_plugin.heal_vnf(self.context,
dummy_device_obj['id'],
heal_request_data_obj)
self.assertIsNotNone(result)
self.assertEqual(dummy_device_obj['id'], result['id'])
self.assertIn('instance_id', result)
self.assertIn('status', result)
self.assertIn('attributes', result)
self.assertIn('mgmt_url', result)
self.assertIn('updated_at', result)
self._vnf_manager.invoke.assert_called_with(
'test_vim', 'heal_vdu', plugin=self.vnfm_plugin,
context=self.context, vnf_dict=mock.ANY,
heal_request_data_obj=heal_request_data_obj)
self._pool.spawn_n.assert_called_once_with(
self.vnfm_plugin._update_vnf_wait, self.context, mock.ANY,
mock.ANY, 'test_vim', vnf_heal=True)

@ -53,7 +53,7 @@ class VnfAbstractDriver(extensions.PluginInterface):
pass
@abc.abstractmethod
def update_wait(self, plugin, context, vnf_id):
def update_wait(self, plugin, context, vnf_dict):
pass
@abc.abstractmethod
@ -69,3 +69,7 @@ class VnfAbstractDriver(extensions.PluginInterface):
region_name=None):
'''Fetches optional details of a VNF'''
pass
    @abc.abstractmethod
    def heal_vdu(self, plugin, context, vnf_dict, heal_request_data):
        '''Heal the failed VDU resources of a VNF as described by
        heal_request_data'''
        pass

@ -545,3 +545,6 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
if file_descriptor is not None:
file_path = vim_auth.pop('ca_cert_file')
self.kubernetes.close_tmp_file(file_descriptor, file_path)
    def heal_vdu(self, plugin, context, vnf_dict, heal_request_data):
        # vdu_autoheal is not implemented for the Kubernetes VIM; this no-op
        # only satisfies the VnfAbstractDriver interface.
        pass

@ -73,3 +73,6 @@ class VnfNoop(abstract_driver.VnfAbstractDriver):
def get_resource_info(self, plugin, context, vnf_info, auth_attr,
region_name=None):
return {'noop': {'id': uuidutils.generate_uuid(), 'type': 'noop'}}
    def heal_vdu(self, plugin, context, vnf_dict, heal_request_data):
        # Noop driver: healing requires no work, so do nothing.
        pass

@ -0,0 +1,23 @@
# Copyright 2018 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# OpenStack infra constants: Heat stack status strings used when polling
# stack lifecycle transitions during create/update/delete waits.
STACK_CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS"
STACK_CREATE_COMPLETE = "CREATE_COMPLETE"
STACK_UPDATE_IN_PROGRESS = "UPDATE_IN_PROGRESS"
STACK_UPDATE_COMPLETE = "UPDATE_COMPLETE"
STACK_DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS"
STACK_DELETE_COMPLETE = "DELETE_COMPLETE"

@ -53,6 +53,9 @@ class HeatClient(object):
    def get(self, stack_id):
        # Thin wrapper over the heatclient stacks manager; returns the
        # stack object for stack_id.
        return self.stacks.get(stack_id)