diff --git a/tacker/extensions/vnfm.py b/tacker/extensions/vnfm.py
index a42fe4fbf..75cbaf78a 100644
--- a/tacker/extensions/vnfm.py
+++ b/tacker/extensions/vnfm.py
@@ -71,6 +71,10 @@ class VNFCreateWaitFailed(exceptions.TackerException):
     message = _('%(reason)s')
 
 
+class VNFScaleWaitFailed(exceptions.TackerException):
+    message = _('%(reason)s')
+
+
 class VNFDeleteFailed(exceptions.TackerException):
     message = _('deleting VNF %(vnf_id)s failed')
 
diff --git a/tacker/tests/constants.py b/tacker/tests/constants.py
index 3e14a3a88..cce506260 100644
--- a/tacker/tests/constants.py
+++ b/tacker/tests/constants.py
@@ -3,3 +3,4 @@ VNF_CIRROS_DELETE_TIMEOUT = 300
 VNF_CIRROS_DEAD_TIMEOUT = 250
 ACTIVE_SLEEP_TIME = 3
 DEAD_SLEEP_TIME = 1
+SCALE_WINDOW_SLEEP_TIME = 120
diff --git a/tacker/tests/etc/samples/sample-tosca-scale-all.yaml b/tacker/tests/etc/samples/sample-tosca-scale-all.yaml
new file mode 100644
index 000000000..ffb5567d0
--- /dev/null
+++ b/tacker/tests/etc/samples/sample-tosca-scale-all.yaml
@@ -0,0 +1,50 @@
+
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: sample-tosca-vnfd-scaling
+
+metadata:
+  template_name: sample-tosca-vnfd-scaling
+
+topology_template:
+  node_templates:
+    VDU1:
+      type: tosca.nodes.nfv.VDU.Tacker
+      capabilities:
+        nfv_compute:
+          properties:
+            num_cpus: 1
+            mem_size: 512 MB
+            disk_size: 1 GB
+      properties:
+        image: cirros-0.3.4-x86_64-uec
+        mgmt_driver: noop
+        availability_zone: nova
+
+    CP1:
+      type: tosca.nodes.nfv.CP.Tacker
+      properties:
+        management: true
+        anti_spoofing_protection: false
+      requirements:
+        - virtualLink:
+            node: VL1
+        - virtualBinding:
+            node: VDU1
+
+    VL1:
+      type: tosca.nodes.nfv.VL
+      properties:
+        network_name: net_mgmt
+        vendor: Tacker
+
+  policies:
+    - SP1:
+        type: tosca.policy.tacker.Scaling
+        properties:
+          increment: 1
+          cooldown: 60
+          min_instances: 1
+          max_instances: 3
+          default_instances: 2
+          targets: [VDU1]
diff --git a/tacker/tests/functional/vnfm/test_tosca_vnf_scale.py b/tacker/tests/functional/vnfm/test_tosca_vnf_scale.py
new file mode 100644
index 000000000..6130a8468
--- /dev/null
+++ b/tacker/tests/functional/vnfm/test_tosca_vnf_scale.py
@@ -0,0 +1,86 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
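+#
+# Functional test for VNF scaling: deploys sample-tosca-scale-all.yaml
+# (policy SP1, default 2 instances of VDU1) and exercises scale-out to 3
+# and scale-in back to 2, verifying the instance count via mgmt_url.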
+
+import json
+import time
+
+from oslo_config import cfg
+
+from tacker.tests import constants
+from tacker.tests.functional import base
+from tacker.tests.utils import read_file
+
+
+CONF = cfg.CONF
+
+
+class VnfTestToscaScale(base.BaseTackerTest):
+    def test_vnf_tosca_scale(self):
+        data = dict()
+        data['tosca'] = read_file('sample-tosca-scale-all.yaml')
+        vnfd_name = 'test_tosca_vnf_scale_all'
+        toscal = data['tosca']
+        tosca_arg = {'vnfd': {'name': vnfd_name,
+                              'attributes': {'vnfd': toscal}}}
+
+        # Create vnfd with tosca template
+        vnfd_instance = self.client.create_vnfd(body=tosca_arg)
+        self.assertIsNotNone(vnfd_instance)
+
+        # Create vnf with vnfd_id
+        vnfd_id = vnfd_instance['vnfd']['id']
+        vnf_name = 'test_tosca_vnf_scale_all'
+        vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name': vnf_name}}
+        vnf_instance = self.client.create_vnf(body=vnf_arg)
+
+        self.validate_vnf_instance(vnfd_instance, vnf_instance)
+
+        vnf_id = vnf_instance['vnf']['id']
+
+        # TODO(kanagaraj-manickam) once load-balancer support is enabled,
+        # update this logic to validate the scaling
+        def _wait(count):
+            self.wait_until_vnf_active(
+                vnf_id,
+                constants.VNF_CIRROS_CREATE_TIMEOUT,
+                constants.ACTIVE_SLEEP_TIME)
+            vnf = self.client.show_vnf(vnf_id)['vnf']
+
+            # {"VDU1": ["10.0.0.14", "10.0.0.5"]}
+            self.assertEqual(count, len(json.loads(vnf['mgmt_url'])['VDU1']))
+
+        _wait(2)
+
+        def _scale(type, count):
+            body = {"scale": {'type': type, 'policy': 'SP1'}}
+            self.client.scale_vnf(vnf_id, body)
+            _wait(count)
+
+        # scale out
+        time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
+        _scale('out', 3)
+
+        # scale in
+        time.sleep(constants.SCALE_WINDOW_SLEEP_TIME)
+        _scale('in', 2)
+
+        # Delete vnf_instance with vnf_id
+        try:
+            self.client.delete_vnf(vnf_id)
+        except Exception:
+            assert False, "VNF delete failed"
+
+        # Delete vnfd_instance
+        self.addCleanup(self.client.delete_vnfd, vnfd_id)
+        self.addCleanup(self.wait_until_vnf_delete, vnf_id,
+                        constants.VNF_CIRROS_DELETE_TIMEOUT)
diff --git a/tacker/vm/plugin.py b/tacker/vm/plugin.py
index ebbe4c425..335e04f7c 100644
--- a/tacker/vm/plugin.py
+++ b/tacker/vm/plugin.py
@@ -469,7 +469,7 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
                     policy=policy['id']
                 )
 
-            LOG.debug(_("Policy %s is validated successfully") % policy)
+            LOG.debug(_("Policy %s is validated successfully"), policy['id'])
 
         def _get_status():
             if policy['action'] == constants.ACTION_SCALE_IN:
@@ -487,7 +487,7 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
                                                  [constants.ACTIVE],
                                                  status)
             LOG.debug(_("Policy %(policy)s vnf is at %(status)s"),
-                      {'policy': policy,
+                      {'policy': policy['id'],
                        'status': status})
             return result
 
@@ -500,14 +500,14 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
                                                       new_status,
                                                       mgmt_url)
             LOG.debug(_("Policy %(policy)s vnf is at %(status)s"),
-                      {'policy': policy,
+                      {'policy': policy['id'],
                        'status': new_status})
             return result
 
         # action
         def _vnf_policy_action():
             try:
-                self._vnf_manager.invoke(
+                last_event_id = self._vnf_manager.invoke(
                     infra_driver,
                     'scale',
                     plugin=self,
@@ -516,24 +516,25 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
                     policy=policy,
                     region_name=region_name
                 )
-                LOG.debug(_("Policy %s action is started successfully") %
-                          policy)
+                LOG.debug(_("Policy %s action started successfully"),
+                          policy['id'])
+                return last_event_id
             except Exception as e:
-                LOG.error(_("Policy %s action is failed to start") %
+                LOG.error(_("Policy %s action failed to start"),
                           policy)
                 with excutils.save_and_reraise_exception():
                     vnf['status'] = constants.ERROR
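+                    # Record the failure reason on the VNF before the
+                    # exception is re-raised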
                     self.set_vnf_error_status_reason(
                         context,
-                        policy['vnf_id'],
+                        policy['vnf']['id'],
                         six.text_type(e))
                     _handle_vnf_scaling_post(constants.ERROR)
 
         # wait
         def _vnf_policy_action_wait():
             try:
-                LOG.debug(_("Policy %s action is in progress") %
-                          policy)
+                LOG.debug(_("Policy %s action is in progress"),
+                          policy['id'])
                 mgmt_url = self._vnf_manager.invoke(
                     infra_driver,
                     'scale_wait',
@@ -541,19 +542,20 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
                     context=context,
                     auth_attr=vim_auth,
                     policy=policy,
-                    region_name=region_name
+                    region_name=region_name,
+                    last_event_id=last_event_id
                 )
-                LOG.debug(_("Policy %s action is completed successfully") %
-                          policy)
+                LOG.debug(_("Policy %s action completed successfully"),
+                          policy['id'])
                 _handle_vnf_scaling_post(constants.ACTIVE, mgmt_url)
                 # TODO(kanagaraj-manickam): Add support for config and mgmt
             except Exception as e:
                 LOG.error(_("Policy %s action is failed to complete") %
-                          policy)
+                          policy['id'])
                 with excutils.save_and_reraise_exception():
                     self.set_vnf_error_status_reason(
                         context,
-                        policy['vnf_id'],
+                        policy['vnf']['id'],
                         six.text_type(e))
                     _handle_vnf_scaling_post(constants.ERROR)
 
@@ -565,7 +567,7 @@ class VNFMPlugin(vm_db.VNFMPluginDb, VNFMMgmtMixin):
         infra_driver = self._infra_driver_name(vnf)
         vim_auth = self.get_vim(context, vnf)
         region_name = vnf.get('placement_attr', {}).get('region_name', None)
-        _vnf_policy_action()
+        last_event_id = _vnf_policy_action()
         self.spawn_n(_vnf_policy_action_wait)
 
         return policy
diff --git a/tacker/vnfm/infra_drivers/heat/heat.py b/tacker/vnfm/infra_drivers/heat/heat.py
index c31f84e70..5991ead4f 100644
--- a/tacker/vnfm/infra_drivers/heat/heat.py
+++ b/tacker/vnfm/infra_drivers/heat/heat.py
@@ -777,12 +777,12 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
         mgmt_ips = {}
         for group_name in group_names:
+            # Get scale group
             grp = heat_client.resource_get(instance_id, group_name)
-            # Get scale group
             for rsc in heat_client.resource_get_list(
                     grp.physical_resource_id):
-                # Get list of resoruces in scale group
+                # Get list of resources in scale group
                 scale_rsc = heat_client.resource_get(
                     grp.physical_resource_id,
                     rsc.resource_name)
@@ -804,11 +804,21 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
               policy,
               region_name):
         heatclient_ = HeatClient(auth_attr, region_name)
-        return heatclient_.resource_signal(policy['instance_id'],
-                                           get_scaling_policy_name(
+        policy_rsc = get_scaling_policy_name(
             policy_name=policy['id'],
             action=policy['action']
-        ))
+        )
+        events = heatclient_.resource_event_list(
+            policy['instance_id'],
+            policy_rsc,
+            limit=1,
+            sort_dir='desc',
+            sort_keys='event_time'
+        )
+
+        heatclient_.resource_signal(policy['instance_id'],
+                                    policy_rsc)
+        return events[0].id
 
     @log.log
     def scale_wait(self,
@@ -816,29 +826,59 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver,
                    plugin,
                    auth_attr,
                    policy,
-                   region_name):
+                   region_name,
+                   last_event_id):
         heatclient_ = HeatClient(auth_attr, region_name)
 
         # TODO(kanagaraj-manickam) make wait logic into separate utility method
         # and make use of it here and other actions like create and delete
+        stack_retries = STACK_RETRIES
         while (True):
-            time.sleep(STACK_RETRY_WAIT)
             try:
-                rsc = heatclient_.resource_get(
-                    policy['instance_id'],
-                    get_scaling_policy_name(policy_name=policy['id'],
-                                            action=policy['action']))
-            except Exception:
-                LOG.exception(_("VNF scaling may not have "
-                                "happened because Heat API request failed "
-                                "while waiting for the stack %(stack)s to be "
-                                "scaled"), {'stack': policy['instance_id']})
-                break
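+                # Read the newest event on the scaling policy resource; an
+                # event id different from last_event_id with resource status
+                # SIGNAL_COMPLETE means this scale action has finished.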
+                time.sleep(STACK_RETRY_WAIT)
+                stack_id = policy['instance_id']
+                policy_name = get_scaling_policy_name(
+                    policy_name=policy['id'],
+                    action=policy['action'])
+                events = heatclient_.resource_event_list(
+                    stack_id,
+                    policy_name,
+                    limit=1,
+                    sort_dir='desc',
+                    sort_keys='event_time')
 
-            if rsc.resource_status == 'SIGNAL_IN_PROGRESS':
-                continue
+                if events[0].id != last_event_id:
+                    if events[0].resource_status == 'SIGNAL_COMPLETE':
+                        break
+            except Exception as e:
+                error_reason = _("VNF scaling failed for stack %(stack)s with "
+                                 "error %(error)s") % {
+                    'stack': policy['instance_id'],
+                    'error': str(e)
+                }
+                LOG.warning(error_reason)
+                raise vnfm.VNFScaleWaitFailed(
+                    vnf_id=policy['vnf']['id'],
+                    reason=error_reason)
 
-            break
+            if stack_retries == 0:
+                metadata = heatclient_.resource_metadata(stack_id, policy_name)
+                if not metadata['scaling_in_progress']:
+                    error_reason = _('Signal received within the cooldown '
+                                     'window; Heat generated no events, so '
+                                     'the signal is ignored')
+                    LOG.warning(error_reason)
+                    break
+                error_reason = _(
+                    "VNF scaling failed to complete within %(wait)s seconds "
+                    "while waiting for the stack %(stack)s to be "
+                    "scaled.") % {'stack': stack_id,
+                                  'wait': STACK_RETRIES * STACK_RETRY_WAIT}
+                LOG.warning(error_reason)
+                raise vnfm.VNFScaleWaitFailed(
+                    vnf_id=policy['vnf']['id'],
+                    reason=error_reason)
+            stack_retries -= 1
 
         def _fill_scaling_group_name():
             vnf = policy['vnf']
@@ -916,3 +956,9 @@ class HeatClient(object):
 
     def resource_get(self, stack_id, rsc_name):
         return self.heat.resources.get(stack_id, rsc_name)
+
+    def resource_event_list(self, stack_id, rsc_name, **kwargs):
+        return self.heat.events.list(stack_id, rsc_name, **kwargs)
+
+    def resource_metadata(self, stack_id, rsc_name):
+        return self.heat.resources.metadata(stack_id, rsc_name)
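+
+    # Thin wrappers over python-heatclient's events.list and
+    # resources.metadata, so callers keep going through HeatClient
+    # instead of touching the heat managers directly.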