Refactor scaling and monitoring policies in Tacker

1. Scaling and monitoring policy nodes fail validation

Since tosca-parser 0.8.0, TOSCA nodes are required to be validated.
Unfortunately, the scaling and monitoring policies do not fully
leverage tosca-parser. This patch fixes that.

2. Support for multiple alarm actions
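
As a rough sketch of the round trip (illustrative only, not the exact Tacker internals): the alarm monitor joins a trigger's action names with '%' into one URL segment, the segment is percent-encoded in the alarm URL, and the alarm receiver decodes and splits it back:

from six.moves.urllib import parse

# Encoding side (monitor): join the trigger's actions into one URL segment.
actions = ['respawn', 'log']
segment = '%'.join(actions)        # 'respawn%log'
encoded = parse.quote(segment)     # 'respawn%25log', as it appears in the URL

# Decoding side (receiver): unquote the segment and recover the actions.
assert parse.unquote(encoded).split('%') == actions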

Closes-bug: #1682098

Change-Id: I29cb35edfe2447628fa93c64583e5cb4f7bee2f8
doantungbk 2017-05-04 10:58:06 -07:00
parent 9677abff15
commit af7d3d7cb1
35 changed files with 526 additions and 414 deletions

View File

@ -248,9 +248,9 @@ function configure_tacker {
# Experimental settings for monitor alarm auth settings,
# Will be changed according to new implementation.
iniset $TACKER_CONF alarm_auth username tacker
iniset $TACKER_CONF alarm_auth password "$SERVICE_PASSWORD"
iniset $TACKER_CONF alarm_auth project_name "$SERVICE_PROJECT_NAME"
iniset $TACKER_CONF alarm_auth username admin
iniset $TACKER_CONF alarm_auth password "$ADMIN_PASSWORD"
iniset $TACKER_CONF alarm_auth project_name admin
iniset $TACKER_CONF alarm_auth url http://$SERVICE_HOST:35357/v3
echo "Creating bridge"

View File

@ -0,0 +1,57 @@
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
description: Demo example
metadata:
template_name: sample-tosca-vnfd
topology_template:
node_templates:
VDU1:
type: tosca.nodes.nfv.VDU.Tacker
capabilities:
nfv_compute:
properties:
disk_size: 1 GB
mem_size: 512 MB
num_cpus: 2
properties:
image: cirros-0.3.5-x86_64-disk
mgmt_driver: noop
availability_zone: nova
metadata: {metering.vnf: VDU1}
CP1:
type: tosca.nodes.nfv.CP.Tacker
properties:
management: true
anti_spoofing_protection: false
requirements:
- virtualLink:
node: VL1
- virtualBinding:
node: VDU1
VL1:
type: tosca.nodes.nfv.VL
properties:
network_name: net_mgmt
vendor: Tacker
policies:
- vdu1_cpu_usage_monitoring_policy:
type: tosca.policies.tacker.Alarming
triggers:
vdu_hcpu_usage_respawning:
event_type:
type: tosca.events.resource.utilization
implementation: ceilometer
meter_name: cpu_util
condition:
threshold: 50
constraint: utilization greater_than 50%
period: 600
evaluations: 1
method: average
comparison_operator: gt
metadata: VDU1
action: [respawn, log]

View File

@ -45,13 +45,13 @@ topology_template:
event_type:
type: tosca.events.resource.utilization
implementation: ceilometer
metrics: cpu_util
meter_name: cpu_util
condition:
threshold: 50
constraint: utilization greater_than 50%
period: 600
evaluations: 1
method: avg
method: average
comparison_operator: gt
metadata: VDU1
actions: [respawn]
action: [respawn]
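
The same schema fix repeats across the sample and test templates in this commit: the trigger key metrics becomes meter_name, the statistic method avg is spelled out as average, and actions becomes action. Sketched as plain Python data mirroring the YAML above (illustrative only, not a Tacker API):

# Alarm trigger before and after this patch, as parsed dicts.
old_trigger = {
    'metrics': 'cpu_util',                            # now: meter_name
    'condition': {'method': 'avg', 'threshold': 50},  # now: 'average'
    'actions': ['respawn'],                           # now: action
}
new_trigger = {
    'meter_name': 'cpu_util',
    'condition': {'method': 'average', 'threshold': 50},
    'action': ['respawn'],
}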

View File

@ -64,13 +64,13 @@ topology_template:
policies:
- SP1:
type: tosca.policies.tacker.Scaling
targets: [VDU1,VDU2]
properties:
increment: 1
cooldown: 120
min_instances: 1
max_instances: 3
default_instances: 2
targets: [VDU1,VDU2]
- vdu_cpu_usage_monitoring_policy:
type: tosca.policies.tacker.Alarming
@ -79,29 +79,28 @@ topology_template:
event_type:
type: tosca.events.resource.utilization
implementation: ceilometer
metrics: cpu_util
meter_name: cpu_util
condition:
threshold: 50
constraint: utilization greater_than 50%
period: 600
evaluations: 1
method: avg
method: average
comparison_operator: gt
metadata: SG1
actions: [SP1]
action: [SP1]
vdu_lcpu_usage_scaling_in:
targets: [VDU1, VDU2]
event_type:
type: tosca.events.resource.utilization
implementation: ceilometer
metrics: cpu_util
meter_name: cpu_util
condition:
threshold: 10
constraint: utilization less_than 10%
period: 600
evaluations: 1
method: avg
method: average
comparison_operator: lt
metadata: SG1
actions: [SP1]
action: [SP1]

View File

@ -56,10 +56,10 @@ topology_template:
policies:
- SP1:
type: tosca.policies.tacker.Scaling
targets: [VDU1, VDU2]
properties:
increment: 1
cooldown: 120
min_instances: 1
max_instances: 3
default_instances: 2
targets: [VDU1, VDU2]

View File

@ -63,7 +63,7 @@ tacker.tacker.alarm_monitor.drivers =
tacker.tacker.policy.actions =
autoscaling = tacker.vnfm.policy_actions.autoscaling.autoscaling:VNFActionAutoscaling
respawn = tacker.vnfm.policy_actions.respawn.respawn:VNFActionRespawn
log_only = tacker.vnfm.policy_actions.log.log:VNFActionLogOnly
log = tacker.vnfm.policy_actions.log.log:VNFActionLog
log_and_kill = tacker.vnfm.policy_actions.log.log:VNFActionLogAndKill
oslo.config.opts =
tacker.common.config = tacker.common.config:config_opts
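These entry points are how Tacker resolves a policy action name to its implementing class. A minimal sketch of loading the renamed 'log' action through stevedore, assuming Tacker is installed (so the namespace is registered) and that the action class takes no constructor arguments:

from stevedore import driver

# Look up the 'log' policy action (formerly 'log_only') by entry point name.
mgr = driver.DriverManager(
    namespace='tacker.tacker.policy.actions',
    name='log',
    invoke_on_load=True)
print(mgr.driver.get_type())   # 'log'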

View File

@ -14,7 +14,7 @@
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from six.moves.urllib import parse as urlparse
from six.moves.urllib import parse
from tacker.vnfm.monitor_drivers.token import Token
from tacker import wsgi
# check alarm url with db --> move to plugin
@ -23,14 +23,12 @@ from tacker import wsgi
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('username', default='tacker',
cfg.StrOpt('username', default='admin',
help=_('User name for alarm monitoring')),
cfg.StrOpt('password', default='nomoresecret',
cfg.StrOpt('password', default='devstack',
help=_('password for alarm monitoring')),
cfg.StrOpt('project_name', default='service',
cfg.StrOpt('project_name', default='admin',
help=_('project name for alarm monitoring')),
cfg.StrOpt('url', default='http://localhost:35357/v3',
help=_('url for alarm monitoring')),
]
cfg.CONF.register_opts(OPTS, 'alarm_auth')
@ -49,10 +47,11 @@ class AlarmReceiver(wsgi.Middleware):
if not self.handle_url(url):
return
prefix, info, params = self.handle_url(req.url)
auth = cfg.CONF.keystone_authtoken
token = Token(username=cfg.CONF.alarm_auth.username,
password=cfg.CONF.alarm_auth.password,
project_name=cfg.CONF.alarm_auth.project_name,
auth_url=cfg.CONF.alarm_auth.url,
auth_url=auth.auth_url + '/v3',
user_domain_name='default',
project_domain_name='default')
@ -79,14 +78,16 @@ class AlarmReceiver(wsgi.Middleware):
def handle_url(self, url):
# alarm_url = 'http://host:port/v1.0/vnfs/vnf-uuid/mon-policy-name/action-name/8ef785' # noqa
parts = urlparse.urlparse(url)
parts = parse.urlparse(url)
p = parts.path.split('/')
if len(p) != 7:
return None
if any((p[0] != '', p[2] != 'vnfs')):
return None
qs = urlparse.parse_qs(parts.query)
# decode action name: respawn%25log
p[5] = parse.unquote(p[5])
qs = parse.parse_qs(parts.query)
params = dict((k, v[0]) for k, v in qs.items())
prefix_url = '/%(collec)s/%(vnf_uuid)s/' % {'collec': p[2],
'vnf_uuid': p[3]}
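
To make the seven-segment check concrete, a hedged walkthrough of how an alarm URL splits (host and the trailing identifiers are made up; 9890 is Tacker's usual API port):

from six.moves.urllib import parse

url = ('http://localhost:9890/v1.0/vnfs/vnf-uuid/'
       'vdu_hcpu_usage_respawning/respawn%25log/8ef785')
p = parse.urlparse(url).path.split('/')
# p == ['', 'v1.0', 'vnfs', 'vnf-uuid',
#       'vdu_hcpu_usage_respawning', 'respawn%25log', '8ef785']
assert len(p) == 7 and p[0] == '' and p[2] == 'vnfs'
p[5] = parse.unquote(p[5])   # 'respawn%log' -- later split on '%'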

View File

@ -45,13 +45,13 @@ topology_template:
event_type:
type: tosca.events.resource.utilization
implementation: ceilometer
metrics: cpu_util
meter_name: cpu_util
condition:
threshold: 50
constraint: utilization greater_than 50%
period: 600
evaluations: 1
method: avg
method: average
comparison_operator: gt
metadata: VDU1
actions: [respawn]
action: [respawn]

View File

@ -40,13 +40,13 @@ topology_template:
policies:
- SP1:
type: tosca.policies.tacker.Scaling
targets: [VDU1]
properties:
increment: 1
cooldown: 60
min_instances: 1
max_instances: 3
default_instances: 2
targets: [VDU1]
- vdu_cpu_usage_monitoring_policy:
type: tosca.policies.tacker.Alarming
@ -55,28 +55,28 @@ topology_template:
event_type:
type: tosca.events.resource.utilization
implementation: ceilometer
metrics: cpu_util
meter_name: cpu_util
condition:
threshold: 50
constraint: utilization greater_than 50%
period: 600
evaluations: 1
method: avg
method: average
comparison_operator: gt
metadata: SG1
actions: [SP1]
action: [SP1]
vdu_hcpu_usage_scaling_in:
event_type:
type: tosca.events.resource.utilization
implementation: ceilometer
metrics: cpu_util
meter_name: cpu_util
condition:
threshold: 10
constraint: utilization less_than 10%
period: 600
evaluations: 1
method: avg
method: average
comparison_operator: lt
metadata: SG1
actions: [SP1]
action: [SP1]

View File

@ -41,10 +41,10 @@ topology_template:
policies:
- SP1:
type: tosca.policies.tacker.Scaling
targets: [VDU1]
properties:
increment: 1
cooldown: 60
min_instances: 1
max_instances: 3
default_instances: 2
targets: [VDU1]

View File

@ -75,9 +75,9 @@ class VnfTestAlarmMonitor(base.BaseTackerTest):
if policy['type'] == constants.POLICY_ALARMING:
triggers = policy['triggers']
for trigger_name, trigger_dict in triggers.items():
policy_action_list = trigger_dict['actions']
for policy_action in policy_action_list:
mon_policy[trigger_name] = policy_action
policy_action_list = trigger_dict['action']
for policy_action_name in policy_action_list:
mon_policy[trigger_name] = policy_action_name
return mon_policy
def verify_policy(policy_dict, kw_policy):
@ -139,13 +139,12 @@ class VnfTestAlarmMonitor(base.BaseTackerTest):
self.addCleanup(self.wait_until_vnf_delete, vnf_id,
constants.VNF_CIRROS_DELETE_TIMEOUT)
@unittest.skip("Related Bug 1682098")
def test_vnf_alarm_respawn(self):
self._test_vnf_tosca_alarm(
'sample-tosca-alarm-respawn.yaml',
'alarm and respawn vnf')
@unittest.skip("Related Bug 1682098")
@unittest.skip("Skip and wait for releasing Heat Translator")
def test_vnf_alarm_scale(self):
self._test_vnf_tosca_alarm(
'sample-tosca-alarm-scale.yaml',

View File

@ -27,7 +27,7 @@ CONF = cfg.CONF
class VnfTestToscaScale(base.BaseTackerTest):
@unittest.skip("Related Bug 1682098")
@unittest.skip("Skip and wait for releasing Heat Translator")
def test_vnf_tosca_scale(self):
data = dict()
data['tosca'] = read_file('sample-tosca-scale-all.yaml')
@ -71,7 +71,7 @@ class VnfTestToscaScale(base.BaseTackerTest):
self.assertIn('VDU1', resources_list)
self.assertIn('CP1', resources_list)
self.assertIn('G1', resources_list)
self.assertIn('SP1_group', resources_list)
def _scale(type, count):
body = {"scale": {'type': type, 'policy': 'SP1'}}

View File

@ -50,6 +50,8 @@ vnfd_alarm_respawn_tosca_template = _get_template(
'test_tosca_vnfd_alarm_respawn.yaml')
vnfd_alarm_scale_tosca_template = _get_template(
'test_tosca_vnfd_alarm_scale.yaml')
vnfd_alarm_multi_actions_tosca_template = _get_template(
'test_tosca_vnfd_alarm_multi_actions.yaml')
nsd_tosca_template = yaml.safe_load(_get_template('tosca_nsd_template.yaml'))
vnffgd_wrong_cp_number_template = yaml.safe_load(_get_template(
'tosca_vnffgd_wrong_cp_number_template.yaml'))

View File

@ -13,7 +13,6 @@
# under the License.
import os
import unittest
import testtools
from toscaparser import tosca_template
@ -80,15 +79,12 @@ class TestSamples(testtools.TestCase):
hot,
"Heat-translator failed to translate %s" % f)
@unittest.skip("Related Bug 1682098")
def test_scale_sample(self, tosca_file=['tosca-vnfd-scale.yaml']):
self._test_samples(tosca_file)
@unittest.skip("Related Bug 1682098")
def test_alarm_sample(self, tosca_file=['tosca-vnfd-alarm-scale.yaml']):
self._test_samples(tosca_file)
@unittest.skip("Related Bug 1682098")
def test_list_samples(self,
files=['tosca-vnfd-scale.yaml',
'tosca-vnfd-alarm-scale.yaml']):

View File

@ -1,24 +1,26 @@
heat_template_version: 2013-05-23
description: 'sample-tosca-vnfd-scaling
'
outputs:
mgmt_ip-VDU1:
value:
get_attr: [CP1, fixed_ips, 0, ip_address]
parameters: {}
resources:
CP1:
properties: {network: net_mgmt, port_security_enabled: false}
type: OS::Neutron::Port
VDU1:
type: OS::Nova::Server
properties:
availability_zone: nova
config_drive: false
flavor: m1.tiny
image: cirros-0.3.5-x86_64-disk
metadata: {metering.vnf: SG1}
networks:
- port: {get_resource: CP1}
user_data_format: SOFTWARE_CONFIG
type: OS::Nova::Server
config_drive: false
networks:
- port: { get_resource: CP1 }
image: cirros-0.3.5-x86_64-disk
flavor: m1.tiny
metadata: {metering.vnf: SG1}
VL1:
type: OS::Neutron::Net
CP1:
type: OS::Neutron::Port
properties:
network: net_mgmt
port_security_enabled: false
heat_template_version: 2013-05-23
description: Tacker Scaling template

View File

@ -1,23 +1,25 @@
heat_template_version: 2013-05-23
description: 'sample-tosca-vnfd-scaling
'
outputs:
mgmt_ip-VDU1:
value:
get_attr: [CP1, fixed_ips, 0, ip_address]
parameters: {}
resources:
CP1:
properties: {network: net_mgmt, port_security_enabled: false}
type: OS::Neutron::Port
VDU1:
type: OS::Nova::Server
properties:
availability_zone: nova
config_drive: false
flavor: m1.tiny
image: cirros-0.3.5-x86_64-disk
networks:
- port: {get_resource: CP1}
user_data_format: SOFTWARE_CONFIG
type: OS::Nova::Server
config_drive: false
networks:
- port: { get_resource: CP1 }
image: cirros-0.3.5-x86_64-disk
flavor: m1.tiny
VL1:
type: OS::Neutron::Net
CP1:
type: OS::Neutron::Port
properties:
network: net_mgmt
port_security_enabled: false
heat_template_version: 2013-05-23
description: Tacker Scaling template

View File

@ -1,26 +1,30 @@
heat_template_version: 2013-05-23
description: Tacker scaling template
description: 'sample-tosca-vnfd-scaling
'
parameters: {}
outputs: {}
resources:
G1:
SP1_group:
properties:
cooldown: 60
desired_capacity: 2
max_size: 3
min_size: 1
resource: {type: scaling.yaml}
cooldown: 60
resource: {type: SP1_res.yaml}
type: OS::Heat::AutoScalingGroup
SP1_scale_in:
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: {get_resource: G1}
auto_scaling_group_id: {get_resource: SP1_group}
cooldown: 60
scaling_adjustment: '-1'
scaling_adjustment: -1
type: OS::Heat::ScalingPolicy
SP1_scale_out:
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: {get_resource: G1}
auto_scaling_group_id: {get_resource: SP1_group}
cooldown: 60
scaling_adjustment: 1
type: OS::Heat::ScalingPolicy

View File

@ -35,7 +35,7 @@ resources:
meter_name: cpu_util
threshold: 50
period: 60
statistic: average
statistic: avg
evaluation_periods: 1
comparison_operator: gt
'matching_metadata': {'metadata.user_metadata.vnf': 'VDU1'}

View File

@ -36,7 +36,7 @@ resources:
meter_name: cpu_util
threshold: 50
period: 60
statistic: average
statistic: avg
evaluation_periods: 1
comparison_operator: gt
'matching_metadata': {'metadata.user_metadata.vnf': 'VDU1'}

View File

@ -1,26 +1,30 @@
heat_template_version: 2013-05-23
description: Tacker scaling template
description: 'sample-tosca-vnfd-scaling
'
parameters: {}
outputs: {}
resources:
G1:
SP1_group:
properties:
cooldown: 60
desired_capacity: 2
max_size: 3
min_size: 1
resource: {type: scaling.yaml}
resource: {type: SP1_res.yaml}
type: OS::Heat::AutoScalingGroup
SP1_scale_in:
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: {get_resource: G1}
auto_scaling_group_id: {get_resource: SP1_group}
cooldown: 60
scaling_adjustment: '-1'
scaling_adjustment: -1
type: OS::Heat::ScalingPolicy
SP1_scale_out:
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: {get_resource: G1}
auto_scaling_group_id: {get_resource: SP1_group}
cooldown: 60
scaling_adjustment: 1
type: OS::Heat::ScalingPolicy

View File

@ -0,0 +1,57 @@
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
description: Demo example
metadata:
template_name: sample-tosca-vnfd
topology_template:
node_templates:
VDU1:
type: tosca.nodes.nfv.VDU.Tacker
capabilities:
nfv_compute:
properties:
disk_size: 1 GB
mem_size: 512 MB
num_cpus: 2
properties:
image: cirros-0.3.5-x86_64-disk
mgmt_driver: noop
availability_zone: nova
metadata: {metering.vnf: VDU1}
CP1:
type: tosca.nodes.nfv.CP.Tacker
properties:
management: true
anti_spoofing_protection: false
requirements:
- virtualLink:
node: VL1
- virtualBinding:
node: VDU1
VL1:
type: tosca.nodes.nfv.VL
properties:
network_name: net_mgmt
vendor: Tacker
policies:
- vdu1_cpu_usage_monitoring_policy:
type: tosca.policies.tacker.Alarming
triggers:
mon_policy_multi_actions:
event_type:
type: tosca.events.resource.utilization
implementation: ceilometer
meter_name: cpu_util
condition:
threshold: 50
constraint: utilization greater_than 50%
period: 600
evaluations: 1
method: average
comparison_operator: gt
metadata: VDU1
action: [respawn, log]

View File

@ -45,13 +45,13 @@ topology_template:
event_type:
type: tosca.events.resource.utilization
implementation: ceilometer
metrics: cpu_util
meter_name: cpu_util
condition:
threshold: 50
constraint: utilization greater_than 50%
period: 600
evaluations: 1
method: avg
method: average
comparison_operator: gt
metadata: VDU1
actions: [respawn]
action: [respawn]

View File

@ -55,13 +55,13 @@ topology_template:
event_type:
type: tosca.events.resource.utilization
implementation: ceilometer
metrics: cpu_util
meter_name: cpu_util
condition:
threshold: 50
constraint: utilization greater_than 50%
period: 600
evaluations: 1
method: avg
method: average
comparison_operator: gt
metadata: SG1
actions: [SP1]
action: [SP1]

View File

@ -48,7 +48,7 @@ topology_template:
event_type:
type: tosca.events.resource.utilization
implementation: Ceilometer
metrics: cpu_util
meter_name: cpu_util
condition:
threshold: 50
constraint: utilization greater_than 50%
@ -57,4 +57,4 @@ topology_template:
method: average
comparison_operator: gt
metadata: VDU1
actions: ''
action: ''

View File

@ -46,7 +46,7 @@ topology_template:
event_type:
type: tosca.events.resource.utilization
implementation: Ceilometer
metrics: cpu_util
meter_name: cpu_util
condition:
threshold: 50
constraint: utilization greater_than 50%
@ -55,4 +55,4 @@ topology_template:
method: average
comparison_operator: gt
metadata: VDU1
actions: ''
action: ''

View File

@ -36,13 +36,13 @@ topology_template:
policies:
- SP1:
type: tosca.policies.tacker.Scaling
targets: [VDU1]
properties:
increment: 1
cooldown: 60
min_instances: 1
max_instances: 3
default_instances: 2
targets: [VDU1]
- vdu_cpu_usage_monitoring_policy:
type: tosca.policies.tacker.Alarming
@ -51,28 +51,28 @@ topology_template:
event_type:
type: tosca.events.resource.utilization
implementation: ceilometer
metrics: cpu_util
meter_name: cpu_util
condition:
threshold: 50
constraint: utilization greater_than 50%
period: 600
evaluations: 1
method: avg
method: average
comparison_operator: gt
metadata: SG1
actions: [SP1]
action: [SP1]
vdu_lcpu_usage_scaling_in:
event_type:
type: tosca.events.resource.utilization
implementation: ceilometer
metrics: cpu_util
meter_name: cpu_util
condition:
threshold: 10
constraint: utilization less_than 10%
period: 600
evaluations: 1
method: avg
method: average
comparison_operator: lt
metadata: SG1
actions: [SP1]
action: [SP1]

View File

@ -35,10 +35,11 @@ topology_template:
policies:
- SP1:
type: tosca.policies.tacker.Scaling
targets: [VDU1]
properties:
increment: 1
cooldown: 60
min_instances: 1
max_instances: 3
default_instances: 2
targets: [VDU1]

View File

@ -17,7 +17,6 @@ import codecs
import json
import mock
import os
import unittest
import yaml
from tacker import context
@ -304,7 +303,7 @@ class TestOpenStack(base.TestCase):
vnf["attributes"][k] = yaml.safe_load(
vnf["attributes"][k])
expected_vnf["attributes"]['scaling_group_names'] = {
'SP1': 'G1'}
'SP1': 'SP1_group'}
vnf["attributes"]['scaling_group_names'] = json.loads(
vnf["attributes"]['scaling_group_names']
)
@ -399,12 +398,11 @@ class TestOpenStack(base.TestCase):
input_params
)
@unittest.skip("Related Bug 1682098")
def test_create_tosca_scale(self):
self._test_assert_equal_for_tosca_templates(
'tosca_scale.yaml',
'hot_scale_main.yaml',
files={'scaling.yaml': 'hot_scale_custom.yaml'},
files={'SP1_res.yaml': 'hot_scale_custom.yaml'},
is_monitor=False
)
@ -434,7 +432,6 @@ class TestOpenStack(base.TestCase):
'hot_tosca_mac_ip.yaml'
)
@unittest.skip("Related Bug 1682098")
def test_create_tosca_alarm_respawn(self):
self._test_assert_equal_for_tosca_templates(
'tosca_alarm_respawn.yaml',
@ -442,16 +439,14 @@ class TestOpenStack(base.TestCase):
is_monitor=False
)
@unittest.skip("Related Bug 1682098")
def test_create_tosca_alarm_scale(self):
self._test_assert_equal_for_tosca_templates(
'tosca_alarm_scale.yaml',
'hot_tosca_alarm_scale.yaml',
files={'scaling.yaml': 'hot_alarm_scale_custom.yaml'},
files={'SP1_res.yaml': 'hot_alarm_scale_custom.yaml'},
is_monitor=False
)
@unittest.skip("Related Bug 1682098")
def test_create_tosca_with_alarm_monitoring_not_matched(self):
self.assertRaises(vnfm.MetadataNotMatched,
self._test_assert_equal_for_tosca_templates,

View File

@ -455,6 +455,14 @@ class TestVNFMPlugin(db_base.SqlTestCase):
self._test_create_vnf_trigger(policy_name="vdu_hcpu_usage_scaling_out",
action_value="SP1-out")
@patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.get_vnf')
def test_create_vnf_trigger_multi_actions(self, mock_get_vnf):
dummy_vnf = self._get_dummy_active_vnf(
utils.vnfd_alarm_multi_actions_tosca_template)
mock_get_vnf.return_value = dummy_vnf
self._test_create_vnf_trigger(policy_name="mon_policy_multi_actions",
action_value="respawn&log")
@patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb.get_vnf')
def test_get_vnf_policies(self, mock_get_vnf):
vnf_id = "6261579e-d6f3-49ad-8bc3-a9cb974778fe"

View File

@ -29,7 +29,8 @@ from collections import OrderedDict
FAILURE = 'tosca.policies.tacker.Failure'
LOG = logging.getLogger(__name__)
MONITORING = 'tosca.policies.tacker.Monitoring'
MONITORING = 'tosca.policies.Monitoring'
SCALING = 'tosca.policies.Scaling'
PLACEMENT = 'tosca.policies.tacker.Placement'
TACKERCP = 'tosca.nodes.nfv.CP.Tacker'
TACKERVDU = 'tosca.nodes.nfv.VDU.Tacker'
@ -82,6 +83,9 @@ HEAT_RESOURCE_MAP = {
"image": "OS::Glance::Image"
}
SCALE_GROUP_RESOURCE = "OS::Heat::AutoScalingGroup"
SCALE_POLICY_RESOURCE = "OS::Heat::ScalingPolicy"
@log.log
def updateimports(template):
@ -167,6 +171,54 @@ def get_vdu_metadata(template):
return metadata
@log.log
def pre_process_alarm_resources(vnf, template, vdu_metadata):
alarm_resources = dict()
matching_metadata = dict()
alarm_actions = dict()
for policy in template.policies:
if (policy.type_definition.is_derived_from(MONITORING)):
matching_metadata =\
_process_matching_metadata(vdu_metadata, policy)
alarm_actions = _process_alarm_actions(vnf, policy)
alarm_resources['matching_metadata'] = matching_metadata
alarm_resources['alarm_actions'] = alarm_actions
return alarm_resources
def _process_matching_metadata(metadata, policy):
matching_mtdata = dict()
triggers = policy.entity_tpl['triggers']
for trigger_name, trigger_dict in triggers.items():
if not (trigger_dict.get('metadata') and metadata):
raise vnfm.MetadataNotMatched()
is_matched = False
for vdu_name, metadata_dict in metadata['vdus'].items():
if trigger_dict['metadata'] ==\
metadata_dict['metering.vnf']:
is_matched = True
if not is_matched:
raise vnfm.MetadataNotMatched()
matching_mtdata[trigger_name] = dict()
matching_mtdata[trigger_name]['metadata.user_metadata.vnf'] =\
trigger_dict['metadata']
return matching_mtdata
def _process_alarm_actions(vnf, policy):
# process alarm url here
triggers = policy.entity_tpl['triggers']
alarm_actions = dict()
for trigger_name, trigger_dict in triggers.items():
alarm_url = vnf['attributes'].get(trigger_name)
if alarm_url:
alarm_url = str(alarm_url)
LOG.debug('Alarm url in heat %s', alarm_url)
alarm_actions[trigger_name] = dict()
alarm_actions[trigger_name]['alarm_actions'] = [alarm_url]
return alarm_actions
@log.log
def get_mgmt_ports(tosca):
mgmt_ports = {}
@ -199,9 +251,10 @@ def add_resources_tpl(heat_dict, hot_res_tpl):
for prop, val in (vdu_dict).items():
heat_dict["resources"][res_name]["properties"][prop] = val
heat_dict["resources"][vdu]["properties"][res] = {
"get_resource": res_name
}
if heat_dict["resources"].get(vdu):
heat_dict["resources"][vdu]["properties"][res] = {
"get_resource": res_name
}
@log.log
@ -254,7 +307,8 @@ def represent_odict(dump, tag, mapping, flow_style=None):
@log.log
def post_process_heat_template(heat_tpl, mgmt_ports, metadata,
res_tpl, unsupported_res_prop=None):
alarm_resources, res_tpl,
unsupported_res_prop=None):
#
# TODO(bobh) - remove when heat-translator can support literal strings.
#
@ -278,8 +332,24 @@ def post_process_heat_template(heat_tpl, mgmt_ports, metadata,
LOG.debug('Added output for %s', outputname)
if metadata:
for vdu_name, metadata_dict in metadata['vdus'].items():
heat_dict['resources'][vdu_name]['properties']['metadata'] =\
metadata_dict
if heat_dict['resources'].get(vdu_name):
heat_dict['resources'][vdu_name]['properties']['metadata'] =\
metadata_dict
matching_metadata = alarm_resources.get('matching_metadata')
alarm_actions = alarm_resources.get('alarm_actions')
if matching_metadata:
for trigger_name, matching_metadata_dict in matching_metadata.items():
if heat_dict['resources'].get(trigger_name):
matching_mtdata = dict()
matching_mtdata['matching_metadata'] =\
matching_metadata[trigger_name]
heat_dict['resources'][trigger_name]['properties'].\
update(matching_mtdata)
if alarm_actions:
for trigger_name, alarm_actions_dict in alarm_actions.items():
if heat_dict['resources'].get(trigger_name):
heat_dict['resources'][trigger_name]['properties']. \
update(alarm_actions_dict)
add_resources_tpl(heat_dict, res_tpl)
for res in heat_dict["resources"].values():
@ -464,3 +534,72 @@ def get_resources_dict(template, flavor_extra_input=None):
else:
res_dict[res] = res_method(template)
return res_dict
@log.log
def get_scaling_policy(template):
scaling_policy_names = list()
for policy in template.policies:
if (policy.type_definition.is_derived_from(SCALING)):
scaling_policy_names.append(policy.name)
return scaling_policy_names
@log.log
def get_scaling_group_dict(ht_template, scaling_policy_names):
scaling_group_dict = dict()
scaling_group_names = list()
heat_dict = yamlparser.simple_ordered_parse(ht_template)
for resource_name, resource_dict in heat_dict['resources'].items():
if resource_dict['type'] == SCALE_GROUP_RESOURCE:
scaling_group_names.append(resource_name)
if scaling_group_names:
scaling_group_dict[scaling_policy_names[0]] = scaling_group_names[0]
return scaling_group_dict
def get_nested_resources_name(template):
for policy in template.policies:
if (policy.type_definition.is_derived_from(SCALING)):
nested_resource_name = policy.name + '_res.yaml'
return nested_resource_name
def update_nested_scaling_resources(nested_resources, mgmt_ports, metadata,
res_tpl, unsupported_res_prop=None):
nested_tpl = dict()
if nested_resources:
nested_resource_name, nested_resources_yaml =\
list(nested_resources.items())[0]
nested_resources_dict =\
yamlparser.simple_ordered_parse(nested_resources_yaml)
if metadata:
for vdu_name, metadata_dict in metadata['vdus'].items():
nested_resources_dict['resources'][vdu_name]['properties']['metadata'] = \
metadata_dict
add_resources_tpl(nested_resources_dict, res_tpl)
for res in nested_resources_dict["resources"].values():
if not res['type'] == HEAT_SOFTWARE_CONFIG:
continue
config = res["properties"]["config"]
if 'get_file' in config:
res["properties"]["config"] = open(config["get_file"]).read()
if unsupported_res_prop:
convert_unsupported_res_prop(nested_resources_dict,
unsupported_res_prop)
for outputname, portname in mgmt_ports.items():
ipval = {'get_attr': [portname, 'fixed_ips', 0, 'ip_address']}
output = {outputname: {'value': ipval}}
if 'outputs' in nested_resources_dict:
nested_resources_dict['outputs'].update(output)
else:
nested_resources_dict['outputs'] = output
LOG.debug(_('Added output for %s'), outputname)
yaml.SafeDumper.add_representer(
OrderedDict, lambda dumper, value: represent_odict(
dumper, u'tag:yaml.org,2002:map', value))
nested_tpl[nested_resource_name] =\
yaml.safe_dump(nested_resources_dict)
return nested_tpl
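
A self-contained sketch of the selection logic in get_scaling_group_dict above, using plain yaml.safe_load in place of Tacker's ordered parser and an inlined minimal HOT template:

import yaml

SCALE_GROUP_RESOURCE = 'OS::Heat::AutoScalingGroup'
ht_template = '''
resources:
  SP1_group:
    type: OS::Heat::AutoScalingGroup
  SP1_scale_out:
    type: OS::Heat::ScalingPolicy
'''
heat_dict = yaml.safe_load(ht_template)
groups = [name for name, res in heat_dict['resources'].items()
          if res['type'] == SCALE_GROUP_RESOURCE]
# The first scaling policy name maps to the first group found.
print({'SP1': groups[0]})   # {'SP1': 'SP1_group'}
# The nested template for policy 'SP1' is then named 'SP1_res.yaml'.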

View File

@ -303,7 +303,7 @@ class OpenStack(abstract_driver.DeviceAbstractDriver,
@log.log
def scale(self, context, plugin, auth_attr, policy, region_name):
heatclient = hc.HeatClient(auth_attr, region_name)
policy_rsc = get_scaling_policy_name(policy_name=policy['id'],
policy_rsc = get_scaling_policy_name(policy_name=policy['name'],
action=policy['action'])
events = heatclient.resource_event_list(policy['instance_id'],
policy_rsc, limit=1,
@ -325,8 +325,8 @@ class OpenStack(abstract_driver.DeviceAbstractDriver,
try:
time.sleep(self.STACK_RETRY_WAIT)
stack_id = policy['instance_id']
policy_name = get_scaling_policy_name(policy_name=policy['id'],
action=policy['action'])
policy_name = get_scaling_policy_name(
policy_name=policy['name'], action=policy['action'])
events = heatclient.resource_event_list(stack_id, policy_name,
limit=1,
sort_dir='desc',

View File

@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
@ -25,7 +23,6 @@ from tacker.extensions import common_services as cs
from tacker.extensions import vnfm
from tacker.tosca import utils as toscautils
from collections import OrderedDict
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -49,10 +46,6 @@ ALARMING_POLICY = 'tosca.policies.tacker.Alarming'
SCALING_POLICY = 'tosca.policies.tacker.Scaling'
def get_scaling_policy_name(action, policy_name):
return '%s_scale_%s' % (policy_name, action)
class TOSCAToHOT(object):
"""Convert TOSCA template to HOT template."""
@ -64,6 +57,7 @@ class TOSCAToHOT(object):
self.unsupported_props = {}
self.heat_template_yaml = None
self.monitoring_dict = None
self.nested_resources = dict()
self.fields = None
self.STACK_FLAVOR_EXTRA = cfg.CONF.openstack_vim.flavor_extra_specs
@ -77,13 +71,10 @@ class TOSCAToHOT(object):
LOG.debug('vnfd_dict %s', vnfd_dict)
self._get_unsupported_resource_props(self.heatclient)
is_tosca_format = False
self._generate_hot_from_tosca(vnfd_dict, dev_attrs)
is_tosca_format = True
self.fields['template'] = self.heat_template_yaml
if is_tosca_format:
self._handle_policies(vnfd_dict)
if not self.vnf['attributes'].get('heat_template'):
self.vnf['attributes']['heat_template'] = self.fields['template']
if self.monitoring_dict:
self.vnf['attributes']['monitoring_policy'] = jsonutils.dumps(
self.monitoring_dict)
@ -122,37 +113,6 @@ class TOSCAToHOT(object):
self.fields = fields
return dev_attrs
@log.log
def _handle_policies(self, vnfd_dict):
vnf = self.vnf
(is_scaling_needed, scaling_group_names,
main_dict) = self._generate_hot_scaling(
vnfd_dict['topology_template'], 'scaling.yaml')
(is_enabled_alarm, alarm_resource, heat_tpl_yaml) =\
self._generate_hot_alarm_resource(vnfd_dict['topology_template'])
if is_enabled_alarm and not is_scaling_needed:
self.fields['template'] = heat_tpl_yaml
if is_scaling_needed:
if is_enabled_alarm:
main_dict['resources'].update(alarm_resource)
main_yaml = yaml.safe_dump(main_dict)
self.fields['template'] = main_yaml
self.fields['files'] = {'scaling.yaml': self.heat_template_yaml}
vnf['attributes']['heat_template'] = main_yaml
# TODO(kanagaraj-manickam) when multiple groups are
# supported, make this scaling attribute as
# scaling name vs scaling template map and remove
# scaling_group_names
vnf['attributes']['scaling.yaml'] = self.heat_template_yaml
vnf['attributes']['scaling_group_names'] = jsonutils.dumps(
scaling_group_names)
elif not vnf['attributes'].get('heat_template'):
vnf['attributes']['heat_template'] = self.fields['template']
self.vnf = vnf
@log.log
def _update_params(self, original, paramvalues, match=False):
for key, value in (original).items():
@ -305,131 +265,55 @@ class TOSCAToHOT(object):
raise vnfm.ToscaParserFailed(error_msg_details=str(e))
metadata = toscautils.get_vdu_metadata(tosca)
alarm_resources =\
toscautils.pre_process_alarm_resources(self.vnf, tosca, metadata)
monitoring_dict = toscautils.get_vdu_monitoring(tosca)
mgmt_ports = toscautils.get_mgmt_ports(tosca)
nested_resource_name = toscautils.get_nested_resources_name(tosca)
res_tpl = toscautils.get_resources_dict(tosca,
self.STACK_FLAVOR_EXTRA)
toscautils.post_process_template(tosca)
scaling_policy_names = toscautils.get_scaling_policy(tosca)
try:
translator = tosca_translator.TOSCATranslator(tosca,
parsed_params)
heat_template_yaml = translator.translate()
if nested_resource_name:
sub_heat_template_yaml =\
translator.translate_to_yaml_files_dict(
nested_resource_name, True)
nested_resource_yaml =\
sub_heat_template_yaml[nested_resource_name]
self.nested_resources[nested_resource_name] =\
nested_resource_yaml
except Exception as e:
LOG.debug("heat-translator error: %s", str(e))
raise vnfm.HeatTranslatorFailed(error_msg_details=str(e))
if self.nested_resources:
nested_tpl = toscautils.update_nested_scaling_resources(
self.nested_resources, mgmt_ports, metadata,
res_tpl, self.unsupported_props)
self.fields['files'] = nested_tpl
self.vnf['attributes'][nested_resource_name] =\
nested_tpl[nested_resource_name]
mgmt_ports.clear()
if scaling_policy_names:
scaling_group_dict = toscautils.get_scaling_group_dict(
heat_template_yaml, scaling_policy_names)
self.vnf['attributes']['scaling_group_names'] =\
jsonutils.dumps(scaling_group_dict)
heat_template_yaml = toscautils.post_process_heat_template(
heat_template_yaml, mgmt_ports, metadata,
heat_template_yaml, mgmt_ports, metadata, alarm_resources,
res_tpl, self.unsupported_props)
self.heat_template_yaml = heat_template_yaml
self.monitoring_dict = monitoring_dict
self.metadata = metadata
@log.log
def _generate_hot_scaling(self, vnfd_dict,
scale_resource_type="OS::Nova::Server"):
# Initialize the template
template_dict = yaml.safe_load(HEAT_TEMPLATE_BASE)
template_dict['description'] = 'Tacker scaling template'
parameters = {}
template_dict['parameters'] = parameters
# Add scaling related resource defs
resources = {}
scaling_group_names = {}
# policies:
# - SP1:
# type: tosca.policies.tacker.Scaling
if 'policies' in vnfd_dict:
for policy_dict in vnfd_dict['policies']:
name, policy = list(policy_dict.items())[0]
if policy['type'] == SCALING_POLICY:
resources, scaling_group_names =\
self._convert_to_heat_scaling_policy(
policy['properties'], scale_resource_type, name)
# TODO(kanagaraj-manickam) only one policy is supported
# for all vdus. remove this break, once this limitation
# is addressed.
break
template_dict['resources'] = resources
# First return value helps to check if scaling resources exist
return ((len(template_dict['resources']) > 0), scaling_group_names,
template_dict)
@log.log
def _convert_to_heat_scaling_group(self, policy_prp, scale_resource_type,
name):
group_hot = {'type': 'OS::Heat::AutoScalingGroup'}
properties = {}
properties['min_size'] = policy_prp['min_instances']
properties['max_size'] = policy_prp['max_instances']
properties['desired_capacity'] = policy_prp['default_instances']
properties['cooldown'] = policy_prp['cooldown']
properties['resource'] = {}
# TODO(kanagaraj-manickam) all VDU members are considered as 1
# group now and make it to form the groups based on the VDU
# list mentioned in the policy's targets
# scale_resource_type is custome type mapped the HOT template
# generated for all VDUs in the tosca template
properties['resource']['type'] = scale_resource_type
# TODO(kanagraj-manickam) add custom type params here, to
# support parameterized template
group_hot['properties'] = properties
return group_hot
# TODO(kanagaraj-manickam) now only one group is supported, so name
# is hard-coded with G1
@log.log
def _get_scale_group_name(self, targets):
return 'G1'
# tosca policies
#
# properties:
# adjust_by: 1
# cooldown: 120
# targets: [G1]
@log.log
def _convert_to_heat_scaling_policy(self, policy_prp, scale_resource_type,
name):
# Add scaling related resource defs
resources = {}
scaling_group_names = {}
# Form the group
scale_grp = self._get_scale_group_name(policy_prp['targets'])
scaling_group_names[name] = scale_grp
resources[scale_grp] = self._convert_to_heat_scaling_group(
policy_prp, scale_resource_type, scale_grp)
grp_id = {'get_resource': scale_grp}
policy_hot = {'type': 'OS::Heat::ScalingPolicy'}
properties = {}
properties['adjustment_type'] = 'change_in_capacity'
properties['cooldown'] = policy_prp['cooldown']
properties['scaling_adjustment'] = policy_prp['increment']
properties['auto_scaling_group_id'] = grp_id
policy_hot['properties'] = properties
# Add scale_out policy
policy_rsc_name = get_scaling_policy_name(action='out',
policy_name=name)
resources[policy_rsc_name] = policy_hot
# Add scale_in policy
in_value = '-%d' % int(policy_prp['increment'])
policy_hot_in = copy.deepcopy(policy_hot)
policy_hot_in['properties']['scaling_adjustment'] = in_value
policy_rsc_name = get_scaling_policy_name(action='in',
policy_name=name)
resources[policy_rsc_name] = policy_hot_in
return resources, scaling_group_names
@log.log
def represent_odict(self, dump, tag, mapping, flow_style=None):
value = []
@ -455,74 +339,3 @@ class TOSCAToHOT(object):
else:
node.flow_style = best_style
return node
@log.log
def _generate_hot_alarm_resource(self, topology_tpl_dict):
alarm_resource = dict()
heat_tpl = self.heat_template_yaml
heat_dict = yamlparser.simple_ordered_parse(heat_tpl)
is_enabled_alarm = False
if 'policies' in topology_tpl_dict:
for policy_dict in topology_tpl_dict['policies']:
name, policy_tpl_dict = list(policy_dict.items())[0]
# need to parse triggers here: scaling in/out, respawn,...
if policy_tpl_dict['type'] == \
'tosca.policies.tacker.Alarming':
is_enabled_alarm = True
triggers = policy_tpl_dict['triggers']
for trigger_name, trigger_dict in triggers.items():
alarm_resource[trigger_name] =\
self._convert_to_heat_monitoring_resource({
trigger_name: trigger_dict}, self.vnf)
heat_dict['resources'].update(alarm_resource)
yaml.SafeDumper.add_representer(OrderedDict,
lambda dumper, value: self.represent_odict(dumper,
u'tag:yaml.org,2002:map', value))
heat_tpl_yaml = yaml.safe_dump(heat_dict)
return (is_enabled_alarm,
alarm_resource,
heat_tpl_yaml
)
def _convert_to_heat_monitoring_resource(self, mon_policy, vnf):
mon_policy_hot = {'type': 'OS::Aodh::Alarm'}
mon_policy_hot['properties'] = \
self._convert_to_heat_monitoring_prop(mon_policy, vnf)
return mon_policy_hot
def _convert_to_heat_monitoring_prop(self, mon_policy, vnf):
metadata = self.metadata
trigger_name, trigger_dict = list(mon_policy.items())[0]
tpl_condition = trigger_dict['condition']
properties = dict()
if not (trigger_dict.get('metadata') and metadata):
raise vnfm.MetadataNotMatched()
matching_metadata_dict = dict()
properties['meter_name'] = trigger_dict['metrics']
is_matched = False
for vdu_name, metadata_dict in metadata['vdus'].items():
if trigger_dict['metadata'] ==\
metadata_dict['metering.vnf']:
is_matched = True
if not is_matched:
raise vnfm.MetadataNotMatched()
matching_metadata_dict['metadata.user_metadata.vnf'] =\
trigger_dict['metadata']
properties['matching_metadata'] = \
matching_metadata_dict
properties['comparison_operator'] = \
tpl_condition['comparison_operator']
properties['period'] = tpl_condition['period']
properties['evaluation_periods'] = tpl_condition['evaluations']
properties['statistic'] = tpl_condition['method']
properties['description'] = tpl_condition['constraint']
properties['threshold'] = tpl_condition['threshold']
# alarm url process here
alarm_url = vnf['attributes'].get(trigger_name)
if alarm_url:
alarm_url = str(alarm_url)
LOG.debug('Alarm url in heat %s', alarm_url)
properties['alarm_actions'] = [alarm_url]
return properties

View File

@ -220,8 +220,9 @@ class VNFAlarmMonitor(object):
params['vnf_id'] = vnf['id']
params['mon_policy_name'] = trigger_name
driver = trigger_dict['event_type']['implementation']
policy_action_list = trigger_dict.get('actions')
if len(policy_action_list) == 0:
# TODO(Tung Doan) trigger_dict.get('actions') needs to be used
policy_action = trigger_dict.get('action')
if len(policy_action) == 0:
_log_monitor_events(t_context.get_admin_context(),
vnf,
"Alarm not set: policy action missing")
@ -234,8 +235,9 @@ class VNFAlarmMonitor(object):
'policy_name': bk_policy_name,
'action_name': bk_action_name}
return policy
for policy_action in policy_action_list:
filters = {'name': policy_action}
for index, policy_action_name in enumerate(policy_action):
filters = {'name': policy_action_name}
bkend_policies =\
plugin.get_vnf_policies(context, vnf['id'], filters)
if bkend_policies:
@ -244,16 +246,19 @@ class VNFAlarmMonitor(object):
cp = trigger_dict['condition'].\
get('comparison_operator')
scaling_type = 'out' if cp == 'gt' else 'in'
policy_action = _refactor_backend_policy(policy_action,
scaling_type)
policy_action[index] = _refactor_backend_policy(
policy_action_name, scaling_type)
params['mon_policy_action'] = policy_action
alarm_url[trigger_name] =\
self.call_alarm_url(driver, vnf, params)
details = "Alarm URL set successfully: %s" % alarm_url
_log_monitor_events(t_context.get_admin_context(),
vnf,
details)
# Support multiple actions, e.g. respawn%notify
action_name = '%'.join(policy_action)
params['mon_policy_action'] = action_name
alarm_url[trigger_name] =\
self.call_alarm_url(driver, vnf, params)
details = "Alarm URL set successfully: %s" % alarm_url
_log_monitor_events(t_context.get_admin_context(),
vnf,
details)
return alarm_url
def process_alarm_for_vnf(self, vnf, trigger):
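
Pulling the new multi-action pieces together, a hedged sketch of the build side (the helper body is assumed from how the plugin later splits 'SP1-out' on '-'):

def _refactor_backend_policy(bk_policy_name, scaling_type):
    # Assumed shape: backend scaling actions become '<policy>-<direction>'.
    return '%s-%s' % (bk_policy_name, scaling_type)

policy_action = ['SP1', 'log']
cp = 'gt'                                # comparison_operator from the trigger
scaling_type = 'out' if cp == 'gt' else 'in'
policy_action[0] = _refactor_backend_policy(policy_action[0], scaling_type)
action_name = '%'.join(policy_action)    # 'SP1-out%log', embedded in the URL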

View File

@ -22,6 +22,7 @@ import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
from toscaparser.tosca_template import ToscaTemplate
from tacker.api.v1 import attributes
@ -122,7 +123,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
OPTS_POLICY_ACTION = [
cfg.ListOpt(
'policy_action', default=['autoscaling', 'respawn',
'log_only', 'log_and_kill'],
'log', 'log_and_kill'],
help=_('Hosting vnf drivers tacker plugin will use')),
]
cfg.CONF.register_opts(OPTS_POLICY_ACTION, 'tacker')
@ -542,7 +543,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
raise exceptions.VnfPolicyTypeInvalid(
type=type,
valid_types=constants.POLICY_ACTIONS.keys(),
policy=policy['id']
policy=policy['name']
)
action = policy['action']
@ -550,10 +551,10 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
raise exceptions.VnfPolicyActionInvalid(
action=action,
valid_actions=constants.POLICY_ACTIONS[type],
policy=policy['id']
policy=policy['name']
)
LOG.debug("Policy %s is validated successfully", policy['id'])
LOG.debug("Policy %s is validated successfully", policy['name'])
def _get_status():
if policy['action'] == constants.ACTION_SCALE_IN:
@ -571,7 +572,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
[constants.ACTIVE],
status)
LOG.debug("Policy %(policy)s vnf is at %(status)s",
{'policy': policy['id'],
{'policy': policy['name'],
'status': status})
return result
@ -584,7 +585,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
new_status,
mgmt_url)
LOG.debug("Policy %(policy)s vnf is at %(status)s",
{'policy': policy['id'],
{'policy': policy['name'],
'status': new_status})
return result
@ -601,7 +602,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
region_name=region_name
)
LOG.debug("Policy %s action is started successfully",
policy['id'])
policy['name'])
return last_event_id
except Exception as e:
LOG.error("Policy %s action is failed to start",
@ -618,7 +619,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
def _vnf_policy_action_wait():
try:
LOG.debug("Policy %s action is in progress",
policy['id'])
policy['name'])
mgmt_url = self._vnf_manager.invoke(
infra_driver,
'scale_wait',
@ -630,12 +631,12 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
last_event_id=last_event_id
)
LOG.debug("Policy %s action is completed successfully",
policy['id'])
policy['name'])
_handle_vnf_scaling_post(constants.ACTIVE, mgmt_url)
# TODO(kanagaraj-manickam): Add support for config and mgmt
except Exception as e:
LOG.error("Policy %s action is failed to complete",
policy['id'])
policy['name'])
with excutils.save_and_reraise_exception():
self.set_vnf_error_status_reason(
context,
@ -665,7 +666,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
p['properties'] = policy.get('properties') or policy.get('triggers')
p['vnf'] = vnf
p['name'] = name
p['id'] = p['name']
p['id'] = uuidutils.generate_uuid()
return p
def get_vnf_policies(
@ -704,14 +705,16 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
filters={'name': policy_id})
if policies:
return policies[0]
raise exceptions.VnfPolicyNotFound(policy=policy_id,
vnf_id=vnf_id)
else:
return None
def create_vnf_scale(self, context, vnf_id, scale):
policy_ = self.get_vnf_policy(context,
scale['scale']['policy'],
vnf_id)
if not policy_:
raise exceptions.VnfPolicyNotFound(policy=scale['scale']['policy'],
vnf_id=vnf_id)
policy_.update({'action': scale['scale']['type']})
self._handle_vnf_scaling(context, policy_)
@ -732,26 +735,39 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
if not self._vnf_alarm_monitor.process_alarm_for_vnf(vnf_id, trigger):
raise exceptions.AlarmUrlInvalid(vnf_id=vnf_id)
policy_ = None
action_ = None
# validate policy action. if action is composite, split it.
# ex: SP1-in, SP1-out
# ex: respawn%notify
action = trigger['action_name']
sp_action = action.split('-')
if len(sp_action) == 2:
bk_policy_name = sp_action[0]
bk_policy_action = sp_action[1]
policies_ = self.get_vnf_policies(context, vnf_id,
filters={'name': bk_policy_name})
if policies_:
policy_ = policies_[0]
action_ = bk_policy_action
action_list = action.split('%')
pl_action_dict = dict()
pl_action_dict['policy_actions'] = dict()
pl_action_dict['policy_actions']['def_actions'] = list()
pl_action_dict['policy_actions']['custom_actions'] = dict()
for action in action_list:
# validate policy action. if action is composite, split it.
# ex: SP1-in, SP1-out
action_ = None
if action in constants.DEFAULT_ALARM_ACTIONS:
pl_action_dict['policy_actions']['def_actions'].append(action)
policy_ = self.get_vnf_policy(context, action, vnf_id)
if not policy_:
sp_action = action.split('-')
if len(sp_action) == 2:
bk_policy_name = sp_action[0]
bk_policy_action = sp_action[1]
policies_ = self.get_vnf_policies(
context, vnf_id, filters={'name': bk_policy_name})
if policies_:
policy_ = policies_[0]
action_ = bk_policy_action
if policy_:
pl_action_dict['policy_actions']['custom_actions'].update(
{policy_['id']: {'bckend_policy': policy_,
'bckend_action': action_}})
if not policy_:
if action not in constants.DEFAULT_ALARM_ACTIONS:
policy_ = self.get_vnf_policy(context, action, vnf_id)
LOG.debug("Trigger %s is validated successfully", trigger)
return policy_, action_
LOG.debug("Trigger %s is validated successfully", trigger)
return pl_action_dict
# validate url
def _get_vnf_triggers(self, context, vnf_id, filters=None, fields=None):
@ -786,36 +802,48 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
action, 'execute_action', plugin=self, context=context,
vnf_dict=vnf_dict, args={})
if trigger.get('bckend_policy'):
bckend_policy = trigger['bckend_policy']
bckend_policy_type = bckend_policy['type']
if bckend_policy_type == constants.POLICY_SCALING:
if vnf_dict['status'] != constants.ACTIVE:
LOG.info("Scaling Policy action skipped due to status:"
" %(status)s for vnf: %(vnfid)s",
{"status": vnf_dict['status'],
"vnfid": vnf_dict['id']})
return
action = 'autoscaling'
scale = {}
scale.setdefault('scale', {})
scale['scale']['type'] = trigger['bckend_action']
scale['scale']['policy'] = bckend_policy['name']
self._vnf_action.invoke(
action, 'execute_action', plugin=self, context=context,
vnf_dict=vnf_dict, args=scale)
# Multiple actions support
if trigger.get('policy_actions'):
policy_actions = trigger['policy_actions']
if policy_actions.get('def_actions'):
for action in policy_actions['def_actions']:
self._vnf_action.invoke(
action, 'execute_action', plugin=self, context=context,
vnf_dict=vnf_dict, args={})
if policy_actions.get('custom_actions'):
custom_actions = policy_actions['custom_actions']
for pl_action, pl_action_dict in custom_actions.items():
bckend_policy = pl_action_dict['bckend_policy']
bckend_action = pl_action_dict['bckend_action']
bckend_policy_type = bckend_policy['type']
if bckend_policy_type == constants.POLICY_SCALING:
if vnf_dict['status'] != constants.ACTIVE:
LOG.info(_("Scaling Policy action"
"skipped due to status:"
"%(status)s for vnf: %(vnfid)s"),
{"status": vnf_dict['status'],
"vnfid": vnf_dict['id']})
return
action = 'autoscaling'
scale = {}
scale.setdefault('scale', {})
scale['scale']['type'] = bckend_action
scale['scale']['policy'] = bckend_policy['name']
self._vnf_action.invoke(
action, 'execute_action', plugin=self,
context=context, vnf_dict=vnf_dict, args=scale)
def create_vnf_trigger(
self, context, vnf_id, trigger):
trigger_ = self.get_vnf_trigger(
context, vnf_id, trigger['trigger']['policy_name'])
# action_name before analyzing
trigger_.update({'action_name': trigger['trigger']['action_name']})
trigger_.update({'params': trigger['trigger']['params']})
bk_policy, bk_action = self._validate_alarming_policy(
policy_actions = self._validate_alarming_policy(
context, vnf_id, trigger_)
if bk_policy:
trigger_.update({'bckend_policy': bk_policy,
'bckend_action': bk_action})
if policy_actions:
trigger_.update(policy_actions)
self._handle_vnf_monitoring(context, trigger_)
return trigger['trigger']
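
A standalone sketch of how the validated composite action is partitioned into default and custom actions (DEFAULT_ALARM_ACTIONS is assumed here to hold the built-in action names; the real code resolves each custom name to a stored policy via get_vnf_policies):

DEFAULT_ALARM_ACTIONS = ['respawn', 'log', 'log_and_kill', 'notify']  # assumed

action = 'SP1-out%log'   # composite action name from the alarm URL
pl_action_dict = {'policy_actions': {'def_actions': [], 'custom_actions': {}}}
for name in action.split('%'):
    if name in DEFAULT_ALARM_ACTIONS:
        pl_action_dict['policy_actions']['def_actions'].append(name)
    else:
        # 'SP1-out' -> backend policy 'SP1', backend action 'out'.
        bk_policy_name, bk_policy_action = name.split('-')
        pl_action_dict['policy_actions']['custom_actions'][bk_policy_name] = {
            'bckend_policy': bk_policy_name, 'bckend_action': bk_policy_action}

# def_actions: ['log']; custom_actions: {'SP1': {..., 'bckend_action': 'out'}}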

View File

@ -32,12 +32,12 @@ def _log_monitor_events(context, vnf_dict, evt_details):
details=evt_details)
class VNFActionLogOnly(abstract_action.AbstractPolicyAction):
class VNFActionLog(abstract_action.AbstractPolicyAction):
def get_type(self):
return 'log_only'
return 'log'
def get_name(self):
return 'log_only'
return 'log'
def get_description(self):
return 'Tacker VNF logging policy'