VNF scaling: Infra (heat) driver update
implements blueprint: #vnf-scaling All the VDUs are considered as one scale group irrespective of list of VDUs specified in the scaling policy targets. This will be addressed in follow-up patches. Change-Id: I21bc0519860bc46f08363dd07333f29316e50bbd
This commit is contained in:
parent
02b52400ea
commit
89cb3c4fbc
@ -0,0 +1,23 @@
|
|||||||
|
heat_template_version: 2013-05-23
|
||||||
|
description: 'sample-tosca-vnfd-scaling
|
||||||
|
|
||||||
|
'
|
||||||
|
outputs:
|
||||||
|
mgmt_ip-VDU1:
|
||||||
|
value:
|
||||||
|
get_attr: [CP1, fixed_ips, 0, ip_address]
|
||||||
|
parameters: {}
|
||||||
|
resources:
|
||||||
|
CP1:
|
||||||
|
properties: {network: private, port_security_enabled: false}
|
||||||
|
type: OS::Neutron::Port
|
||||||
|
VDU1:
|
||||||
|
properties:
|
||||||
|
availability_zone: nova
|
||||||
|
config_drive: false
|
||||||
|
flavor: m1.tiny
|
||||||
|
image: cirros-0.3.4-x86_64-uec
|
||||||
|
networks:
|
||||||
|
- port: {get_resource: CP1}
|
||||||
|
user_data_format: SOFTWARE_CONFIG
|
||||||
|
type: OS::Nova::Server
|
@ -0,0 +1,26 @@
|
|||||||
|
heat_template_version: 2013-05-23
|
||||||
|
description: Tacker scaling template
|
||||||
|
parameters: {}
|
||||||
|
resources:
|
||||||
|
G1:
|
||||||
|
properties:
|
||||||
|
cooldown: 60
|
||||||
|
desired_capacity: 2
|
||||||
|
max_size: 3
|
||||||
|
min_size: 1
|
||||||
|
resource: {type: scaling.yaml}
|
||||||
|
type: OS::Heat::AutoScalingGroup
|
||||||
|
SP1_scale_in:
|
||||||
|
properties:
|
||||||
|
adjustment_type: change_in_capacity
|
||||||
|
auto_scaling_group_id: {get_resource: G1}
|
||||||
|
cooldown: 60
|
||||||
|
scaling_adjustment: '-1'
|
||||||
|
type: OS::Heat::ScalingPolicy
|
||||||
|
SP1_scale_out:
|
||||||
|
properties:
|
||||||
|
adjustment_type: change_in_capacity
|
||||||
|
auto_scaling_group_id: {get_resource: G1}
|
||||||
|
cooldown: 60
|
||||||
|
scaling_adjustment: 1
|
||||||
|
type: OS::Heat::ScalingPolicy
|
@ -0,0 +1,44 @@
|
|||||||
|
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
|
||||||
|
|
||||||
|
description: sample-tosca-vnfd-scaling
|
||||||
|
|
||||||
|
metadata:
|
||||||
|
template_name: sample-tosca-vnfd-scaling
|
||||||
|
|
||||||
|
topology_template:
|
||||||
|
node_templates:
|
||||||
|
VDU1:
|
||||||
|
type: tosca.nodes.nfv.VDU.Tacker
|
||||||
|
properties:
|
||||||
|
image: cirros-0.3.4-x86_64-uec
|
||||||
|
mgmt_driver: noop
|
||||||
|
availability_zone: nova
|
||||||
|
flavor: m1.tiny
|
||||||
|
|
||||||
|
CP1:
|
||||||
|
type: tosca.nodes.nfv.CP.Tacker
|
||||||
|
properties:
|
||||||
|
management: true
|
||||||
|
anti_spoofing_protection: false
|
||||||
|
requirements:
|
||||||
|
- virtualLink:
|
||||||
|
node: VL1
|
||||||
|
- virtualBinding:
|
||||||
|
node: VDU1
|
||||||
|
|
||||||
|
VL1:
|
||||||
|
type: tosca.nodes.nfv.VL
|
||||||
|
properties:
|
||||||
|
network_name: private
|
||||||
|
vendor: Tacker
|
||||||
|
|
||||||
|
policies:
|
||||||
|
- SP1:
|
||||||
|
type: tosca.policy.tacker.Scaling
|
||||||
|
properties:
|
||||||
|
increment: 1
|
||||||
|
cooldown: 60
|
||||||
|
min_instances: 1
|
||||||
|
max_instances: 3
|
||||||
|
default_instances: 2
|
||||||
|
targets: [VDU1]
|
@ -14,6 +14,7 @@
|
|||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
import codecs
|
import codecs
|
||||||
|
import json
|
||||||
import mock
|
import mock
|
||||||
import os
|
import os
|
||||||
import yaml
|
import yaml
|
||||||
@ -214,24 +215,21 @@ class TestDeviceHeat(base.TestCase):
|
|||||||
'-5ff7-4332-b032-50a14d6c1123',
|
'-5ff7-4332-b032-50a14d6c1123',
|
||||||
'template': _get_template(template)}
|
'template': _get_template(template)}
|
||||||
|
|
||||||
def _get_expected_tosca_device(self, tosca_tpl_name, hot_tpl_name,
|
def _get_expected_tosca_device(self,
|
||||||
param_values=''):
|
tosca_tpl_name,
|
||||||
|
hot_tpl_name,
|
||||||
|
param_values='',
|
||||||
|
is_monitor=True):
|
||||||
tosca_tpl = _get_template(tosca_tpl_name)
|
tosca_tpl = _get_template(tosca_tpl_name)
|
||||||
exp_tmpl = self._get_expected_device_template(tosca_tpl)
|
exp_tmpl = self._get_expected_device_template(tosca_tpl)
|
||||||
tosca_hw_dict = yaml.safe_load(_get_template(hot_tpl_name))
|
tosca_hw_dict = yaml.safe_load(_get_template(hot_tpl_name))
|
||||||
return {
|
dvc = {
|
||||||
'device_template': exp_tmpl['device_template'],
|
'device_template': exp_tmpl['device_template'],
|
||||||
'description': u'OpenWRT with services',
|
'description': u'OpenWRT with services',
|
||||||
'attributes': {
|
'attributes': {
|
||||||
'heat_template': tosca_hw_dict,
|
'heat_template': tosca_hw_dict,
|
||||||
'monitoring_policy': '{"vdus": {"VDU1":'
|
'param_values': param_values
|
||||||
' {"ping": {"name": "ping",'
|
},
|
||||||
' "actions": {"failure": "respawn"},'
|
|
||||||
' "parameters": {"count": 3,'
|
|
||||||
' "interval": 10'
|
|
||||||
'}, "monitoring_params": {"count": 3, '
|
|
||||||
'"interval": 10}}}}}',
|
|
||||||
'param_values': param_values},
|
|
||||||
'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
|
'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
|
||||||
'instance_id': None,
|
'instance_id': None,
|
||||||
'mgmt_url': None,
|
'mgmt_url': None,
|
||||||
@ -239,9 +237,21 @@ class TestDeviceHeat(base.TestCase):
|
|||||||
'service_context': [],
|
'service_context': [],
|
||||||
'status': 'PENDING_CREATE',
|
'status': 'PENDING_CREATE',
|
||||||
'template_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
|
'template_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
|
||||||
'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437'}
|
'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437'
|
||||||
|
}
|
||||||
|
# Add montitoring attributes for those yaml, which are having it
|
||||||
|
if is_monitor:
|
||||||
|
dvc['attributes'].update(
|
||||||
|
{'monitoring_policy': '{"vdus": {"VDU1": {"ping": {"name": '
|
||||||
|
'"ping", "actions": {"failure": '
|
||||||
|
'"respawn"}, "parameters": {"count": 3, '
|
||||||
|
'"interval": 10}, "monitoring_params": '
|
||||||
|
'{"count": 3, "interval": 10}}}}}'})
|
||||||
|
|
||||||
|
return dvc
|
||||||
|
|
||||||
def _get_dummy_tosca_device(self, template, input_params=''):
|
def _get_dummy_tosca_device(self, template, input_params=''):
|
||||||
|
|
||||||
tosca_template = _get_template(template)
|
tosca_template = _get_template(template)
|
||||||
device = utils.get_dummy_device_obj()
|
device = utils.get_dummy_device_obj()
|
||||||
dtemplate = self._get_expected_device_template(tosca_template)
|
dtemplate = self._get_expected_device_template(tosca_template)
|
||||||
@ -253,14 +263,19 @@ class TestDeviceHeat(base.TestCase):
|
|||||||
device['attributes']['param_values'] = input_params
|
device['attributes']['param_values'] = input_params
|
||||||
return device
|
return device
|
||||||
|
|
||||||
def _test_assert_equal_for_tosca_templates(self, tosca_tpl_name,
|
def _test_assert_equal_for_tosca_templates(self,
|
||||||
hot_tpl_name, input_params=''):
|
tosca_tpl_name,
|
||||||
|
hot_tpl_name,
|
||||||
|
input_params='',
|
||||||
|
files=None,
|
||||||
|
is_monitor=True):
|
||||||
device = self._get_dummy_tosca_device(tosca_tpl_name, input_params)
|
device = self._get_dummy_tosca_device(tosca_tpl_name, input_params)
|
||||||
expected_result = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
|
expected_result = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
|
||||||
expected_fields = self._get_expected_fields_tosca(hot_tpl_name)
|
expected_fields = self._get_expected_fields_tosca(hot_tpl_name)
|
||||||
expected_device = self._get_expected_tosca_device(tosca_tpl_name,
|
expected_device = self._get_expected_tosca_device(tosca_tpl_name,
|
||||||
hot_tpl_name,
|
hot_tpl_name,
|
||||||
input_params)
|
input_params,
|
||||||
|
is_monitor)
|
||||||
result = self.heat_driver.create(plugin=None, context=self.context,
|
result = self.heat_driver.create(plugin=None, context=self.context,
|
||||||
device=device,
|
device=device,
|
||||||
auth_attr=utils.get_vim_auth_obj())
|
auth_attr=utils.get_vim_auth_obj())
|
||||||
@ -268,11 +283,33 @@ class TestDeviceHeat(base.TestCase):
|
|||||||
actual_fields["template"] = yaml.safe_load(actual_fields["template"])
|
actual_fields["template"] = yaml.safe_load(actual_fields["template"])
|
||||||
expected_fields["template"] = \
|
expected_fields["template"] = \
|
||||||
yaml.safe_load(expected_fields["template"])
|
yaml.safe_load(expected_fields["template"])
|
||||||
|
|
||||||
|
if files:
|
||||||
|
for k, v in actual_fields["files"].items():
|
||||||
|
actual_fields["files"][k] = yaml.safe_load(v)
|
||||||
|
|
||||||
|
expected_fields["files"] = {}
|
||||||
|
for k, v in files.items():
|
||||||
|
expected_fields["files"][k] = yaml.safe_load(_get_template(v))
|
||||||
|
|
||||||
self.assertEqual(expected_fields, actual_fields)
|
self.assertEqual(expected_fields, actual_fields)
|
||||||
device["attributes"]["heat_template"] = yaml.safe_load(
|
device["attributes"]["heat_template"] = yaml.safe_load(
|
||||||
device["attributes"]["heat_template"])
|
device["attributes"]["heat_template"])
|
||||||
self.heat_client.create.assert_called_once_with(expected_fields)
|
self.heat_client.create.assert_called_once_with(expected_fields)
|
||||||
self.assertEqual(expected_result, result)
|
self.assertEqual(expected_result, result)
|
||||||
|
|
||||||
|
if files:
|
||||||
|
expected_fields["files"] = {}
|
||||||
|
for k, v in files.items():
|
||||||
|
expected_device["attributes"][k] = yaml.safe_load(
|
||||||
|
_get_template(v))
|
||||||
|
device["attributes"][k] = yaml.safe_load(
|
||||||
|
device["attributes"][k])
|
||||||
|
expected_device["attributes"]['scaling_group_names'] = {
|
||||||
|
'SP1': 'G1'}
|
||||||
|
device["attributes"]['scaling_group_names'] = json.loads(
|
||||||
|
device["attributes"]['scaling_group_names']
|
||||||
|
)
|
||||||
self.assertEqual(expected_device, device)
|
self.assertEqual(expected_device, device)
|
||||||
|
|
||||||
def test_create_tosca(self):
|
def test_create_tosca(self):
|
||||||
@ -363,3 +400,11 @@ class TestDeviceHeat(base.TestCase):
|
|||||||
'hot_tosca_generic_vnfd_params.yaml',
|
'hot_tosca_generic_vnfd_params.yaml',
|
||||||
input_params
|
input_params
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def test_create_tosca_scale(self):
|
||||||
|
self._test_assert_equal_for_tosca_templates(
|
||||||
|
'tosca_scale.yaml',
|
||||||
|
'hot_scale_main.yaml',
|
||||||
|
files={'scaling.yaml': 'hot_scale_custom.yaml'},
|
||||||
|
is_monitor=False
|
||||||
|
)
|
||||||
|
@ -13,6 +13,7 @@
|
|||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
import copy
|
||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
|
|
||||||
@ -30,11 +31,13 @@ from tacker.common import clients
|
|||||||
from tacker.common import log
|
from tacker.common import log
|
||||||
from tacker.extensions import vnfm
|
from tacker.extensions import vnfm
|
||||||
from tacker.vm.infra_drivers import abstract_driver
|
from tacker.vm.infra_drivers import abstract_driver
|
||||||
|
from tacker.vm.infra_drivers import scale_driver
|
||||||
from tacker.vm.tosca import utils as toscautils
|
from tacker.vm.tosca import utils as toscautils
|
||||||
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
|
|
||||||
OPTS = [
|
OPTS = [
|
||||||
cfg.IntOpt('stack_retries',
|
cfg.IntOpt('stack_retries',
|
||||||
default=60,
|
default=60,
|
||||||
@ -48,6 +51,7 @@ OPTS = [
|
|||||||
default={},
|
default={},
|
||||||
help=_("Flavor Extra Specs")),
|
help=_("Flavor Extra Specs")),
|
||||||
]
|
]
|
||||||
|
|
||||||
CONF.register_opts(OPTS, group='tacker_heat')
|
CONF.register_opts(OPTS, group='tacker_heat')
|
||||||
|
|
||||||
|
|
||||||
@ -72,9 +76,15 @@ HEAT_TEMPLATE_BASE = """
|
|||||||
heat_template_version: 2013-05-23
|
heat_template_version: 2013-05-23
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
OUTPUT_PREFIX = 'mgmt_ip-'
|
||||||
|
|
||||||
class DeviceHeat(abstract_driver.DeviceAbstractDriver):
|
|
||||||
|
|
||||||
|
def get_scaling_policy_name(action, policy_name):
|
||||||
|
return '%s_scale_%s' % (policy_name, action)
|
||||||
|
|
||||||
|
|
||||||
|
class DeviceHeat(abstract_driver.DeviceAbstractDriver,
|
||||||
|
scale_driver.VnfScaleAbstractDriver):
|
||||||
"""Heat driver of hosting device."""
|
"""Heat driver of hosting device."""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
@ -334,6 +344,112 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver):
|
|||||||
|
|
||||||
return heat_template_yaml, monitoring_dict
|
return heat_template_yaml, monitoring_dict
|
||||||
|
|
||||||
|
def generate_hot_scaling(vnfd_dict,
|
||||||
|
scale_resource_type="OS::Nova::Server"):
|
||||||
|
# Initialize the template
|
||||||
|
template_dict = yaml.load(HEAT_TEMPLATE_BASE)
|
||||||
|
template_dict['description'] = 'Tacker scaling template'
|
||||||
|
|
||||||
|
parameters = {}
|
||||||
|
template_dict['parameters'] = parameters
|
||||||
|
|
||||||
|
# Add scaling related resource defs
|
||||||
|
resources = {}
|
||||||
|
scaling_group_names = {}
|
||||||
|
|
||||||
|
# TODO(kanagaraj-manickam) now only one group is supported, so name
|
||||||
|
# is hard-coded with G1
|
||||||
|
def _get_scale_group_name(targets):
|
||||||
|
return 'G1'
|
||||||
|
|
||||||
|
def _convert_to_heat_scaling_group(policy_prp,
|
||||||
|
scale_resource_type,
|
||||||
|
name):
|
||||||
|
group_hot = {'type': 'OS::Heat::AutoScalingGroup'}
|
||||||
|
properties = {}
|
||||||
|
properties['min_size'] = policy_prp['min_instances']
|
||||||
|
properties['max_size'] = policy_prp['max_instances']
|
||||||
|
properties['desired_capacity'] = policy_prp[
|
||||||
|
'default_instances']
|
||||||
|
properties['cooldown'] = policy_prp['cooldown']
|
||||||
|
properties['resource'] = {}
|
||||||
|
# TODO(kanagaraj-manickam) all VDU members are considered as 1
|
||||||
|
# group now and make it to form the groups based on the VDU
|
||||||
|
# list mentioned in the policy's targets
|
||||||
|
# scale_resource_type is a custom type mapped to the HOT template
|
||||||
|
# generated for all VDUs in the tosca template
|
||||||
|
properties['resource']['type'] = scale_resource_type
|
||||||
|
|
||||||
|
# TODO(kanagaraj-manickam) add custom type params here, to
|
||||||
|
# support parameterized template
|
||||||
|
group_hot['properties'] = properties
|
||||||
|
|
||||||
|
return group_hot
|
||||||
|
|
||||||
|
# tosca policies
|
||||||
|
#
|
||||||
|
# properties:
|
||||||
|
# adjust_by: 1
|
||||||
|
# cooldown: 120
|
||||||
|
# targets: [G1]
|
||||||
|
def _convert_to_heat_scaling_policy(policy_prp, name):
|
||||||
|
# Form the group
|
||||||
|
scale_grp = _get_scale_group_name(policy_prp['targets'])
|
||||||
|
scaling_group_names[name] = scale_grp
|
||||||
|
resources[scale_grp] = _convert_to_heat_scaling_group(
|
||||||
|
policy_prp,
|
||||||
|
scale_resource_type,
|
||||||
|
scale_grp)
|
||||||
|
|
||||||
|
grp_id = {'get_resource': scale_grp}
|
||||||
|
|
||||||
|
policy_hot = {'type': 'OS::Heat::ScalingPolicy'}
|
||||||
|
properties = {}
|
||||||
|
properties['adjustment_type'] = 'change_in_capacity'
|
||||||
|
properties['cooldown'] = policy_prp['cooldown']
|
||||||
|
properties['scaling_adjustment'] = policy_prp['increment']
|
||||||
|
properties['auto_scaling_group_id'] = grp_id
|
||||||
|
policy_hot['properties'] = properties
|
||||||
|
|
||||||
|
# Add scale_out policy
|
||||||
|
policy_rsc_name = get_scaling_policy_name(
|
||||||
|
action='out',
|
||||||
|
policy_name=name
|
||||||
|
)
|
||||||
|
resources[policy_rsc_name] = policy_hot
|
||||||
|
|
||||||
|
# Add scale_in policy
|
||||||
|
in_value = '-%d' % int(policy_prp['increment'])
|
||||||
|
policy_hot_in = copy.deepcopy(policy_hot)
|
||||||
|
policy_hot_in['properties'][
|
||||||
|
'scaling_adjustment'] = in_value
|
||||||
|
policy_rsc_name = get_scaling_policy_name(
|
||||||
|
action='in',
|
||||||
|
policy_name=name
|
||||||
|
)
|
||||||
|
resources[policy_rsc_name] = policy_hot_in
|
||||||
|
|
||||||
|
# policies:
|
||||||
|
# - SP1:
|
||||||
|
# type: tosca.policy.tacker.Scaling
|
||||||
|
if 'policies' in vnfd_dict:
|
||||||
|
for policy_dict in vnfd_dict['policies']:
|
||||||
|
name, policy = policy_dict.items()[0]
|
||||||
|
if policy['type'] == 'tosca.policy.tacker.Scaling':
|
||||||
|
_convert_to_heat_scaling_policy(policy['properties'],
|
||||||
|
name)
|
||||||
|
# TODO(kanagaraj-manickam) only one policy is supported
|
||||||
|
# for all vdus. remove this break, once this limitation
|
||||||
|
# is addressed.
|
||||||
|
break
|
||||||
|
|
||||||
|
template_dict['resources'] = resources
|
||||||
|
|
||||||
|
# First return value helps to check if scaling resources exist
|
||||||
|
return ((len(template_dict['resources']) > 0),
|
||||||
|
scaling_group_names,
|
||||||
|
template_dict)
|
||||||
|
|
||||||
def generate_hot_from_legacy(vnfd_dict):
|
def generate_hot_from_legacy(vnfd_dict):
|
||||||
assert 'template' not in fields
|
assert 'template' not in fields
|
||||||
assert 'template_url' not in fields
|
assert 'template_url' not in fields
|
||||||
@ -420,18 +536,43 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver):
|
|||||||
vnfd_dict = yamlparser.simple_ordered_parse(vnfd_yaml)
|
vnfd_dict = yamlparser.simple_ordered_parse(vnfd_yaml)
|
||||||
LOG.debug('vnfd_dict %s', vnfd_dict)
|
LOG.debug('vnfd_dict %s', vnfd_dict)
|
||||||
|
|
||||||
|
is_tosca_format = False
|
||||||
if 'tosca_definitions_version' in vnfd_dict:
|
if 'tosca_definitions_version' in vnfd_dict:
|
||||||
(heat_template_yaml,
|
(heat_template_yaml,
|
||||||
monitoring_dict) = generate_hot_from_tosca(vnfd_dict)
|
monitoring_dict) = generate_hot_from_tosca(vnfd_dict)
|
||||||
|
is_tosca_format = True
|
||||||
else:
|
else:
|
||||||
(heat_template_yaml,
|
(heat_template_yaml,
|
||||||
monitoring_dict) = generate_hot_from_legacy(vnfd_dict)
|
monitoring_dict) = generate_hot_from_legacy(vnfd_dict)
|
||||||
|
|
||||||
fields['template'] = heat_template_yaml
|
fields['template'] = heat_template_yaml
|
||||||
|
|
||||||
if not device['attributes'].get('heat_template'):
|
# Handle scaling here
|
||||||
device['attributes']['heat_template'] = \
|
if is_tosca_format:
|
||||||
fields['template']
|
(is_scaling_needed,
|
||||||
|
scaling_group_names,
|
||||||
|
main_dict) = generate_hot_scaling(
|
||||||
|
vnfd_dict['topology_template'],
|
||||||
|
'scaling.yaml')
|
||||||
|
|
||||||
|
if is_scaling_needed:
|
||||||
|
main_yaml = yaml.dump(main_dict)
|
||||||
|
fields['template'] = main_yaml
|
||||||
|
fields['files'] = {'scaling.yaml': heat_template_yaml}
|
||||||
|
device['attributes']['heat_template'] = main_yaml
|
||||||
|
# TODO(kanagaraj-manickam) when multiple groups are
|
||||||
|
# supported, make this scaling attribute as
|
||||||
|
# scaling name vs scaling template map and remove
|
||||||
|
# scaling_group_names
|
||||||
|
device['attributes']['scaling.yaml'] = heat_template_yaml
|
||||||
|
device['attributes'][
|
||||||
|
'scaling_group_names'] = jsonutils.dumps(
|
||||||
|
scaling_group_names
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
if not device['attributes'].get('heat_template'):
|
||||||
|
device['attributes'][
|
||||||
|
'heat_template'] = fields['template']
|
||||||
|
|
||||||
if monitoring_dict:
|
if monitoring_dict:
|
||||||
device['attributes']['monitoring_policy'] = \
|
device['attributes']['monitoring_policy'] = \
|
||||||
@ -457,7 +598,8 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver):
|
|||||||
|
|
||||||
return stack
|
return stack
|
||||||
|
|
||||||
return create_stack()['stack']['id']
|
stack = create_stack()
|
||||||
|
return stack['stack']['id']
|
||||||
|
|
||||||
def create_wait(self, plugin, context, device_dict, device_id, auth_attr):
|
def create_wait(self, plugin, context, device_dict, device_id, auth_attr):
|
||||||
region_name = device_dict.get('placement_attr', {}).get(
|
region_name = device_dict.get('placement_attr', {}).get(
|
||||||
@ -500,13 +642,25 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver):
|
|||||||
raise vnfm.DeviceCreateWaitFailed(device_id=device_id,
|
raise vnfm.DeviceCreateWaitFailed(device_id=device_id,
|
||||||
reason=error_reason)
|
reason=error_reason)
|
||||||
|
|
||||||
outputs = stack.outputs
|
def _find_mgmt_ips(outputs):
|
||||||
LOG.debug(_('outputs %s'), outputs)
|
LOG.debug(_('outputs %s'), outputs)
|
||||||
PREFIX = 'mgmt_ip-'
|
mgmt_ips = dict((output['output_key'][len(OUTPUT_PREFIX):],
|
||||||
mgmt_ips = dict((output['output_key'][len(PREFIX):],
|
output['output_value'])
|
||||||
output['output_value'])
|
for output in outputs
|
||||||
for output in outputs
|
if output.get('output_key',
|
||||||
if output.get('output_key', '').startswith(PREFIX))
|
'').startswith(OUTPUT_PREFIX))
|
||||||
|
return mgmt_ips
|
||||||
|
|
||||||
|
# scaling enabled
|
||||||
|
if device_dict['attributes'].get('scaling_group_names'):
|
||||||
|
group_names = jsonutils.loads(
|
||||||
|
device_dict['attributes'].get('scaling_group_names')).values()
|
||||||
|
mgmt_ips = self._find_mgmt_ips_from_groups(heatclient_,
|
||||||
|
device_id,
|
||||||
|
group_names)
|
||||||
|
else:
|
||||||
|
mgmt_ips = _find_mgmt_ips(stack.outputs)
|
||||||
|
|
||||||
if mgmt_ips:
|
if mgmt_ips:
|
||||||
device_dict['mgmt_url'] = jsonutils.dumps(mgmt_ips)
|
device_dict['mgmt_url'] = jsonutils.dumps(mgmt_ips)
|
||||||
|
|
||||||
@ -604,6 +758,100 @@ class DeviceHeat(abstract_driver.DeviceAbstractDriver):
|
|||||||
raise vnfm.DeviceCreateWaitFailed(device_id=device_id,
|
raise vnfm.DeviceCreateWaitFailed(device_id=device_id,
|
||||||
reason=error_reason)
|
reason=error_reason)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def _find_mgmt_ips_from_groups(cls,
|
||||||
|
heat_client,
|
||||||
|
instance_id,
|
||||||
|
group_names):
|
||||||
|
|
||||||
|
def _find_mgmt_ips(attributes):
|
||||||
|
mgmt_ips = {}
|
||||||
|
for k, v in attributes.items():
|
||||||
|
if k.startswith(OUTPUT_PREFIX):
|
||||||
|
mgmt_ips[k.replace(OUTPUT_PREFIX, '')] = v
|
||||||
|
|
||||||
|
return mgmt_ips
|
||||||
|
|
||||||
|
mgmt_ips = {}
|
||||||
|
for group_name in group_names:
|
||||||
|
grp = heat_client.resource_get(instance_id,
|
||||||
|
group_name)
|
||||||
|
# Get scale group
|
||||||
|
for rsc in heat_client.resource_get_list(
|
||||||
|
grp.physical_resource_id):
|
||||||
|
# Get list of resources in scale group
|
||||||
|
scale_rsc = heat_client.resource_get(
|
||||||
|
grp.physical_resource_id,
|
||||||
|
rsc.resource_name)
|
||||||
|
|
||||||
|
# find out the mgmt IPs from attributes
|
||||||
|
for k, v in _find_mgmt_ips(scale_rsc.attributes).items():
|
||||||
|
if k not in mgmt_ips:
|
||||||
|
mgmt_ips[k] = [v]
|
||||||
|
else:
|
||||||
|
mgmt_ips[k].append(v)
|
||||||
|
|
||||||
|
return mgmt_ips
|
||||||
|
|
||||||
|
@log.log
|
||||||
|
def scale(self,
|
||||||
|
context,
|
||||||
|
plugin,
|
||||||
|
auth_attr,
|
||||||
|
policy,
|
||||||
|
region_name):
|
||||||
|
heatclient_ = HeatClient(auth_attr, region_name)
|
||||||
|
return heatclient_.resource_signal(policy['instance_id'],
|
||||||
|
get_scaling_policy_name(
|
||||||
|
policy_name=policy['id'],
|
||||||
|
action=policy['action']
|
||||||
|
))
|
||||||
|
|
||||||
|
@log.log
|
||||||
|
def scale_wait(self,
|
||||||
|
context,
|
||||||
|
plugin,
|
||||||
|
auth_attr,
|
||||||
|
policy,
|
||||||
|
region_name):
|
||||||
|
heatclient_ = HeatClient(auth_attr, region_name)
|
||||||
|
|
||||||
|
# TODO(kanagaraj-manickam) make wait logic into separate utility method
|
||||||
|
# and make use of it here and other actions like create and delete
|
||||||
|
while (True):
|
||||||
|
time.sleep(STACK_RETRY_WAIT)
|
||||||
|
try:
|
||||||
|
rsc = heatclient_.resource_get(
|
||||||
|
policy['instance_id'],
|
||||||
|
get_scaling_policy_name(policy_name=policy['id'],
|
||||||
|
action=policy['action']))
|
||||||
|
except Exception:
|
||||||
|
LOG.exception(_("Device scaling may not have "
|
||||||
|
"happened because Heat API request failed "
|
||||||
|
"while waiting for the stack %(stack)s to be "
|
||||||
|
"scaled"), {'stack': policy['instance_id']})
|
||||||
|
break
|
||||||
|
|
||||||
|
if rsc.resource_status == 'SIGNAL_IN_PROGRESS':
|
||||||
|
continue
|
||||||
|
|
||||||
|
break
|
||||||
|
|
||||||
|
def _fill_scaling_group_name():
|
||||||
|
vnf = policy['vnf']
|
||||||
|
scaling_group_names = vnf['attributes']['scaling_group_names']
|
||||||
|
policy['group_name'] = jsonutils.loads(
|
||||||
|
scaling_group_names)[policy['name']]
|
||||||
|
|
||||||
|
_fill_scaling_group_name()
|
||||||
|
|
||||||
|
mgmt_ips = self._find_mgmt_ips_from_groups(
|
||||||
|
heatclient_,
|
||||||
|
policy['instance_id'],
|
||||||
|
[policy['group_name']])
|
||||||
|
|
||||||
|
return jsonutils.dumps(mgmt_ips)
|
||||||
|
|
||||||
|
|
||||||
class HeatClient(object):
|
class HeatClient(object):
|
||||||
def __init__(self, auth_attr, region_name=None):
|
def __init__(self, auth_attr, region_name=None):
|
||||||
@ -639,3 +887,13 @@ class HeatClient(object):
|
|||||||
def resource_attr_support(self, resource_name, property_name):
|
def resource_attr_support(self, resource_name, property_name):
|
||||||
resource = self.resource_types.get(resource_name)
|
resource = self.resource_types.get(resource_name)
|
||||||
return property_name in resource['attributes']
|
return property_name in resource['attributes']
|
||||||
|
|
||||||
|
def resource_get_list(self, stack_id, nested_depth=0):
|
||||||
|
return self.heat.resources.list(stack_id,
|
||||||
|
nested_depth=nested_depth)
|
||||||
|
|
||||||
|
def resource_signal(self, stack_id, rsc_name):
|
||||||
|
return self.heat.resources.signal(stack_id, rsc_name)
|
||||||
|
|
||||||
|
def resource_get(self, stack_id, rsc_name):
|
||||||
|
return self.heat.resources.get(stack_id, rsc_name)
|
||||||
|
42
tacker/vm/infra_drivers/scale_driver.py
Normal file
42
tacker/vm/infra_drivers/scale_driver.py
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import abc
|
||||||
|
|
||||||
|
import six
|
||||||
|
|
||||||
|
from tacker.api import extensions
|
||||||
|
|
||||||
|
|
||||||
|
@six.add_metaclass(abc.ABCMeta)
|
||||||
|
class VnfScaleAbstractDriver(extensions.PluginInterface):
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def scale(self,
|
||||||
|
context,
|
||||||
|
plugin,
|
||||||
|
auth_attr,
|
||||||
|
policy,
|
||||||
|
region_name):
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def scale_wait(self,
|
||||||
|
context,
|
||||||
|
plugin,
|
||||||
|
auth_attr,
|
||||||
|
policy,
|
||||||
|
region_name):
|
||||||
|
pass
|
@ -101,7 +101,7 @@ def updateimports(template):
|
|||||||
|
|
||||||
@log.log
|
@log.log
|
||||||
def get_vdu_monitoring(template):
|
def get_vdu_monitoring(template):
|
||||||
monitoring_dict = {'vdus': {}}
|
monitoring_dict = {}
|
||||||
for nt in template.nodetemplates:
|
for nt in template.nodetemplates:
|
||||||
if nt.type_definition.is_derived_from(TACKERVDU):
|
if nt.type_definition.is_derived_from(TACKERVDU):
|
||||||
mon_policy = nt.get_property_value('monitoring_policy') or 'noop'
|
mon_policy = nt.get_property_value('monitoring_policy') or 'noop'
|
||||||
@ -110,6 +110,7 @@ def get_vdu_monitoring(template):
|
|||||||
if mon_policy != 'noop':
|
if mon_policy != 'noop':
|
||||||
if 'parameters' in mon_policy:
|
if 'parameters' in mon_policy:
|
||||||
mon_policy['monitoring_params'] = mon_policy['parameters']
|
mon_policy['monitoring_params'] = mon_policy['parameters']
|
||||||
|
monitoring_dict['vdus'] = {}
|
||||||
monitoring_dict['vdus'][nt.name] = {}
|
monitoring_dict['vdus'][nt.name] = {}
|
||||||
monitoring_dict['vdus'][nt.name][mon_policy['name']] = \
|
monitoring_dict['vdus'][nt.name][mon_policy['name']] = \
|
||||||
mon_policy
|
mon_policy
|
||||||
|
Loading…
Reference in New Issue
Block a user