Merge "Fix some bugs related legacy cnf operations"

Authored by Zuul on 2021-04-08 12:56:45 +00:00, committed by Gerrit Code Review
commit a6fe9869e9
7 changed files with 47 additions and 29 deletions


@@ -83,6 +83,8 @@ Kubernetes environment with one container per VDU.
min_instances: 1
max_instances: 3
target_cpu_utilization_percentage: 40
+ default_instances: 1 # required parameter but ignored for cnf
+ increment: 1 # required parameter but ignored for cnf
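For orientation, the two added lines sit inside the guide's TOSCA scaling policy. A minimal sketch of the full policy block with them in place (the policy and VDU names are illustrative, and the exact nesting in the guide may differ):

    policies:
      - SP1:
          type: tosca.policies.tacker.Scaling
          targets: [VDU1]
          properties:
            min_instances: 1
            max_instances: 3
            target_cpu_utilization_percentage: 40
            default_instances: 1  # required parameter but ignored for cnf
            increment: 1          # required parameter but ignored for cnf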
In "vnfcs", there are 2 components: front_end and rss_reader.
We model them as Containers [#second]_ inside a Pod [#third]_. To provide
@@ -158,6 +160,8 @@ will be added in the future.
min_instances: 1
max_instances: 3
target_cpu_utilization_percentage: 40
+ default_instances: 1 # required parameter but ignored for cnf
+ increment: 1 # required parameter but ignored for cnf
2. Two containers per VDU example
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -204,6 +208,8 @@ Similar to the above example, in this scenario, we define 2 containers in VDU1.
min_instances: 1
max_instances: 3
target_cpu_utilization_percentage: 40
+ default_instances: 1 # required parameter but ignored for cnf
+ increment: 1 # required parameter but ignored for cnf
Viewing a containerized VNF
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -284,7 +290,7 @@ User also can scale VNF manually, by running the following commands:
.. code-block:: console
- $ openstack vnf scale --vnf-name VNF1 --scaling-policy-name SP1 --scaling-type out
+ $ openstack vnf scale --scaling-policy-name SP1 --scaling-type out VNF1
$ kubectl get deployment
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE


@@ -35,3 +35,5 @@ topology_template:
min_instances: 1
max_instances: 3
target_cpu_utilization_percentage: 40
+ default_instances: 1 # required parameter but ignored for cnf
+ increment: 1 # required parameter but ignored for cnf


@@ -34,3 +34,5 @@ topology_template:
min_instances: 1
max_instances: 10
target_cpu_utilization_percentage: 50
+ default_instances: 1 # required parameter but ignored for cnf
+ increment: 1 # required parameter but ignored for cnf


@@ -1023,21 +1023,24 @@ def fake_pod_list():
    )


-def get_scale_policy(type, aspect_id='vdu1', delta_num=1):
+def get_scale_policy(type, aspect_id='vdu1', delta_num=1, is_legacy=False):
    policy = dict()
-    policy['vnf_instance_id'] = uuidsentinel.vnf_instance_id
    policy['action'] = type
    policy['name'] = aspect_id
-    policy['delta_num'] = delta_num
-    policy['vdu_defs'] = {
-        'VDU1': {
-            'type': 'tosca.nodes.nfv.Vdu.Compute',
-            'properties': {
-                'name': 'fake_name',
-                'description': 'test description',
-                'vdu_profile': {
-                    'min_number_of_instances': 1,
-                    'max_number_of_instances': 3}}}}
+    if is_legacy:
+        policy['instance_id'] = "fake_namespace,fake_name"
+    else:
+        policy['vnf_instance_id'] = uuidsentinel.vnf_instance_id
+        policy['delta_num'] = delta_num
+        policy['vdu_defs'] = {
+            'VDU1': {
+                'type': 'tosca.nodes.nfv.Vdu.Compute',
+                'properties': {
+                    'name': 'fake_name',
+                    'description': 'test description',
+                    'vdu_profile': {
+                        'min_number_of_instances': 1,
+                        'max_number_of_instances': 3}}}}
    return policy
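The reworked fake now produces one of two mutually exclusive policy shapes. A quick usage sketch (the assertions are illustrative, not part of the test suite):

    # Legacy shape: the target is identified only by 'instance_id'.
    legacy = get_scale_policy(type='out', aspect_id='SP1', is_legacy=True)
    assert legacy['instance_id'] == "fake_namespace,fake_name"
    assert 'vnf_instance_id' not in legacy

    # Enhanced shape: 'vnf_instance_id', 'delta_num' and 'vdu_defs' are set.
    enhanced = get_scale_policy(type='out')
    assert 'instance_id' not in enhanced
    assert enhanced['name'] == 'vdu1' and enhanced['delta_num'] == 1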


@@ -2352,7 +2352,8 @@ class TestKubernetes(base.TestCase):
                   mock_read_namespaced_deployment,
                   mock_patch_namespaced_deployment_scale,
                   mock_read_namespaced_horizontal_pod_autoscaler):
-        policy = fakes.get_scale_policy(type=scale_type, aspect_id='SP1')
+        policy = fakes.get_scale_policy(
+            type=scale_type, aspect_id='SP1', is_legacy=True)
        policy['instance_id'] = "fake_namespace,fake_name"
        mock_vnf_resource_list.return_value = []
        mock_read_namespaced_deployment.return_value = \
@@ -2569,8 +2570,8 @@ class TestKubernetes(base.TestCase):
    @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
    def test_scale_wait_legacy(self, mock_vnf_resource_list,
                               mock_list_namespaced_pod):
-        policy = fakes.get_scale_policy(type='out', aspect_id='SP1')
-        policy['instance_id'] = "fake_namespace,fake_name"
+        policy = fakes.get_scale_policy(
+            type='out', aspect_id='SP1', is_legacy=True)
        mock_vnf_resource_list.return_value = []
        mock_list_namespaced_pod.return_value = \
            client.V1PodList(items=[
@@ -2587,8 +2588,8 @@ class TestKubernetes(base.TestCase):
    @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
    def test_scale_wait_legacy_retry_over(self, mock_vnf_resource_list,
                                          mock_list_namespaced_pod):
-        policy = fakes.get_scale_policy(type='out', aspect_id='SP1')
-        policy['instance_id'] = "fake_namespace,fake_name"
+        policy = fakes.get_scale_policy(
+            type='out', aspect_id='SP1', is_legacy=True)
        mock_vnf_resource_list.return_value = []
        mock_list_namespaced_pod.return_value = \
            client.V1PodList(items=[
@@ -2603,8 +2604,8 @@ class TestKubernetes(base.TestCase):
    @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id")
    def test_scale_wait_legacy_status_unknown(self, mock_vnf_resource_list,
                                              mock_list_namespaced_pod):
-        policy = fakes.get_scale_policy(type='out', aspect_id='SP1')
-        policy['instance_id'] = "fake_namespace,fake_name"
+        policy = fakes.get_scale_policy(
+            type='out', aspect_id='SP1', is_legacy=True)
        mock_vnf_resource_list.return_value = []
        mock_list_namespaced_pod.return_value = \
            client.V1PodList(items=[


@@ -189,8 +189,11 @@ class Parser(object):
                    and vdu_name in policy.targets:
                count = count + 1
                policy_props = policy.properties
-                self.check_unsupported_key(policy_props,
-                                           ALLOWED_SCALING_OBJECT_PROPS)
+                # NOTE(ueha): check_unsupported_key() is commented out to
+                #             resolve vnf create error due to required
+                #             parameters of policies.
+                # self.check_unsupported_key(policy_props,
+                #                            ALLOWED_SCALING_OBJECT_PROPS)
                scaling_obj.scaling_name = policy.name
                scaling_obj.target_cpu_utilization_percentage = \
                    policy_props.get(
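The disabled call enforced an allow-list of scaling-policy keys; since 'default_instances' and 'increment' are required by the policy schema but presumably absent from ALLOWED_SCALING_OBJECT_PROPS, the check rejected otherwise valid CNF templates at create time. A rough sketch of what such a validator typically does (assumed behavior, not Tacker's actual implementation):

    def check_unsupported_key(policy_props, allowed_keys):
        # Raise if the policy carries any key outside the allow-list.
        unsupported = set(policy_props) - set(allowed_keys)
        if unsupported:
            raise ValueError('unsupported keys: %s' % sorted(unsupported))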


@@ -930,6 +930,7 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
                # If one of objects is still alive, keeps on waiting
                if count > 0:
                    keep_going = True
+                    time.sleep(self.STACK_RETRY_WAIT)
                else:
                    keep_going = False
        except Exception as e:
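The added sleep turns what appears to be a tight retry loop into a throttled poll. The general pattern, reduced to a standalone sketch (the helper and its arguments are hypothetical; STACK_RETRY_WAIT is the driver's own constant):

    import time

    def wait_until_gone(count_remaining, retry_wait, max_retries):
        # Poll until no tracked objects are left, pausing between
        # attempts instead of spinning at full speed.
        for _ in range(max_retries):
            if count_remaining() == 0:
                return True
            time.sleep(retry_wait)
        return False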
@@ -1123,13 +1124,13 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
        """
        # initialize Kubernetes APIs
        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
-        vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
-            context, policy['vnf_instance_id'])
        try:
-            if not vnf_resources:
+            if not policy.get('vnf_instance_id'):
                # execute legacy scale method
                self._scale_legacy(policy, auth_cred)
            else:
+                vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
+                    context, policy['vnf_instance_id'])
                app_v1_api_client = self.kubernetes.get_app_v1_api_client(
                    auth=auth_cred)
                aspect_id = policy['name']
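This is the core bug fix: the old code read policy['vnf_instance_id'] before the try block, so a legacy policy, which has no such key, raised KeyError before the legacy branch could ever run. Both _scale and _scale_wait now branch on the key's presence, roughly (hypothetical helper, not in the patch):

    def _is_legacy_policy(policy):
        # Legacy CNF scale requests carry 'instance_id' ("namespace,name")
        # and no 'vnf_instance_id'; enhanced (VNF LCM) requests carry
        # 'vnf_instance_id' and are resolved via VnfResourceList.
        return not policy.get('vnf_instance_id')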
@@ -1287,13 +1288,13 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
        """
        # initialize Kubernetes APIs
        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
-        vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
-            context, policy['vnf_instance_id'])
        try:
-            if not vnf_resources:
+            if not policy.get('vnf_instance_id'):
                # execute legacy scale_wait method
                self._scale_wait_legacy(policy, auth_cred)
            else:
+                vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
+                    context, policy['vnf_instance_id'])
                core_v1_api_client = self.kubernetes.get_core_v1_api_client(
                    auth=auth_cred)
                app_v1_api_client = self.kubernetes.get_app_v1_api_client(