Fix retry operation after LCM failure
The LCM retry operation was verified for CNF and for the MgmtDriver
(k8s-cluster), and this patch fixes the errors that were found:

1. When retry is executed and the VNF status is ERROR, change ERROR
   to the status of the corresponding lifecycle operation.
2. In the scale operation, add a judgment condition so that the
   `scaleGroupDict` assignment is only used when the VIM type is not
   kubernetes.
3. When an exception occurs in terminate, set task_state in
   vnf_instance to None.
4. In the scale operation, add an else branch: if before_error_point
   is greater than 4, get resource_changes from vnf_info first; if it
   does not exist, obtain it by calling self._scale_resource_update.
5. At the beginning of instantiate/heal/terminate, add a judgment
   condition when modifying the VNF status: the status may only be
   modified when it is not ERROR.
6. In the heal/scale operations, add a judgment condition that checks
   whether the node exists; the delete-node command is executed only
   when the node exists.
7. In the instantiate operation, check whether the stack already
   exists before creating it.
8. In the MgmtDriver:
   (1) When installing a k8s node, check whether the node already
       exists in the cluster; if it does, skip it and install the
       next node.
   (2) After installing the k8s cluster, when registering the VIM,
       check whether the VIM already exists; if it does, use it
       directly.

Closes-Bug: #1942277
Change-Id: If4e695120eb432155499a2e866ae651a18a320ab
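Items 1 and 5 together mean that every LCM entry point now accepts
ERROR as a valid source state on retry. A minimal, self-contained
sketch of that transition logic (the helper name and dict layout are
illustrative, not Tacker's actual API):

    _ACTIVE_STATUS = ('ACTIVE',)
    _ERROR_STATUS = ('ERROR',)

    def set_pending_status(vnf_dict, operation):
        # On a first attempt the VNF is ACTIVE; on a retry after a
        # failed run it is ERROR, so ERROR must also be moved to
        # PENDING_<operation> instead of being rejected.
        pending = 'PENDING_' + operation
        if vnf_dict['status'] in _ERROR_STATUS:
            vnf_dict['status'] = pending   # retry path
        elif vnf_dict['status'] in _ACTIVE_STATUS:
            vnf_dict['status'] = pending   # first attempt
        return vnf_dict

    print(set_pending_status({'status': 'ERROR'}, 'HEAL'))
    # -> {'status': 'PENDING_HEAL'}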
@@ -77,6 +77,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         self.FLOATING_IP_FLAG = False
         self.SET_NODE_LABEL_FLAG = False
         self.SET_ZONE_ID_FLAG = False
+        self.nfvo_plugin = NfvoPlugin()

     def _check_is_cidr(self, cidr_str):
         # instantiate: check cidr
@@ -134,6 +135,16 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
             if err == 'Error: no repositories to show':
                 return []
             raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
+        elif type == 'check_node':
+            err = result.get_stderr()
+            if result.get_return_code() == 0:
+                pass
+            elif (result.get_return_code() != 0 and
+                    "kubectl: command not found" in err):
+                return "False"
+            else:
+                LOG.error(err)
+                raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
         return result.get_stdout()

     def _create_vim(self, context, vnf_instance, server, bearer_token,
@@ -176,14 +187,10 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
                          'masternode_password': password}
             extra['helm_info'] = str(helm_info)
             vim_info['vim']['extra'] = extra
-        try:
-            nfvo_plugin = NfvoPlugin()
-            created_vim_info = nfvo_plugin.create_vim(context, vim_info)
-        except Exception as e:
-            LOG.error("Failed to register kubernetes vim: {}".format(e))
-            raise exceptions.MgmtDriverOtherError(
-                error_message="Failed to register kubernetes vim: {}".format(
-                    e))
+
+        created_vim_info = self._get_or_create_vim(
+            context, vim_name, server, vim_info)
+
         id = uuidutils.generate_uuid()
         vim_id = created_vim_info.get('id')
         vim_type = 'kubernetes'
@@ -199,6 +206,23 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         vnf_instance.vim_connection_info = vim_connection_infos
         vnf_instance.save()

+    def _get_or_create_vim(
+            self, context, vim_name, server, create_vim_info):
+        created_vim_info = self._get_vim_by_name(context, vim_name)
+        if created_vim_info:
+            vim_info = self.nfvo_plugin.get_vim(
+                context, created_vim_info.id)
+            if (vim_info['auth_url'] == server and
+                    vim_info['status'] == 'REACHABLE'):
+                return vim_info
+        try:
+            return self.nfvo_plugin.create_vim(context, create_vim_info)
+        except Exception as e:
+            LOG.error(f"Failed to register kubernetes vim: {e}")
+            raise exceptions.MgmtDriverOtherError(
+                error_message="Failed to register "
+                              f"kubernetes vim: {e}")
+
     def _get_ha_group_resources_list(
             self, heatclient, stack_id, node, additional_params):
         # ha: get group resources list
@@ -705,6 +729,24 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         commander.close_session()
         LOG.debug("_connect_to_private_registries function complete.")

+    def _is_master_installed(self, vm_dict):
+        nic_ip = vm_dict['ssh']['nic_ip']
+        master_name = 'master' + nic_ip.split('.')[-1]
+        user = vm_dict.get('ssh', {}).get('username')
+        password = vm_dict.get('ssh', {}).get('password')
+        host = vm_dict.get('ssh', {}).get('ipaddr')
+        commander = cmd_executer.RemoteCommandExecutor(
+            user=user, password=password,
+            host=host, timeout=K8S_CMD_TIMEOUT)
+        ssh_command = f"kubectl get node | grep {master_name}"
+        result = self._execute_command(commander, ssh_command,
+                                       K8S_CMD_TIMEOUT, 'check_node', 0)
+        if result != "False":
+            for res in result:
+                if res.split(' ')[0].strip() == master_name:
+                    return True
+        return False
+
     def _install_k8s_cluster(self, context, vnf_instance,
                              proxy, script_path,
                              master_vm_dict_list, worker_vm_dict_list,
@@ -764,8 +806,14 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         active_host = ""
         ssl_ca_cert_hash = ""
         kubeadm_token = ""
+        get_node_names = []
         # install master node
         for vm_dict in master_vm_dict_list:
+
+            # check master_node exist in k8s-cluster
+            if self._is_master_installed(vm_dict):
+                continue
+
             if vm_dict.get('ssh', {}).get('nic_ip') == \
                     master_ssh_ips_str.split(',')[0]:
                 active_username = vm_dict.get('ssh', {}).get('username')
@@ -900,10 +948,19 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         commander = cmd_executer.RemoteCommandExecutor(
             user=user, password=password, host=host,
             timeout=K8S_CMD_TIMEOUT)
-        ssh_command = "kubectl create -f /tmp/create_admin_token.yaml"
-        self._execute_command(
-            commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
-        time.sleep(30)
+
+        # Check whether the secret already exists
+        if not self._has_secret(commander):
+            ssh_command = ("kubectl create -f "
+                           "/tmp/create_admin_token.yaml")
+            self._execute_command(
+                commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
+            time.sleep(30)
+        else:
+            ssh_command = "kubectl get node"
+            get_node_names = self._execute_command(
+                commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
+
         ssh_command = "kubectl get secret -n kube-system " \
                       "| grep '^admin-token' " \
                       "| awk '{print $1}' " \
@@ -952,6 +1009,11 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
                 commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)

             # execute install k8s command on VM
-            self._install_worker_node(
-                commander, proxy, ha_flag, nic_ip,
-                cluster_ip, kubeadm_token, ssl_ca_cert_hash,
+            for get_node_name in get_node_names:
+                if ('worker' + nic_ip.split('.')[-1] ==
+                        get_node_name.split(' ')[0]):
+                    break
+            else:
+                self._install_worker_node(
+                    commander, proxy, ha_flag, nic_ip,
+                    cluster_ip, kubeadm_token, ssl_ca_cert_hash,
@@ -975,6 +1037,12 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         return (server, bearer_token, ssl_ca_cert, project_name,
                 masternode_ip_list)

+    def _has_secret(self, commander):
+        ssh_command = ("kubectl get secret -n kube-system "
+                       "| grep '^admin-token'")
+        return self._execute_command(
+            commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
+
     def _check_values(self, additional_param):
         for key, value in additional_param.items():
             if 'master_node' == key or 'worker_node' == key:
@@ -1239,8 +1307,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
             vim_info = self._get_vim_by_name(
                 context, k8s_vim_name)
             if vim_info:
-                nfvo_plugin = NfvoPlugin()
-                nfvo_plugin.delete_vim(context, vim_info.id)
+                self.nfvo_plugin.delete_vim(context, vim_info.id)

     def _get_username_pwd(self, vnf_request, vnf_instance, role):
         # heal and scale: get user pwd
@@ -1413,6 +1480,8 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
                 daemonset_content_str = ''.join(result)
                 daemonset_content = json.loads(
                     daemonset_content_str)
+                if not daemonset_content['items']:
+                    continue
                 ssh_command = \
                     "kubectl drain {resource} --ignore-daemonsets " \
                     "--timeout={k8s_cmd_timeout}s".format(
@@ -2018,6 +2087,18 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
             self._execute_command(
                 commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3)

+            # check worker_node exist in k8s-cluster
+            result = self._is_worker_node_installed(
+                commander, fixed_master_name)
+            if not result:
+                continue
+            for res in result:
+                if res.split(' ')[0].strip() == fixed_master_name:
+                    # fixed_master_name is found
+                    break
+            else:
+                continue
+
             # delete master node
             ssh_command = "kubectl delete node " + \
                           fixed_master_name
@@ -2063,6 +2144,11 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
                 commander, ssh_command, K8S_CMD_TIMEOUT, 'etcd', 3)
             commander.close_session()

+    def _is_worker_node_installed(self, commander, fixed_master_name):
+        ssh_command = f"kubectl get node | grep {fixed_master_name}"
+        return self._execute_command(
+            commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
+
     def _delete_worker_node(
             self, fixed_worker_infos, not_fixed_master_infos,
             master_username, master_password):
@@ -2081,6 +2167,8 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
                 worker_node_pod_info_str = ''.join(result)
                 worker_node_pod_info = json.loads(
                     worker_node_pod_info_str)
+                if not worker_node_pod_info['items']:
+                    continue
                 ssh_command = "kubectl drain {} " \
                               "--ignore-daemonsets " \
                               "--timeout={}s" \
@@ -2259,8 +2347,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         k8s_vim_info = self._get_vim_by_name(
             context, k8s_vim_name)
         if k8s_vim_info:
-            nfvo_plugin = NfvoPlugin()
-            nfvo_plugin.delete_vim(context, k8s_vim_info.id)
+            self.nfvo_plugin.delete_vim(context, k8s_vim_info.id)
         for vim_info in vnf_instance.vim_connection_info:
             if vim_info.vim_id == k8s_vim_info.id:
                 vnf_instance.vim_connection_info.remove(vim_info)
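The node-existence guard used in the heal/scale paths above (items 6
and 8 (1) of the commit message), reduced to a standalone sketch;
run_cmd stands in for the driver's remote command executor and is not
part of Tacker:

    def node_exists(kubectl_output_lines, node_name):
        # Every line of `kubectl get node` output starts with the
        # node name, so compare the first whitespace-delimited token.
        return any(line.split(' ')[0].strip() == node_name
                   for line in kubectl_output_lines)

    def delete_node_if_present(run_cmd, node_name):
        output = run_cmd("kubectl get node | grep " + node_name)
        if node_exists(output, node_name):
            # only issue the delete when the node is actually listed
            run_cmd("kubectl delete node " + node_name)

    print(node_exists(["worker23   Ready   <none>   5d"], "worker23"))
    # -> True (exact match on the first token, so "worker2" would not)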
@@ -1197,7 +1197,7 @@ class VnfLcmController(wsgi.Controller):
         if max_level < scale_level:
             return self._make_problem_detail(
                 'can not scale_out', 400, title='can not scale_out')
-        if 'vnf_lcm_op_occs_id' in vnf_info:
+        if 'vnf_lcm_op_occs_id' in vnf_info and vim_type != "kubernetes":
             num = (scaleGroupDict['scaleGroupDict']
                    [scale_vnf_request.aspect_id]['num'])
             default = (scaleGroupDict['scaleGroupDict']
@@ -113,6 +113,8 @@ _PENDING_STATUS = ('PENDING_CREATE',
                    'PENDING_TERMINATE',
                    'PENDING_DELETE',
                    'PENDING_HEAL',
                    'PENDING_SCALE_OUT',
                    'PENDING_SCALE_IN',
                    'PENDING_CHANGE_EXT_CONN')
+_ERROR_STATUS = ('ERROR',)
 _ALL_STATUSES = _ACTIVE_STATUS + _INACTIVE_STATUS + _PENDING_STATUS + \
@@ -1948,8 +1950,10 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
             vnf_dict['current_error_point'] = \
                 fields.ErrorPoint.NOTIFY_PROCESSING

-            if vnf_dict['before_error_point'] <= \
-                    fields.ErrorPoint.NOTIFY_PROCESSING:
+            if vnf_dict['status'] == 'ERROR':
+                self._change_vnf_status(context, vnf_instance.id,
+                                        _ERROR_STATUS, 'PENDING_CREATE')
+            elif vnf_dict['before_error_point'] <= EP.NOTIFY_PROCESSING:
                 # change vnf_status
                 if vnf_dict['status'] == 'INACTIVE':
                     vnf_dict['status'] = 'PENDING_CREATE'
@@ -2039,8 +2043,10 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
             vnf_dict['current_error_point'] = \
                 fields.ErrorPoint.NOTIFY_PROCESSING

-            if vnf_dict['before_error_point'] <= \
-                    fields.ErrorPoint.NOTIFY_PROCESSING:
+            if vnf_dict['status'] == 'ERROR':
+                self._change_vnf_status(context, vnf_instance.id,
+                                        _ERROR_STATUS, 'PENDING_TERMINATE')
+            elif vnf_dict['before_error_point'] <= EP.NOTIFY_PROCESSING:
                 self._change_vnf_status(context, vnf_instance.id,
                                         _ACTIVE_STATUS, 'PENDING_TERMINATE')

@@ -2077,6 +2083,8 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
             # set vnf_status to error
             self._change_vnf_status(context, vnf_instance.id,
                                     _ALL_STATUSES, 'ERROR')
+            self.vnflcm_driver._vnf_instance_update(
+                context, vnf_instance, task_state=None)

             # Update vnf_lcm_op_occs table and send notification "FAILED_TEMP"
             self._send_lcm_op_occ_notification(
|
|||
vnf_dict['current_error_point'] = \
|
||||
fields.ErrorPoint.NOTIFY_PROCESSING
|
||||
|
||||
if vnf_dict['before_error_point'] <= \
|
||||
fields.ErrorPoint.NOTIFY_PROCESSING:
|
||||
if vnf_dict['status'] == 'ERROR':
|
||||
self._change_vnf_status(context, vnf_instance.id,
|
||||
_ERROR_STATUS, 'PENDING_HEAL')
|
||||
elif vnf_dict['before_error_point'] <= EP.NOTIFY_PROCESSING:
|
||||
# update vnf status to PENDING_HEAL
|
||||
self._change_vnf_status(context, vnf_instance.id,
|
||||
_ACTIVE_STATUS, constants.PENDING_HEAL)
|
||||
|
@@ -2217,6 +2227,11 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
                 scale_vnf_request,
                 vnf_lcm_op_occ_id)

+            if vnf_info['status'] == 'ERROR':
+                self._change_vnf_status(context, vnf_instance.id,
+                                        _ERROR_STATUS,
+                                        'PENDING_' + scale_vnf_request.type)
+
             self.vnflcm_driver.scale_vnf(
                 context, vnf_info, vnf_instance, scale_vnf_request)
@@ -2405,10 +2420,16 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
         )

         vnf_dict['current_error_point'] = EP.NOTIFY_PROCESSING
-        if vnf_dict['before_error_point'] <= EP.NOTIFY_PROCESSING:
+
+        if vnf_dict['status'] == 'ERROR':
+            self._change_vnf_status(context, vnf_instance.id,
+                                    _ERROR_STATUS,
+                                    'PENDING_CHANGE_EXT_CONN')
+        elif vnf_dict['before_error_point'] <= EP.NOTIFY_PROCESSING:
             # update vnf status to PENDING_CHANGE_EXT_CONN
             self._change_vnf_status(context, vnf_instance.id,
-                                    _ACTIVE_STATUS, 'PENDING_CHANGE_EXT_CONN')
+                                    _ACTIVE_STATUS,
+                                    'PENDING_CHANGE_EXT_CONN')

         self.vnflcm_driver.change_ext_conn_vnf(
             context,
@@ -920,8 +920,11 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
             termination_type=fields.VnfInstanceTerminationType.GRACEFUL,
             additional_params={"key": "value"})
         vnf_lcm_op_occs_id = uuidsentinel.vnf_lcm_op_occs_id
-        vnf_dict = db_utils.get_dummy_vnf(instance_id=self.instance_uuid)
-        vnf_dict['before_error_point'] = fields.ErrorPoint.INITIAL
+        vnf_dict = {
+            **db_utils.get_dummy_vnf(instance_id=self.instance_uuid),
+            'before_error_point': fields.ErrorPoint.INITIAL,
+            'status': ''
+        }
         self.conductor.terminate(self.context, vnf_lcm_op_occs_id,
                                  vnf_instance, terminate_vnf_req, vnf_dict)
@@ -1465,8 +1468,11 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
             fields.VnfInstanceState.INSTANTIATED
         vnf_instance.save()
         heal_vnf_req = objects.HealVnfRequest(cause="healing request")
-        vnf_dict = {"fake": "fake_dict"}
-        vnf_dict['before_error_point'] = fields.ErrorPoint.INITIAL
+        vnf_dict = {
+            'fake': 'fake_dict',
+            'before_error_point': fields.ErrorPoint.INITIAL,
+            'status': ''
+        }
         vnf_lcm_op_occs_id = uuidsentinel.vnf_lcm_op_occs_id
         self.conductor.heal(self.context, vnf_instance, vnf_dict,
                             heal_vnf_req, vnf_lcm_op_occs_id)
@@ -1509,8 +1515,11 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
             fields.VnfInstanceState.INSTANTIATED
         vnf_instance.save()
         heal_vnf_req = objects.HealVnfRequest(cause="healing request")
-        vnf_dict = {"fake": "fake_dict"}
-        vnf_dict['before_error_point'] = fields.ErrorPoint.NOTIFY_PROCESSING
+        vnf_dict = {
+            'fake': 'fake_dict',
+            'before_error_point': fields.ErrorPoint.NOTIFY_PROCESSING,
+            'status': ''
+        }
         vnf_lcm_op_occs_id = uuidsentinel.vnf_lcm_op_occs_id
         self.conductor.heal(self.context, vnf_instance, vnf_dict,
                             heal_vnf_req, vnf_lcm_op_occs_id)
@@ -1553,8 +1562,11 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
             fields.VnfInstanceState.INSTANTIATED
         vnf_instance.save()
         heal_vnf_req = objects.HealVnfRequest(cause="healing request")
-        vnf_dict = {"fake": "fake_dict"}
-        vnf_dict['before_error_point'] = fields.ErrorPoint.INTERNAL_PROCESSING
+        vnf_dict = {
+            'fake': 'fake_dict',
+            'before_error_point': fields.ErrorPoint.INTERNAL_PROCESSING,
+            'status': ''
+        }
         vnf_lcm_op_occs_id = uuidsentinel.vnf_lcm_op_occs_id
         self.conductor.heal(self.context, vnf_instance, vnf_dict,
                             heal_vnf_req, vnf_lcm_op_occs_id)
@@ -1596,8 +1608,11 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
             fields.VnfInstanceState.INSTANTIATED
         vnf_instance.save()
         heal_vnf_req = objects.HealVnfRequest(cause="healing request")
-        vnf_dict = {"fake": "fake_dict"}
-        vnf_dict['before_error_point'] = fields.ErrorPoint.NOTIFY_COMPLETED
+        vnf_dict = {
+            'fake': 'fake_dict',
+            'before_error_point': fields.ErrorPoint.NOTIFY_COMPLETED,
+            'status': ''
+        }
         vnf_lcm_op_occs_id = uuidsentinel.vnf_lcm_op_occs_id
         self.conductor.heal(self.context, vnf_instance, vnf_dict,
                             heal_vnf_req, vnf_lcm_op_occs_id)
@@ -1659,9 +1674,12 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         vnf_instance.instantiated_vnf_info = objects.InstantiatedVnfInfo(
             flavour_id='simple')
         heal_vnf_req = objects.HealVnfRequest(cause="healing request")
-        vnf_dict = db_utils.get_dummy_vnf_etsi(instance_id=self.instance_uuid,
-                                               flavour='simple')
-        vnf_dict['before_error_point'] = fields.ErrorPoint.INITIAL
+        vnf_dict = {
+            **db_utils.get_dummy_vnf_etsi(
+                instance_id=self.instance_uuid, flavour='simple'),
+            'before_error_point': fields.ErrorPoint.INITIAL,
+            'status': ''
+        }
         vnf_lcm_op_occs_id = 'a9c36d21-21aa-4692-8922-7999bbcae08c'
         mock_exec.return_value = True
         mock_act.return_value = None
@@ -1802,9 +1820,12 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         vnf_instance.instantiated_vnf_info.vnf_virtual_link_resource_info = \
             [vl_obj]
         heal_vnf_req = objects.HealVnfRequest(cause="healing request")
-        vnf_dict = db_utils.get_dummy_vnf_etsi(instance_id=self.instance_uuid,
-                                               flavour='simple')
-        vnf_dict['before_error_point'] = fields.ErrorPoint.INITIAL
+        vnf_dict = {
+            **db_utils.get_dummy_vnf_etsi(
+                instance_id=self.instance_uuid, flavour='simple'),
+            'before_error_point': fields.ErrorPoint.INITIAL,
+            'status': ''
+        }
         vnfd_yaml = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                     '../../etc/samples/etsi/nfv/'
                                     'test_heal_grant_unit/'
@@ -1979,10 +2000,13 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         vnf_instance.instantiated_vnf_info = objects.InstantiatedVnfInfo(
             flavour_id='simple')
         heal_vnf_req = objects.HealVnfRequest(cause="healing request")
-        vnf_dict = db_utils.get_dummy_vnf_etsi(instance_id=self.instance_uuid,
-                                               flavour='simple')
-        vnf_dict['before_error_point'] = fields.ErrorPoint.INITIAL
         vnfd_key = 'vnfd_' + vnf_instance.instantiated_vnf_info.flavour_id
+        vnf_dict = {
+            **db_utils.get_dummy_vnf_etsi(
+                instance_id=self.instance_uuid, flavour='simple'),
+            'before_error_point': fields.ErrorPoint.INITIAL,
+            'status': ''
+        }
         vnfd_yaml = vnf_dict['vnfd']['attributes'].get(vnfd_key, '')
         mock_vnfd_dict.return_value = yaml.safe_load(vnfd_yaml)
         vnf_lcm_op_occs_id = 'a9c36d21-21aa-4692-8922-7999bbcae08c'
@@ -2081,9 +2105,12 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         vnf_instance.instantiated_vnf_info = objects.InstantiatedVnfInfo(
             flavour_id='simple')
         heal_vnf_req = objects.HealVnfRequest(cause="healing request")
-        vnf_dict = db_utils.get_dummy_vnf_etsi(instance_id=self.instance_uuid,
-                                               flavour='simple')
-        vnf_dict['before_error_point'] = fields.ErrorPoint.INITIAL
+        vnf_dict = {
+            **db_utils.get_dummy_vnf_etsi(
+                instance_id=self.instance_uuid, flavour='simple'),
+            'before_error_point': fields.ErrorPoint.INITIAL,
+            'status': ''
+        }
         vnfd_key = 'vnfd_' + vnf_instance.instantiated_vnf_info.flavour_id
         vnfd_yaml = vnf_dict['vnfd']['attributes'].get(vnfd_key, '')
         mock_vnfd_dict.return_value = yaml.safe_load(vnfd_yaml)
@@ -2140,8 +2167,11 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         mock_add_additional_vnf_info.side_effect = Exception

         heal_vnf_req = objects.HealVnfRequest(cause="healing request")
-        vnf_dict = {"fake": "fake_dict"}
-        vnf_dict['before_error_point'] = fields.ErrorPoint.INITIAL
+        vnf_dict = {
+            'fake': 'fake_dict',
+            'before_error_point': fields.ErrorPoint.INITIAL,
+            'status': ''
+        }
         vnf_lcm_op_occs_id = uuidsentinel.vnf_lcm_op_occs_id
         self.conductor.heal(self.context, vnf_instance, vnf_dict,
                             heal_vnf_req, vnf_lcm_op_occs_id)
@@ -2170,7 +2200,10 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         vnf_instance.create()

         heal_vnf_req = objects.HealVnfRequest(cause="healing request")
-        vnf_dict = {"fake": "fake_dict"}
+        vnf_dict = {
+            'fake': 'fake_dict',
+            'status': ''
+        }
         vnf_lcm_op_occs_id = uuidsentinel.vnf_lcm_op_occs_id
         self.conductor.heal(self.context, vnf_instance, vnf_dict,
                             heal_vnf_req, vnf_lcm_op_occs_id)
@@ -3301,7 +3334,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         vnf_instance.instantiation_state = fields.VnfInstanceState.INSTANTIATED
         vnf_instance.save()
         vnf_instance.instantiated_vnf_info = fakes.get_instantiated_vnf_info()
-        vnf_dict = {"before_error_point": 0}
+        vnf_dict = {"before_error_point": 0, "status": ""}
         change_ext_conn_req = fakes.get_change_ext_conn_request_obj()

         # Test condition settings.
@@ -3363,7 +3396,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         vnf_instance.instantiation_state = fields.VnfInstanceState.INSTANTIATED
         vnf_instance.save()
         vnf_instance.instantiated_vnf_info = fakes.get_instantiated_vnf_info()
-        vnf_dict = {"before_error_point": 0}
+        vnf_dict = {"before_error_point": 0, "status": ""}
         change_ext_conn_req = fakes.get_change_ext_conn_request_obj()
         vnf_virtual_link = (
             vnf_instance.instantiated_vnf_info.vnf_virtual_link_resource_info)
@@ -3446,7 +3479,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         vnf_instance.instantiation_state = fields.VnfInstanceState.INSTANTIATED
         vnf_instance.save()
         vnf_instance.instantiated_vnf_info = fakes.get_instantiated_vnf_info()
-        vnf_dict = {"before_error_point": 0}
+        vnf_dict = {"before_error_point": 0, "status": ""}
         change_ext_conn_req = fakes.get_change_ext_conn_request_obj()

         # Test condition settings.
@@ -3501,7 +3534,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         vnf_instance.instantiation_state = fields.VnfInstanceState.INSTANTIATED
         vnf_instance.save()
         vnf_instance.instantiated_vnf_info = fakes.get_instantiated_vnf_info()
-        vnf_dict = {"before_error_point": 0}
+        vnf_dict = {"before_error_point": 0, "status": ""}
         change_ext_conn_req = fakes.get_change_ext_conn_request_obj()

         # Test condition settings.
@@ -3571,7 +3604,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         change_ext_conn_req = fakes.get_change_ext_conn_request_obj()
         vnf_lcm_op_occs_id = uuidsentinel.vnf_lcm_op_occs_id
         vnf_dict = {"before_error_point": 0,
-                    "current_error_point": 6}
+                    "current_error_point": 6, "status": ""}
         m_vnf_lcm_subscriptions = (
             [mock.MagicMock(**fakes.get_vnf_lcm_subscriptions())])
         mock_vnf_lcm_subscriptions_get.return_value = (
@@ -3627,7 +3660,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         vnf_instance.instantiation_state = fields.VnfInstanceState.INSTANTIATED
         vnf_instance.save()
         vnf_instance.instantiated_vnf_info = fakes.get_instantiated_vnf_info()
-        vnf_dict = {"before_error_point": 1}
+        vnf_dict = {"before_error_point": 1, "status": ""}
         change_ext_conn_req = fakes.get_change_ext_conn_request_obj()

         self.conductor.change_ext_conn(
@@ -3682,7 +3715,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
         vnf_instance.instantiation_state = fields.VnfInstanceState.INSTANTIATED
         vnf_instance.save()
         vnf_instance.instantiated_vnf_info = fakes.get_instantiated_vnf_info()
-        vnf_dict = {"before_error_point": 7}
+        vnf_dict = {"before_error_point": 7, "status": ""}
         change_ext_conn_req = fakes.get_change_ext_conn_request_obj()

         self.conductor.change_ext_conn(
@@ -1980,10 +1980,11 @@ class TestOpenStack(base.FixturedTestCase):

     @mock.patch('tacker.vnfm.infra_drivers.openstack.translate_template.'
                 'TOSCAToHOT._get_unsupported_resource_props')
-    def test_instantiate_vnf(self, mock_get_unsupported_resource_props):
+    @mock.patch.object(hc.HeatClient, "find_stack")
+    def test_instantiate_vnf(self, mock_get_unsupported_resource_props,
+                             mock_find_stack):
         vim_connection_info = fd_utils.get_vim_connection_info_object()
         inst_req_info = fd_utils.get_instantiate_vnf_request()
         vnfd_dict = fd_utils.get_vnfd_dict()
         grant_response = fd_utils.get_grant_response_dict()

         url = os.path.join(self.heat_url, 'stacks')
@@ -1992,7 +1993,12 @@ class TestOpenStack(base.FixturedTestCase):
                                headers=self.json_headers)
         vnf_instance = fd_utils.get_vnf_instance_object()

-        vnfd_dict['before_error_point'] = fields.ErrorPoint.PRE_VIM_CONTROL
+        vnfd_dict = {
+            **fd_utils.get_vnfd_dict(),
+            'before_error_point': fields.ErrorPoint.PRE_VIM_CONTROL,
+            'status': ''
+        }

         instance_id = self.openstack.instantiate_vnf(
             self.context, vnf_instance, vnfd_dict, vim_connection_info,
             inst_req_info, grant_response, self.plugin)
@@ -2152,7 +2158,9 @@ class TestOpenStack(base.FixturedTestCase):
     @mock.patch('tacker.vnflcm.utils.get_base_nest_hot_dict')
     @mock.patch('tacker.vnflcm.utils._get_vnf_package_path')
     @mock.patch.object(objects.VnfLcmOpOcc, "get_by_vnf_instance_id")
-    def test_heal_vnf_instance(self, mock_get_vnflcm_op_occs,
+    @mock.patch.object(hc.HeatClient, "find_stack")
+    def test_heal_vnf_instance(self, mock_find_stack,
+                               mock_get_vnflcm_op_occs,
                                mock_get_vnf_package_path,
                                mock_get_base_hot_dict):
         nested_hot_dict = {'parameters': {'vnf': 'test'}}
@@ -1224,6 +1224,12 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
                     vnf_instance,
                     scale_vnf_request,
                     vim_connection_info)
+            else:
+                resource_changes = vnf_info.get('resource_changes')
+                if not resource_changes:
+                    resource_changes = self._scale_resource_update(
+                        context, vnf_info, vnf_instance, scale_vnf_request,
+                        vim_connection_info, error=True)

         vnf_info['current_error_point'] = EP.INTERNAL_PROCESSING
@@ -520,6 +520,13 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
     def _create_stack_with_user_data(self, heatclient, vnf,
                                      base_hot_dict, nested_hot_dict,
                                      hot_param_dict):
+        # Find existing stack
+        filters = {"name": f"vnflcm_{vnf['id']}"}
+        stack_found = heatclient.find_stack(**filters)
+        if stack_found and "status" in vnf and vnf['status'] == 'ERROR':
+            stack = {'stack': {'id': stack_found.id}}
+            return stack
+
         fields = {}
         fields['stack_name'] = ("vnflcm_" + vnf["id"])
         fields['template'] = self._format_base_hot(base_hot_dict)
@@ -554,6 +561,13 @@ class OpenStack(abstract_driver.VnfAbstractDriver,

     @log.log
     def _create_stack(self, heatclient, vnf, fields):
+        # Find existing stack
+        filters = {"name": f"vnflcm_{vnf['id']}"}
+        stack_found = heatclient.find_stack(**filters)
+        if stack_found and "status" in vnf and vnf['status'] == 'ERROR':
+            stack = {'stack': {'id': stack_found.id}}
+            return stack
+
         if 'stack_name' not in fields:
             name = vnf['name'].replace(' ', '_') + '_' + vnf['id']
             if vnf['attributes'].get('failure_count'):
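The stack lookup added to _create_stack and _create_stack_with_user_data
above, reduced to a standalone get-or-create sketch; lookup and create
are hypothetical stand-ins for heatclient.find_stack and the Heat
stack-create call:

    def get_or_create_stack(lookup, create, vnf):
        # On a retry after a failure, the stack from the failed
        # attempt may already exist; reuse it instead of creating a
        # duplicate with the same vnflcm_<id> name.
        name = "vnflcm_" + vnf['id']
        existing = lookup(name)
        if existing is not None and vnf.get('status') == 'ERROR':
            return {'stack': {'id': existing['id']}}
        return create(name)

    stacks = {}

    def lookup(name):
        return stacks.get(name)

    def create(name):
        stacks[name] = {'id': 'stack-1'}
        return {'stack': stacks[name]}

    vnf = {'id': 'abc', 'status': 'ERROR'}
    print(get_or_create_stack(lookup, create, vnf))  # creates stack-1
    print(get_or_create_stack(lookup, create, vnf))  # reuses stack-1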