
Sample script for healing k8s with kubespray

This patch, "Sample script for deploying k8s with kubespray" and
"Sample script for scaling k8s with kubespray" are parts of one and
the same change. For the convenience of review, they were submitted
separately.

This patch only covers the healing of k8s nodes. However, since the
processing of scale_start is similar to that of heal_start, and the
processing of scale_end is similar to that of heal_end, the scale code
has been refactored and the shared processing extracted into common
methods. In addition, when the CNF performs `heal_entire`, the Load
Balancer configuration file has to be modified again, so heal-related
processing is added to the sample MgmtDriver for the CNF's `NodePort`
setting.

Because the input parameter of terminate and heal was too limited,
its type is also changed. In `NFV-SOL003 v2.6.1`[1], the type of
`additionalParams` is `KeyValuePairs`, so the type of additionalParams
in heal_vnf_request is changed from `list` to `dict`. In order not to
affect the existing behavior, the original additional_params field is
renamed to legacy_additional_params with its original processing
retained, and the UT code is revised accordingly.

[1] https://www.etsi.org/deliver/etsi_gs/NFV-SOL/001_099/003/02.06.01_60/gs_NFV-SOL003v020601p.pdf

Implements: blueprint k8s-mgmtdriver-kubespray
Change-Id: Ibf8b56904b1326f5c7d323c9295d10e303f4b4b8
Yi Feng, 9 months ago
parent commit 2bd3d5670b
Changed files:
  1. releasenotes/notes/support-k8s-mgmtdriver-kubespray-b0ecbaaefb755b11.yaml (24)
  2. samples/mgmt_driver/kubespray/cnf_nodeport_setting/cnf_nodeport_mgmt.py (57)
  3. samples/mgmt_driver/kubespray/cnf_nodeport_setting/cnf_nodeport_setting_vnf_package/TOSCA-Metadata/TOSCA.meta (4)
  4. samples/mgmt_driver/kubespray/kubespray_mgmt.py (743)
  5. samples/mgmt_driver/kubespray/kubespray_vnf_package/TOSCA-Metadata/TOSCA.meta (4)
  6. tacker/objects/heal_vnf_request.py (6)
  7. tacker/objects/terminate_vnf_req.py (2)
  8. tacker/tests/unit/vnfm/infra_drivers/openstack/test_vdu.py (2)
  9. tacker/tests/unit/vnfm/policy_actions/vdu_autoheal/test_vdu_autoheal.py (2)
  10. tacker/tests/unit/vnfm/test_plugin.py (2)
  11. tacker/vnfm/infra_drivers/openstack/vdu.py (3)
  12. tacker/vnfm/policy_actions/vdu_autoheal/vdu_autoheal.py (3)

releasenotes/notes/support-k8s-mgmtdriver-kubespray-b0ecbaaefb755b11.yaml (24)

@@ -0,0 +1,24 @@
---
features:
  - |
    The MgmtDriver function configures applications provided by VNF vendors.
    VNF vendors can customize configuration methods for applications via
    MgmtDriver. These customizations are specified by the "interface"
    definition in ETSI NFV-SOL001 v2.6.1. We provide a sample MgmtDriver
    and scripts which can deploy a Kubernetes cluster. It uses kubespray
    Ansible playbooks to install the Kubernetes cluster VNF, and also
    installs and configures an HAProxy load balancer for the Kubernetes
    cluster. We also provide a user guide to help users understand how to
    use this feature.
    Instantiate Kubernetes cluster:
    The Kubernetes cluster can be instantiated with the VNF Lifecycle
    Management Interface in ETSI NFV-SOL 003 v2.6.1.
    Scale Kubernetes worker node:
    Scaling operations on the worker nodes of a VNF that contains a
    Kubernetes cluster are supported with MgmtDriver.
    Heal Kubernetes worker node:
    Healing operations on the worker nodes of a VNF that contains a
    Kubernetes cluster are supported with MgmtDriver.
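
For illustration only (this example is not part of the patch): the scale and
heal operations described above take their cluster parameters from
`k8s_cluster_installation_param`, supplied in the request's additionalParams
or, when omitted, taken from the parameters stored at instantiation. The key
names below mirror those read by `_get_initial_parameters()` in the sample
kubespray MgmtDriver; every value is a placeholder.

# Hedged sketch of heal/scale additionalParams for the kubespray sample.
heal_additional_params = {
    'k8s_cluster_installation_param': {
        'ansible': {
            'username': 'ansible',
            'password': 'ansible-password',
            'ip_address': '192.168.10.20',
            'transferring_inventory_path': '/home/ansible/inventory/sample',
            'kubespray_root_path': '/home/ansible/kubespray',
        },
        'external_lb_param': {
            'ssh_cp_name': 'ssh_cp',
            'ssh_username': 'ubuntu',
            'ssh_password': 'ubuntu',
        },
        'worker_node': {
            'aspect_id': 'worker_instance',
            'username': 'ubuntu',
            'password': 'ubuntu',
        },
    },
}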

samples/mgmt_driver/kubespray/cnf_nodeport_setting/cnf_nodeport_mgmt.py (57)

@@ -275,23 +275,17 @@ class CnfNodePortMgmt(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
vnf_package_path = vnflcm_utils._get_vnf_package_path(
context, vnf_instance.vnfd_id)
add_param = {}
if hasattr(terminate_vnf_request, 'additional_params'):
if terminate_vnf_request.additional_params:
additional_params = terminate_vnf_request.additional_params
add_param['lcm-kubernetes-external-lb'] = {
'script_path': additional_params.get('script_path'),
'external_lb_param': {
'ssh_ip': additional_params.get('ssh_ip'),
'ssh_username': additional_params.get('ssh_username'),
'ssh_password': additional_params.get('ssh_password'),
}
}
add_param['lcm-kubernetes-def-files'] = \
vnf_instance.instantiated_vnf_info.additional_params.get(
'lcm-kubernetes-def-files')
else:
add_param = \
vnf_instance.instantiated_vnf_info.additional_params
if hasattr(terminate_vnf_request, 'additional_params') and \
terminate_vnf_request.additional_params:
additional_params = terminate_vnf_request.additional_params
lb_params_default = \
vnf_instance.instantiated_vnf_info.additional_params.get(
'lcm-kubernetes-external-lb')
add_param['lcm-kubernetes-external-lb'] = additional_params.get(
'lcm-kubernetes-external-lb', lb_params_default)
add_param['lcm-kubernetes-def-files'] = \
vnf_instance.instantiated_vnf_info.additional_params.get(
'lcm-kubernetes-def-files')
else:
add_param = \
vnf_instance.instantiated_vnf_info.additional_params
@@ -321,13 +315,38 @@ class CnfNodePortMgmt(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
def heal_start(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
pass
if not heal_vnf_request.vnfc_instance_id:
self.terminate_end(
context, vnf_instance, heal_vnf_request,
grant, grant_request)
else:
pass
@log.log
def heal_end(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
pass
if not heal_vnf_request.vnfc_instance_id:
if hasattr(heal_vnf_request, 'additional_params') and \
heal_vnf_request.additional_params:
lb_params_default = \
vnf_instance.instantiated_vnf_info.additional_params.get(
'lcm-kubernetes-external-lb')
if not heal_vnf_request.additional_params.get(
'lcm-kubernetes-external-lb'):
heal_vnf_request.additional_params[
'lcm-kubernetes-external-lb'] = lb_params_default
heal_vnf_request.additional_params[
'lcm-kubernetes-def-files'] = \
vnf_instance.instantiated_vnf_info.additional_params.get(
'lcm-kubernetes-def-files')
else:
heal_vnf_request.additional_params = \
vnf_instance.instantiated_vnf_info.additional_params
self.instantiate_end(context, vnf_instance, heal_vnf_request,
grant, grant_request)
else:
pass
@log.log
def change_external_connectivity_start(
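
As a hedged illustration (not part of the patch): the heal_start/heal_end
methods above fall back to the additionalParams stored at instantiation when
the heal request supplies none. The key names below are the ones
CnfNodePortMgmt reads in terminate_end()/heal_end(); the values, including the
definition file path, are placeholders.

# Hedged sketch of the additionalParams consumed by the CNF NodePort sample.
cnf_additional_params = {
    'lcm-kubernetes-external-lb': {
        'script_path': 'Scripts/configure_lb.sh',
        'external_lb_param': {
            'ssh_ip': '192.168.10.10',
            'ssh_username': 'ubuntu',
            'ssh_password': 'ubuntu',
        },
    },
    # taken from the instantiation parameters when not supplied in the request
    'lcm-kubernetes-def-files': ['Files/kubernetes/deployment.yaml'],
}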

samples/mgmt_driver/kubespray/cnf_nodeport_setting/cnf_nodeport_setting_vnf_package/TOSCA-Metadata/TOSCA.meta (4)

@@ -21,9 +21,9 @@ Hash: 9994a5a5f630c41d178ac58fff93140d3509da5f01518a7bd0e16db70a545c5e
Name: Scripts/configure_lb.sh
Content-Type: application/x-shellscript
Algorithm: SHA-256
Hash: 45a01e214c06a66dc6b7a018650e292a8cc216e7db4cb638712852a843679d0d
Hash: f37afaf66aa15945583dd3e821f2e3be32d4c5ad2d633f58bbd5d319792a5978
Name: Scripts/cnf_nodeport_mgmt.py
Content-Type: text/x-python
Algorithm: SHA-256
Hash: 927d34e813f635f069ed352914aa7260e26c6b011560962aee5eb49e9faed927
Hash: 00551456e8e72f224335d8cf02448993c6f565d1fdbeedff3cd37310a93833fa

samples/mgmt_driver/kubespray/kubespray_mgmt.py (743)

@@ -301,14 +301,13 @@ class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
sftp.put(local_file, remote_file)
connect.close()
def _execute_command(self, commander, ssh_command, timeout, type, retry,
input_data=None):
def _execute_command(self, commander, ssh_command, timeout, type, retry):
eventlet.monkey_patch()
while retry >= 0:
try:
with eventlet.Timeout(timeout, True):
result = commander.execute_command(
ssh_command, input_data)
ssh_command)
break
except eventlet.timeout.Timeout:
LOG.debug('It is time out, When execute command: '
@@ -771,25 +770,14 @@ class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
if vim_info:
nfvo_plugin = NfvoPlugin()
nfvo_plugin.delete_vim(context, vim_info.id)
for k8s_vim in vnf_instance.vim_connection_info:
if k8s_vim.vim_id == vim_info.id:
vnf_instance.vim_connection_info.remove(k8s_vim)
# delete cluster info on ansible server
ansible = {}
if hasattr(terminate_vnf_request, 'additional_params'):
if terminate_vnf_request.additional_params:
if terminate_vnf_request.additional_params.get(
'ansible_username'):
ansible['username'] = \
terminate_vnf_request.additional_params.get(
'ansible_username')
if terminate_vnf_request.additional_params.get(
'ansible_password'):
ansible['password'] = \
terminate_vnf_request.additional_params.get(
'ansible_password')
else:
ansible = k8s_params.get('ansible')
else:
ansible = k8s_params.get('ansible')
_, _, ansible, _ = \
self._get_initial_parameters(
context, vnf_instance, terminate_vnf_request)
commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
@@ -802,6 +790,105 @@ class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
commander.close_session()
def _update_external_lb(self, external_lb_param, lb_ssh_ip, hostname):
external_lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
lb_ssh_ip,
K8S_CMD_TIMEOUT
)
ssh_command = 'grep -n "{}" /etc/haproxy/haproxy.cfg | ' \
'cut -d : -f 1'.format(hostname)
result = self._execute_command(
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
if result:
worker_host_num = result[0].replace('\n', '')
if worker_host_num.isdigit():
lb_server_num = int(worker_host_num, base=0)
ssh_command = "sudo sed -i '{}d' " \
"/etc/haproxy/haproxy.cfg" \
.format(lb_server_num)
self._execute_command(
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
self._restart_haproxy(external_lb_commander)
external_lb_commander.close_session()
def _delete_worker_node_and_update_inventory_file(
self, ansible, worker_node, worker_hostname, operation_type):
update_hosts_yaml_path = ansible.get(
'transferring_inventory_path') + '/hosts.yaml'
self._modify_ansible_user_or_password(update_hosts_yaml_path,
worker_node, ansible)
# remove worker node
ssh_command = "ansible-playbook -i" \
" {}/hosts.yaml" \
" --become --become-user=root " \
"{}/remove-node.yml -e" \
" node={}".format(ansible.get(
'transferring_inventory_path'),
ansible.get('kubespray_root_path'),
worker_hostname)
try:
with eventlet.Timeout(K8S_INSTALL_TIMEOUT, True):
result, code = self._uninstall_worker_node(
ssh_command, ansible)
if code != 0:
msg = 'Fail to remove the worker node {}'.\
format(worker_hostname)
LOG.error(result)
raise exceptions.MgmtDriverOtherError(
error_message=msg)
LOG.debug(result)
except eventlet.timeout.Timeout:
msg = 'It is time out while deleting' \
' the worker node {}'.format(worker_hostname)
LOG.error(msg)
raise exceptions.MgmtDriverOtherError(
error_message=msg)
# Gets the line of rows where worker_hostname resides
if operation_type == 'SCALE':
while True:
commander_k8s = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = 'grep -n "{}" {} | head -1 ' \
'| cut -d : -f 1'\
.format(worker_hostname, update_hosts_yaml_path)
host_name = self._execute_command(
commander_k8s, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
if host_name:
host_name_line = host_name[0].replace('\n', '')
if host_name_line.isdigit():
host_name_line = int(host_name_line, base=0)
ssh_command = 'sed -n {}P {}' \
.format(host_name_line + 1,
update_hosts_yaml_path)
is_hosts_or_children = self._execute_command(
commander_k8s, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)[0]
if "ansible_host" in is_hosts_or_children:
ssh_command = "sed -i '{}, {}d' {}" \
.format(host_name_line,
host_name_line + 4,
update_hosts_yaml_path)
else:
ssh_command = "sed -i " \
"'{}d' {}"\
.format(host_name_line,
update_hosts_yaml_path)
self._execute_command(
commander_k8s, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
else:
break
commander_k8s.close_session()
if os.path.exists(update_hosts_yaml_path):
os.remove(update_hosts_yaml_path)
def _uninstall_worker_node(self, ssh_command, ansible):
end_str = ('# ', '$ ', '? ', '% ')
end_flag = False
@@ -847,6 +934,104 @@ class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
LOG.debug(e)
raise exceptions.MgmtDriverOtherError(error_message=e)
def _get_initial_parameters(self, context, vnf_instance, action_request):
vim_connection_info = \
self._get_vim_connection_info(context, vnf_instance)
k8s_cluster_installation_param = \
vnf_instance.instantiated_vnf_info.additional_params.get(
'k8s_cluster_installation_param')
worker_node_default = \
k8s_cluster_installation_param.get('worker_node')
external_lb_param_default = \
k8s_cluster_installation_param.get('external_lb_param')
ansible_default = \
k8s_cluster_installation_param.get('ansible')
# If additional_params exist in action_request
if hasattr(action_request, 'additional_params') and \
action_request.additional_params:
# Get the VM's information from action_request
add_param = action_request. \
additional_params.get('k8s_cluster_installation_param')
if add_param:
worker_node = add_param.get('worker_node', worker_node_default)
external_lb_param = add_param.get('external_lb_param',
external_lb_param_default)
ansible = add_param.get('ansible', ansible_default)
else:
worker_node = worker_node_default
external_lb_param = external_lb_param_default
ansible = ansible_default
else:
worker_node = worker_node_default
external_lb_param = external_lb_param_default
ansible = ansible_default
return worker_node, external_lb_param, ansible, vim_connection_info
def _remove_node_and_update_config_file(
self, worker_hostnames, external_lb_param,
lb_ssh_ip, ansible, worker_node, operation_type):
# Migrate the pod of the worker node
for worker_hostname in worker_hostnames:
# init lb RemoteCommandExecutor
external_lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
lb_ssh_ip,
K8S_CMD_TIMEOUT
)
# check worker_node exist in k8s-cluster
ssh_command = "kubectl get node --no-headers {}" \
" 2> /dev/null".format(worker_hostname)
result = self._execute_command(external_lb_commander,
ssh_command,
K8S_CMD_TIMEOUT,
'common',
0)
if result:
ssh_command = \
"kubectl get pods --field-selector=spec." \
"nodeName={} -o json".format(worker_hostname)
result = self._execute_command(external_lb_commander,
ssh_command,
K8S_CMD_TIMEOUT,
'common',
0)
# Get the names of all pods on the worker node
daemonset_content_str = ''.join(result)
daemonset_content = json.loads(
daemonset_content_str)
ssh_command = "kubectl drain {}" \
" --ignore-daemonsets" \
" --delete-emptydir-data" \
" --timeout={}s".format(worker_hostname,
DRAIN_TIMEOUT)
self._execute_command(external_lb_commander,
ssh_command,
K8S_DEPLOY_TIMEOUT,
'common', 0)
self.evacuate_wait(external_lb_commander,
daemonset_content)
external_lb_commander.close_session()
# Uninstall worker node and update inventory file
self._delete_worker_node_and_update_inventory_file(
ansible, worker_node, worker_hostname, operation_type)
# Update ExternalLB's haproxy
self._update_external_lb(external_lb_param, lb_ssh_ip,
worker_hostname)
ansible_commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = 'rm -rf ~/.ssh/known_hosts'
self._execute_command(
ansible_commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
ansible_commander.close_session()
@log.log
def scale_start(self, context, vnf_instance,
scale_vnf_request, grant,
@@ -856,36 +1041,10 @@ class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
scale_name_list = kwargs.get('scale_name_list')
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
resource_name = scale_vnf_request.aspect_id
vim_connection_info = \
self._get_vim_connection_info(context, vnf_instance)
# Get the VM's information of worker node, LB and ansible
additional_params = \
vnf_instance.instantiated_vnf_info.additional_params
k8s_cluster_installation_param = \
additional_params.get('k8s_cluster_installation_param')
# If additional_params exist in scale_vnf_request
worker_node_default = \
k8s_cluster_installation_param.get('worker_node')
external_lb_param_default = \
k8s_cluster_installation_param.get('external_lb_param')
ansible_default = \
k8s_cluster_installation_param.get('ansible')
# If additional_params exist in scale_vnf_request
if scale_vnf_request.additional_params:
# Get the VM's information from scale_vnf_request
worker_node = scale_vnf_request. \
additional_params.get('worker_node', worker_node_default)
external_lb_param = scale_vnf_request. \
additional_params.get('external_lb_param',
external_lb_param_default)
ansible = scale_vnf_request. \
additional_params.get('ansible', ansible_default)
else:
worker_node = worker_node_default
external_lb_param = external_lb_param_default
ansible = ansible_default
worker_node, external_lb_param, ansible, vim_connection_info = \
self._get_initial_parameters(
context, vnf_instance, scale_vnf_request)
# Get the ssh ip of LB
heatclient = hc.HeatClient(vim_connection_info.access_info)
@@ -931,150 +1090,13 @@ class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
'worker' + worker_ip_dict.get("nic_ip").split('.')[-1]
worker_hostnames.append(worker_hostname)
# Migrate the pod of the worker node
for worker_hostname in worker_hostnames:
# init lb RemoteCommandExecutor
external_lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
lb_ssh_ip,
K8S_CMD_TIMEOUT
)
# check worker_node exist in k8s-cluster
ssh_command = "kubectl get node --no-headers {}" \
" 2> /dev/null".format(worker_hostname)
result = self._execute_command(external_lb_commander,
ssh_command,
K8S_CMD_TIMEOUT,
'common',
0)
update_hosts_yaml_path = ansible.get(
'transferring_inventory_path') + '/hosts.yaml'
# Update inventory file
self._modify_ansible_user_or_password(update_hosts_yaml_path,
worker_node, ansible)
if result:
ssh_command = \
"kubectl get pods --field-selector=spec." \
"nodeName={} -o json".format(worker_hostname)
result = self._execute_command(external_lb_commander,
ssh_command,
K8S_CMD_TIMEOUT,
'common',
0)
# Get the names of all pods on the worker node
daemonset_content_str = ''.join(result)
daemonset_content = json.loads(
daemonset_content_str)
ssh_command = "kubectl drain {}" \
" --ignore-daemonsets" \
" --delete-emptydir-data" \
" --timeout={}s".format(worker_hostname,
DRAIN_TIMEOUT)
self._execute_command(external_lb_commander,
ssh_command,
K8S_DEPLOY_TIMEOUT,
'common', 0)
self.evacuate_wait(external_lb_commander,
daemonset_content)
external_lb_commander.close_session()
# uninstall worker node
ssh_command = "ansible-playbook -i" \
" {}/hosts.yaml" \
" --become --become-user=root " \
"{}/remove-node.yml -e" \
" node={}".format(ansible.get(
'transferring_inventory_path'),
ansible.get('kubespray_root_path'),
worker_hostname)
try:
with eventlet.Timeout(K8S_INSTALL_TIMEOUT, True):
result, code = self._uninstall_worker_node(
ssh_command, ansible)
if code != 0:
msg = 'Fail to remove the worker node {}'.\
format(worker_hostname)
LOG.error(result)
raise exceptions.MgmtDriverOtherError(
error_message=msg)
LOG.debug(result)
except eventlet.timeout.Timeout:
msg = 'It is time out while deleting' \
' the worker node {}'.format(worker_hostname)
LOG.error(msg)
raise exceptions.MgmtDriverOtherError(
error_message=msg)
# Gets the line of rows where worker_hostname resides
while True:
commander_k8s = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = 'grep -n "{}" {} | head -1 ' \
'| cut -d : -f 1'\
.format(worker_hostname, update_hosts_yaml_path)
host_name = self._execute_command(
commander_k8s, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
if host_name:
host_name_line = host_name[0].replace('\n', '')
if host_name_line.isdigit():
host_name_line = int(host_name_line, base=0)
ssh_command = 'sed -n {}P {}' \
.format(host_name_line + 1,
update_hosts_yaml_path)
is_hosts_or_children = self._execute_command(
commander_k8s, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)[0]
if "ansible_host" in is_hosts_or_children:
ssh_command = "sed -i '{}, {}d' {}" \
.format(host_name_line,
host_name_line + 4,
update_hosts_yaml_path)
else:
ssh_command = "sed -i " \
"'{}d' {}"\
.format(host_name_line,
update_hosts_yaml_path)
self._execute_command(
commander_k8s, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
else:
break
commander_k8s.close_session()
# Update ExternalLB's haproxy
external_lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
lb_ssh_ip,
K8S_CMD_TIMEOUT
)
ssh_command = 'grep -n "{}" /etc/haproxy/haproxy.cfg | ' \
'cut -d : -f 1'.format(worker_hostname)
result = self._execute_command(
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
if result:
worker_host_num = result[0].replace('\n', '')
if worker_host_num.isdigit():
lb_server_num = int(worker_host_num, base=0)
ssh_command = "sudo sed -i '{}d' " \
"/etc/haproxy/haproxy.cfg" \
.format(lb_server_num)
self._execute_command(
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
self._restart_haproxy(external_lb_commander)
external_lb_commander.close_session()
self._remove_node_and_update_config_file(
worker_hostnames, external_lb_param,
lb_ssh_ip, ansible, worker_node, 'SCALE')
else:
pass
def evacuate_wait(self, commander, daemonset_content):
# scale: evacuate wait
wait_flag = True
retry_count = CHECK_POD_STATUS_RETRY_COUNT
while wait_flag and retry_count > 0:
@@ -1129,50 +1151,99 @@ class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
def _update_lb_config_file(self, external_lb_param, lb_ssh_ip,
worker_ip_dict_list):
external_lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
lb_ssh_ip,
K8S_CMD_TIMEOUT
)
add_row_data = ''
for worker_ip_dict in worker_ip_dict_list:
worker_host_name = 'worker' + \
worker_ip_dict.get('nic_ip').split('.')[-1]
nic_ip = worker_ip_dict.get('nic_ip')
row_data = ' server {} {} check'.format(
worker_host_name, nic_ip)
add_row_data += row_data + '\\n'
ssh_command = 'grep -n "backend kubernetes-nodeport" ' \
'/etc/haproxy/haproxy.cfg | head -1 | cut -d : -f 1'
result = self._execute_command(external_lb_commander,
ssh_command,
K8S_INSTALL_TIMEOUT,
'common', 0)[0].replace('\n', '')
write_start_row = int(result) + 2
ssh_command = 'sudo sed -i "{}a\\{}" ' \
'/etc/haproxy/haproxy.cfg'.format(
write_start_row, add_row_data)
LOG.debug("ssh_command: {}".format(ssh_command))
self._execute_command(
external_lb_commander, ssh_command,
K8S_INSTALL_TIMEOUT, 'common', 0)
self._restart_haproxy(external_lb_commander)
external_lb_commander.close_session()
def _install_node_and_update_config_file(
self, worker_node, worker_ip_dict_list,
ansible, external_lb_param, lb_ssh_ip):
# check worker_VM can be accessed via ssh
self._init_commander_and_set_script(
worker_node.get('username'), worker_node.get('password'),
worker_ip_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT)
# Install worker node
commander_k8s = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'),
K8S_INSTALL_TIMEOUT * len(worker_ip_dict_list))
facts_yaml_path = ansible.get(
'kubespray_root_path') + '/facts.yml'
ssh_command = "ansible-playbook -i" \
" {}/hosts.yaml" \
" --become --become-user=root {}" \
.format(ansible.get('transferring_inventory_path'),
facts_yaml_path)
self._execute_command(
commander_k8s, ssh_command,
K8S_DEPLOY_TIMEOUT, 'common', 0)
scale_yaml_path = ansible.get(
'kubespray_root_path') + '/scale.yml'
ssh_command = "ansible-playbook -i" \
" {}/hosts.yaml" \
" --become --become-user=root {}".format(
ansible.get('transferring_inventory_path'),
scale_yaml_path)
self._execute_command(
commander_k8s, ssh_command,
K8S_INSTALL_TIMEOUT * len(worker_ip_dict_list),
'install', 0)
commander_k8s.close_session()
# Update ExternalLB's haproxy.cfg
self._update_lb_config_file(
external_lb_param, lb_ssh_ip, worker_ip_dict_list)
@log.log
def scale_end(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
if scale_vnf_request.type == 'SCALE_OUT':
k8s_cluster_installation_param = \
vnf_instance.instantiated_vnf_info.additional_params.\
get('k8s_cluster_installation_param')
scale_out_id_list = kwargs.get('scale_out_id_list')
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
worker_node, external_lb_param, ansible, vim_connection_info =\
self._get_initial_parameters(
context, vnf_instance, scale_vnf_request)
worker_node_default = \
k8s_cluster_installation_param.get('worker_node')
external_lb_param_default = \
k8s_cluster_installation_param.get('external_lb_param')
ansible_default = \
k8s_cluster_installation_param.get('ansible')
# If additional_params exist in scale_vnf_request
if scale_vnf_request.additional_params:
# Get the VM's information from scale_vnf_request
worker_node = scale_vnf_request. \
additional_params.get('worker_node', worker_node_default)
external_lb_param = scale_vnf_request. \
additional_params.get('external_lb_param',
external_lb_param_default)
ansible = scale_vnf_request. \
additional_params.get('ansible', ansible_default)
else:
worker_node = worker_node_default
external_lb_param = external_lb_param_default
ansible = ansible_default
# execute function vnflcm_utils._get_vim --> vim_connection_info
vim_connection_info = \
self._get_vim_connection_info(context, vnf_instance)
heatclient = hc.HeatClient(vim_connection_info.access_info)
# Get the ssh ip of LB
heatclient = hc.HeatClient(vim_connection_info.access_info)
resource_info = heatclient.resources. \
get(stack_id=nest_stack_id,
resource_name=external_lb_param.get('ssh_cp_name'))
# If the VM's floating ip is not None
# Get floating ip from resource_info and assign it to ssh ip
lb_ssh_ip = self._get_lb_or_worker_ssh_ip(resource_info, True)
# get scale-out worker's info
@@ -1217,73 +1288,10 @@ class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
ansible.get('password'), update_hosts_yaml_path,
local_hosts_yaml_path, 'send')
# check worker_VM can be accessed via ssh
self._init_commander_and_set_script(
worker_node.get('username'), worker_node.get('password'),
worker_ip_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT)
# Install worker node
commander_k8s = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'),
K8S_INSTALL_TIMEOUT * len(worker_ip_dict_list))
facts_yaml_path = ansible.get(
'kubespray_root_path') + '/facts.yml'
ssh_command = "ansible-playbook -i" \
" {}/hosts.yaml" \
" --become --become-user=root {}" \
.format(ansible.get('transferring_inventory_path'),
facts_yaml_path)
self._execute_command(
commander_k8s, ssh_command,
K8S_DEPLOY_TIMEOUT, 'common', 0)
scale_yaml_path = ansible.get(
'kubespray_root_path') + '/scale.yml'
ssh_command = "ansible-playbook -i" \
" {}/hosts.yaml" \
" --become --become-user=root {}".format(
ansible.get('transferring_inventory_path'),
scale_yaml_path)
self._execute_command(
commander_k8s, ssh_command,
K8S_INSTALL_TIMEOUT * len(worker_ip_dict_list),
'install', 0)
commander_k8s.close_session()
# Update ExternalLB's haproxy.cfg
external_lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
lb_ssh_ip,
K8S_CMD_TIMEOUT
)
add_row_data = ''
for worker_ip_dict in worker_ip_dict_list:
worker_host_name = 'worker' + \
worker_ip_dict.get('nic_ip').split('.')[-1]
nic_ip = worker_ip_dict.get('nic_ip')
row_data = ' server {} {} check'.format(
worker_host_name, nic_ip)
add_row_data += row_data + '\n'
ssh_command = 'grep -n "backend kubernetes-nodeport" ' \
'/etc/haproxy/haproxy.cfg | head -1 | cut -d : -f 1'
result = self._execute_command(external_lb_commander,
ssh_command,
K8S_INSTALL_TIMEOUT,
'common', 0)[0].replace('\n', '')
write_start_row = int(result) + 2
ssh_command = 'sudo sed -i "{}a\\{}" ' \
'/etc/haproxy/haproxy.cfg'.format(
write_start_row, add_row_data)
LOG.debug("ssh_command: {}".format(ssh_command))
self._execute_command(
external_lb_commander, ssh_command,
K8S_INSTALL_TIMEOUT, 'common', 0)
self._restart_haproxy(external_lb_commander)
external_lb_commander.close_session()
# Install worker node and update configuration file
self._install_node_and_update_config_file(
worker_node, worker_ip_dict_list, ansible,
external_lb_param, lb_ssh_ip)
else:
pass
@@ -1298,7 +1306,7 @@ class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
local_hosts_yaml_path, 'receive')
with open(local_hosts_yaml_path, 'r', encoding='utf-8') as f:
file_content = f.read()
hosts_content = yaml.load(file_content)
hosts_content = yaml.safe_load(file_content)
worker_nodes = hosts_content['all']['children']['kube_node'][
'hosts']
LOG.debug("worker_nodes: {}".format(worker_nodes))
@@ -1336,23 +1344,176 @@ class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
return hosts_content
os.remove(local_hosts_yaml_path)
except Exception:
LOG.debug('modify ansible_user or ansible_password has error: {}.'
LOG.error('modify ansible_user or ansible_password has error: {}.'
.format(ValueError))
raise exceptions.MgmtDriverOtherError(
error_message='modify user or password has error: {}.'.format(
Exception))
def _get_vnfc_resource_id(self, vnfc_resource_list, vnfc_instance_id):
for vnfc_resource in vnfc_resource_list:
if vnfc_resource.id == vnfc_instance_id:
return vnfc_resource
return None
def _get_heal_physical_resource_ids(self, vnf_instance,
heal_vnf_request):
heal_physical_resource_ids = []
for vnfc_instance_id in heal_vnf_request.vnfc_instance_id:
instantiated_vnf_info = vnf_instance.instantiated_vnf_info
vnfc_resource_info = instantiated_vnf_info.vnfc_resource_info
vnfc_resource = self._get_vnfc_resource_id(
vnfc_resource_info, vnfc_instance_id)
if vnfc_resource:
heal_physical_resource_ids.append(
vnfc_resource.compute_resource.resource_id)
return heal_physical_resource_ids
def _get_heal_worker_node_info(
self, vnf_additional_params, worker_node, heatclient,
nest_stack_id, heal_physical_resource_ids):
worker_ip_dict_list = []
if worker_node.get('aspect_id'):
worker_group_resource_name = worker_node.get('aspect_id')
worker_group_resource = heatclient.resources.get(
stack_id=nest_stack_id,
resource_name=worker_group_resource_name)
if not worker_group_resource:
raise exceptions.MgmtDriverOtherError(
error_message='The specified resource'
' {} was not found.'.format(
worker_group_resource_name))
worker_group_resource_list = heatclient.resources.list(
stack_id=worker_group_resource.physical_resource_id)
for worker_resource in worker_group_resource_list:
lowest_resource_list = heatclient.resources.list(
stack_id=worker_resource.physical_resource_id)
for lowest_resource in lowest_resource_list:
if lowest_resource.resource_type == 'OS::Nova::Server' \
and lowest_resource.physical_resource_id in \
heal_physical_resource_ids:
worker_ssh_ip, worker_nic_ip = \
self._get_ssh_ip_and_nic_ip(
heatclient,
worker_resource.physical_resource_id,
worker_node)
ip_dict = {"nic_ip": worker_nic_ip,
"ssh_ip": worker_ssh_ip}
worker_ip_dict_list.append(ip_dict)
else:
# in case of SOL001 TOSCA-based VNFD with single worker node
resource_list = heatclient.resources.list(
stack_id=nest_stack_id)
for resource in resource_list:
if resource.resource_type == 'OS::Nova::Server' \
and resource.physical_resource_id in \
heal_physical_resource_ids:
worker_ssh_ip, worker_nic_ip = \
self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, worker_node)
ip_dict = {"nic_ip": worker_nic_ip,
"ssh_ip": worker_ssh_ip}
worker_ip_dict_list.append(ip_dict)
# Get the hostname of the deleting worker nodes
worker_hostnames = []
for worker_ip_dict in worker_ip_dict_list:
# get worker host names
worker_hostname = \
'worker' + worker_ip_dict.get("nic_ip").split('.')[-1]
worker_hostnames.append(worker_hostname)
return worker_hostnames, worker_ip_dict_list
@log.log
def heal_start(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
pass
vnf_additional_params = \
vnf_instance.instantiated_vnf_info.additional_params
# heal of the entire VNF
if not heal_vnf_request.vnfc_instance_id:
self.terminate_end(context, vnf_instance, heal_vnf_request,
grant, grant_request)
else:
# heal specified with VNFC instances
heal_physical_resource_ids = \
self._get_heal_physical_resource_ids(
vnf_instance, heal_vnf_request)
worker_node, external_lb_param, ansible, vim_connection_info = \
self._get_initial_parameters(
context, vnf_instance, heal_vnf_request)
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
# Get the ssh ip of LB
heatclient = hc.HeatClient(vim_connection_info.access_info)
ssh_ip, _ = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, external_lb_param)
# Get the worker_hostnames to be healed
worker_hostnames, _ = self._get_heal_worker_node_info(
vnf_additional_params, worker_node, heatclient,
nest_stack_id, heal_physical_resource_ids)
# remove_worker_node_from_k8s_cluster and update configuration file
self._remove_node_and_update_config_file(
worker_hostnames, external_lb_param,
ssh_ip, ansible, worker_node, 'HEAL')
@log.log
def heal_end(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
pass
vnf_additional_params = \
vnf_instance.instantiated_vnf_info.additional_params
# heal of the entire VNF
if not heal_vnf_request.vnfc_instance_id:
add_param_list = ['master_node', 'worker_node', 'proxy',
'ansible', 'external_lb_param']
for add_param in add_param_list:
if heal_vnf_request.additional_params.get(
'k8s_cluster_installation_param'):
if add_param in heal_vnf_request.additional_params.get(
'k8s_cluster_installation_param'):
vnf_additional_params.get(
'k8s_cluster_installation_param')[add_param] = \
heal_vnf_request.additional_params[
'k8s_cluster_installation_param'].get(
add_param)
heal_vnf_request.additional_params = vnf_additional_params
self.instantiate_end(context, vnf_instance, heal_vnf_request,
grant, grant_request)
else:
# heal specified with VNFC instances
heal_physical_resource_ids = \
self._get_heal_physical_resource_ids(
vnf_instance, heal_vnf_request)
worker_node, external_lb_param, ansible, vim_connection_info = \
self._get_initial_parameters(
context, vnf_instance, heal_vnf_request)
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
# Get the ssh ip of LB
heatclient = hc.HeatClient(vim_connection_info.access_info)
ssh_ip, _ = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, external_lb_param)
# Get the worker_ip_dict_list to be healed
_, worker_ip_dict_list = self._get_heal_worker_node_info(
vnf_additional_params, worker_node, heatclient,
nest_stack_id, heal_physical_resource_ids)
# Install worker node and update configuration file
self._install_node_and_update_config_file(
worker_node, worker_ip_dict_list, ansible,
external_lb_param, ssh_ip)
@log.log
def change_external_connectivity_start(
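
One convention worth noting from the driver above (illustration only, not part
of the patch): worker hostnames are derived from the last octet of each node's
NIC IP, both when nodes are added to hosts.yaml and haproxy.cfg and when they
are selected for removal during scale-in or heal. A minimal sketch, with a
placeholder IP:

# Minimal sketch of the worker-hostname convention used by the sample driver.
def worker_hostname_from_nic_ip(nic_ip):
    # e.g. '10.10.0.35' -> 'worker35'
    return 'worker' + nic_ip.split('.')[-1]

print(worker_hostname_from_nic_ip('10.10.0.35'))  # prints: worker35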

samples/mgmt_driver/kubespray/kubespray_vnf_package/TOSCA-Metadata/TOSCA.meta (4)

@@ -6,9 +6,9 @@ Entry-Definitions: Definitions/sample_kubernetes_top.vnfd.yaml
Name: Scripts/install_external_lb.sh
Content-Type: application/x-shellscript
Algorithm: SHA-256
Hash: 0b2445403a4b2ce2f905c2b7f77dcdb444a1e445379a11c6aca8e87d4b1f8198
Hash: 3798d91efef8b4baa8eb2e56a43be96543d7eb76e7e1c55941460d407b974af4
Name: Scripts/kubespray_mgmt.py
Content-Type: text/x-python
Algorithm: SHA-256
Hash: 2d6232040fd049619e1a7c3e268b87ccec9aa4d56d955f1bfa4420a4f6531e31
Hash: 63ba10c3a9a84c558ad13acd71f2f8004a3169f26422ce98d929828daa30b485

tacker/objects/heal_vnf_request.py (6)

@@ -44,8 +44,10 @@ class HealVnfRequest(base.TackerObject):
default=[]),
'stack_id': fields.StringField(nullable=True, default=''),
'cause': fields.StringField(nullable=True, default=None),
'additional_params': fields.ListOfObjectsField(
'HealVnfAdditionalParams', default=[])
'legacy_additional_params': fields.ListOfObjectsField(
'HealVnfAdditionalParams', default=[]),
'additional_params': fields.DictOfNullableField(nullable=True,
default={})
}
def obj_make_compatible(self, primitive, target_version):
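
A hedged usage sketch of the updated object (not part of the patch): heal
requests can now carry free-form key/value pairs in additional_params, matching
the SOL003 KeyValuePairs type, while the pre-existing list-of-objects field
lives on as legacy_additional_params, as in the revised unit tests below. All
values are placeholders.

from tacker.objects import heal_vnf_request

heal_req = heal_vnf_request.HealVnfRequest(
    cause='VNF monitoring fails.',
    # new dict-typed field passed through to the MgmtDriver samples
    additional_params={'k8s_cluster_installation_param': {}},
    # renamed list-of-objects field kept for the legacy heal path
    legacy_additional_params=[])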

tacker/objects/terminate_vnf_req.py (2)

@@ -31,7 +31,7 @@ class TerminateVnfRequest(base.TackerObject, base.TackerPersistentObject):
nullable=False),
'graceful_termination_timeout': fields.IntegerField(nullable=True,
default=0),
'additional_params': fields.DictOfStringsField(nullable=True,
'additional_params': fields.DictOfNullableField(nullable=True,
default={}),
}
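
The change above from DictOfStringsField to DictOfNullableField lets a
terminate request's additionalParams carry nested mappings rather than only
string values, which the samples in this patch rely on. A hedged illustration
with placeholder values:

# Flat string values and nested mappings can now coexist in additionalParams;
# a strings-only dict field would reject the nested entry.
terminate_additional_params = {
    'ansible_username': 'ansible',
    'ansible_password': 'ansible-password',
    'lcm-kubernetes-external-lb': {
        'external_lb_param': {
            'ssh_ip': '192.168.10.10',
            'ssh_username': 'ubuntu',
            'ssh_password': 'ubuntu',
        },
    },
}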

tacker/tests/unit/vnfm/infra_drivers/openstack/test_vdu.py (2)

@@ -108,7 +108,7 @@ class TestVDU(base.TestCase):
self.heal_request_data_obj = heal_vnf_request.HealVnfRequest(
cause='VNF monitoring fails.',
stack_id=vnf_dict['instance_id'],
additional_params=[self.additional_paramas_obj])
legacy_additional_params=[self.additional_paramas_obj])
self.heal_vdu = vdu.Vdu(self.context, vnf_dict,
self.heal_request_data_obj)

tacker/tests/unit/vnfm/policy_actions/vdu_autoheal/test_vdu_autoheal.py (2)

@@ -130,7 +130,7 @@ class TestVNFActionVduAutoheal(db_base.SqlTestCase):
additional_params.append(additional_paramas_obj)
heal_request_data_obj = heal_vnf_request.HealVnfRequest(
cause='VNF monitoring fails.',
additional_params=additional_params)
legacy_additional_params=additional_params)
mock_heal_vnf_request.return_value = heal_request_data_obj
self.vdu_autoheal.execute_action(self.vnfm_plugin, self.context,
vnf_dict, args={'vdu_name': 'VDU1'})

tacker/tests/unit/vnfm/test_plugin.py (2)

@@ -1142,7 +1142,7 @@ class TestVNFMPlugin(db_base.SqlTestCase):
heal_request_data_obj = heal_vnf_request.HealVnfRequest(
stack_id=dummy_device_obj['instance_id'],
cause='VNF monitoring fails.',
additional_params=[additional_params_obj])
legacy_additional_params=[additional_params_obj])
result = self.vnfm_plugin.heal_vnf(self.context,
dummy_device_obj['id'],
heal_request_data_obj)

tacker/vnfm/infra_drivers/openstack/vdu.py (3)

@@ -51,7 +51,8 @@ class Vdu(object):
def _resource_mark_unhealthy(self):
"""Mark the resource unhealthy using heat."""
additional_params = self.heal_request_data_obj.additional_params
additional_params = self.heal_request_data_obj.\
legacy_additional_params
for additional_param in additional_params:
resource_name = additional_param.parameter
res_status = self._get_resource_status(self.stack_id,

tacker/vnfm/policy_actions/vdu_autoheal/vdu_autoheal.py (3)

@@ -69,6 +69,7 @@ class VNFActionVduAutoheal(abstract_action.AbstractPolicyAction):
heal_request_data_obj = objects.HealVnfRequest(
stack_id=stack_id,
cause=(cause[-1] % vdu_name), additional_params=additional_params)
cause=(cause[-1] % vdu_name),
legacy_additional_params=additional_params)
plugin.heal_vnf(context, vnf_dict['id'], heal_request_data_obj)
