Private registry for deploying Kubernetes cluster

This patch implements the spec to support deploying a Kubernetes
cluster that uses images from a Docker private registry with
MgmtDriver, per blueprint bp/cir-k8s-cluster.

It includes MgmtDriver scripts that deploy a Docker private registry
to a VM created by openstack_driver, supporting the
instantiate/terminate/heal operations.

It also includes MgmtDriver scripts that deploy a Kubernetes cluster
able to use images from a Docker private registry on VMs created by
openstack_driver, supporting the instantiate/terminate/scale/heal
operations.

Finally, it includes a shell script that actually installs the
Kubernetes cluster and configures the environment on each VM to
support the Docker private registry.

Implements: blueprint cir-k8s-cluster
Change-Id: I7adab1d1eaa491a37399d0b615cbee4c6ae86657
Yi Feng 2021-07-20 16:48:13 +09:00
parent 89c3afc72d
commit 26910f1c27
10 changed files with 2637 additions and 12 deletions

View File

@@ -0,0 +1,27 @@
---
features:
- |
Add functions to enable the use of Docker private registry images in a
Kubernetes cluster environment. A sample MgmtDriver is provided that
can deploy a Docker private registry VNF for the Kubernetes cluster
before deploying the Kubernetes cluster VNF, and then deploy a
Kubernetes cluster VNF that can use both the Docker private registry
created above and any others created outside of Tacker.
Instantiate operation for a Kubernetes cluster with MgmtDriver:
MgmtDriver configures connections to the Docker private registries on
all newly created Master/Worker VMs.
Scale-out operation for Kubernetes cluster Worker-nodes with MgmtDriver:
MgmtDriver configures connections to the Docker private registries on
the newly created Worker VMs.
Heal operation for the entire Kubernetes cluster with MgmtDriver:
MgmtDriver configures connections to the Docker private registries on
all re-created Master/Worker VMs.
Heal operation for a single node in the Kubernetes cluster with MgmtDriver:
MgmtDriver configures connections to the Docker private registries on
the re-created Master/Worker VM.
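For reference, the sketch below shows roughly where the new parameter sits in the instantiate request's additionalParams. The key names follow the validation code in this patch; every value is an illustrative assumption, not taken from a real deployment.

# Sketch only: keys mirror this patch's checks; values are made up.
additional_params = {
    "k8s_cluster_installation_param": {
        # master_node / worker_node settings omitted for brevity
        "private_registry_connection_info": [
            {
                "connection_type": "0",        # "0" = HTTP, "1" = HTTPS
                "server": "192.168.0.10:5000"  # registry address (example)
            }
        ]
    }
}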

View File

@@ -355,11 +355,20 @@ function set_docker_proxy {
[Service]
Environment="HTTP_PROXY=${http_proxy//%40/@}" "HTTPS_PROXY=${https_proxy//%40/@}" "NO_PROXY=$no_proxy"
EOF
if [[ -z "$HTTP_PRIVATE_REGISTRIES" ]]; then
cat <<EOF | sudo tee /etc/docker/daemon.json >/dev/null
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
else
cat <<EOF | sudo tee /etc/docker/daemon.json >/dev/null
{
"exec-opts": ["native.cgroupdriver=systemd"],
"insecure-registries": [${HTTP_PRIVATE_REGISTRIES}]
}
EOF
fi
sudo systemctl daemon-reload
sudo systemctl restart docker
sleep 3
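# For illustration (addresses are examples): the MgmtDriver exports
# HTTP_PRIVATE_REGISTRIES so that it expands to pre-quoted entries such
# as "192.168.0.10:5000","192.168.0.11:5000", and the else branch above
# then writes a daemon.json of the form:
# {
#   "exec-opts": ["native.cgroupdriver=systemd"],
#   "insecure-registries": ["192.168.0.10:5000","192.168.0.11:5000"]
# }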
@@ -779,4 +788,8 @@ else
exit 255
fi
fi
sudo ln -s /root/.docker/config.json /var/lib/kubelet/config.json
sudo chmod 666 /var/lib/kubelet/config.json
exit 0

View File

@@ -43,6 +43,10 @@ HELM_CHART_DIR = "/var/tacker/helm"
HELM_CHART_CMP_PATH = "/tmp/tacker-helm.tgz"
SERVER_WAIT_COMPLETE_TIME = 60
# CLI timeout periods when setting up private registry connections
PR_CONNECT_TIMEOUT = 30
PR_CMD_TIMEOUT = 300
class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
@@ -114,6 +118,10 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
err = result.get_stderr()
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
elif type == 'docker_login':
ret1 = result.get_stdout()
ret2 = result.get_stderr()
return ret1, ret2
elif type == 'helm_repo_list':
if result.get_return_code() != 0:
err = result.get_stderr()[0].replace('\n', '')
@@ -458,7 +466,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
def _install_worker_node(self, commander, proxy,
ha_flag, nic_ip, cluster_ip, kubeadm_token,
ssl_ca_cert_hash, http_private_registries):
if proxy.get('http_proxy') and proxy.get('https_proxy'):
ssh_command = \
"export http_proxy={http_proxy};" \
@@ -485,6 +493,13 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
worker_ip=nic_ip, cluster_ip=cluster_ip,
kubeadm_token=kubeadm_token,
ssl_ca_cert_hash=ssl_ca_cert_hash)
# if connecting to the private registries over HTTP,
# add "export HTTP_PRIVATE_REGISTRIES" command
if http_private_registries:
ssh_command = "export HTTP_PRIVATE_REGISTRIES=\"{}\";{}".format(
http_private_registries, ssh_command)
self._execute_command(
commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0)
@@ -537,10 +552,132 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
commander.close_session()
def _get_http_private_registries(self, pr_connection_info):
http_private_registries = ""
if pr_connection_info:
http_pr_list = []
for pr_info in pr_connection_info:
pr_connection_type = str(pr_info.get('connection_type'))
pr_server = pr_info.get('server')
# NOTE: "connection_type" values are "0" for HTTP and
# "1" for HTTPS.
if pr_connection_type == "0":
http_pr_list.append("\\\"" + pr_server + "\\\"")
if http_pr_list:
http_private_registries = ",".join(http_pr_list)
return http_private_registries
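# Worked example with illustrative values: given
#     pr_connection_info = [
#         {"connection_type": "0", "server": "192.168.0.10:5000"},
#         {"connection_type": "1", "server": "registry.example.com:5000"},
#     ]
# this method returns '\"192.168.0.10:5000\"' -- only the HTTP entry is
# kept, pre-escaped so the install script can expand it inside the
# double-quoted HTTP_PRIVATE_REGISTRIES export and splice plain-quoted
# entries into the "insecure-registries" array of daemon.json.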
def _connect_to_private_registries(self, vnf_package_path,
pr_connection_info, node_username,
node_password, node_ip):
LOG.debug("Start the _connect_to_private_registries function. "
"node ip: {}, pr connection info: {}".format(
node_ip, pr_connection_info))
commander = cmd_executer.RemoteCommandExecutor(
user=node_username, password=node_password,
host=node_ip, timeout=PR_CONNECT_TIMEOUT)
# create a cert file list for file transfer
cert_file_list = []
for pr_info in pr_connection_info:
pr_certificate_path = pr_info.get('certificate_path')
if pr_certificate_path:
local_file_path = os.path.join(
vnf_package_path, pr_certificate_path)
# check existence of cert file
if not os.path.exists(local_file_path):
err_param = "certificate_path(path:{})".format(
pr_certificate_path)
LOG.error("The {} in the additionalParams is invalid. "
"File does not exist.".format(err_param))
commander.close_session()
raise exceptions.MgmtDriverParamInvalid(param=err_param)
cert_file_name = os.path.basename(pr_certificate_path)
remote_tmp_path = os.path.join("/tmp", cert_file_name)
remote_dir_path = os.path.join(
"/etc/docker/certs.d", pr_info.get('server'))
remote_file_path = os.path.join(
remote_dir_path, cert_file_name)
cert_file_list.append((local_file_path, remote_tmp_path,
remote_dir_path, remote_file_path))
# send cert files to node
if cert_file_list:
retry = 4
while retry > 0:
try:
transport = paramiko.Transport(node_ip, 22)
transport.connect(
username=node_username, password=node_password)
sftp_client = paramiko.SFTPClient.from_transport(
transport)
for cert_item in cert_file_list:
local_file_path = cert_item[0]
remote_tmp_path = cert_item[1]
remote_dir_path = cert_item[2]
remote_file_path = cert_item[3]
# send cert file to tmp directory
sftp_client.put(local_file_path, remote_tmp_path)
# copy under /etc/docker/certs.d/<server>
ssh_command = ("sudo mkdir -p {} && "
"sudo cp {} {} && sudo rm -f {}".format(
remote_dir_path, remote_tmp_path,
remote_file_path, remote_tmp_path))
self._execute_command(
commander, ssh_command,
PR_CMD_TIMEOUT, 'common', 0)
transport.close()
# all cert files transferred; leave the retry loop
break
except paramiko.SSHException as e:
LOG.debug(e)
retry -= 1
if retry == 0:
LOG.error(e)
commander.close_session()
raise paramiko.SSHException()
time.sleep(SERVER_WAIT_COMPLETE_TIME)
# connect to private registries
for pr_info in pr_connection_info:
# add host to /etc/hosts
pr_hosts_string = pr_info.get('hosts_string')
if pr_hosts_string:
ssh_command = ("echo '{}' | sudo tee -a /etc/hosts "
">/dev/null".format(pr_hosts_string))
self._execute_command(
commander, ssh_command, PR_CMD_TIMEOUT, 'common', 0)
# connect to private registry (run docker login)
pr_server = pr_info.get('server')
login_username = pr_info.get('username', 'tacker')
login_password = pr_info.get('password', 'tacker')
ssh_command = ("sudo docker login {} "
"--username {} --password {}".format(
pr_server, login_username, login_password))
result = self._execute_command(
commander, ssh_command, PR_CMD_TIMEOUT, 'docker_login', 0)
stdout = result[0]
login_successful = (
[line for line in stdout if "Login Succeeded" in line])
if not login_successful:
# Login Failed
stderr = result[1]
unnecessary_msg = "WARNING! Using --password via the CLI"
err_info = (
[line for line in stderr if unnecessary_msg not in line])
err_msg = ("Failed to log in to the Docker private registry. "
"ErrInfo: {}".format(err_info))
LOG.error(err_msg)
commander.close_session()
raise exceptions.MgmtDriverOtherError(error_message=err_msg)
commander.close_session()
LOG.debug("_connect_to_private_registries function complete.")
def _install_k8s_cluster(self, context, vnf_instance,
proxy, script_path,
master_vm_dict_list, worker_vm_dict_list,
helm_inst_script_path, pr_connection_info):
# instantiate: pre /etc/hosts
hosts_str = self._get_hosts(
master_vm_dict_list, worker_vm_dict_list)
@@ -586,6 +723,10 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
"127.0.0.1", "localhost",
master_cluster_ip] + vm_cidr_list)))
# get private registries of type HTTP
http_private_registries = self._get_http_private_registries(
pr_connection_info)
# install k8s
active_username = ""
active_password = ""
@@ -698,6 +839,14 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
kubeadm_token=kubeadm_token,
ssl_ca_cert_hash=ssl_ca_cert_hash,
certificate_key=certificate_key)
# if connecting to the private registries over HTTP,
# add "export HTTP_PRIVATE_REGISTRIES" command
if http_private_registries:
ssh_command = \
"export HTTP_PRIVATE_REGISTRIES=\"{}\";{}".format(
http_private_registries, ssh_command)
results = self._execute_command(
commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0)
@@ -738,6 +887,12 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
masternode_ip_list.append(host)
commander.close_session()
# connect to private registries
if pr_connection_info:
self._connect_to_private_registries(
vnf_package_path, pr_connection_info,
user, password, host)
# install worker node
for vm_dict in worker_vm_dict_list:
user = vm_dict.get('ssh', {}).get('username')
@@ -768,9 +923,16 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
# execute install k8s command on VM
self._install_worker_node(
commander, proxy, ha_flag, nic_ip,
cluster_ip, kubeadm_token, ssl_ca_cert_hash,
http_private_registries)
commander.close_session()
# connect to private registries
if pr_connection_info:
self._connect_to_private_registries(
vnf_package_path, pr_connection_info,
user, password, host)
# set pod_affinity
commander = cmd_executer.RemoteCommandExecutor(
user=active_username, password=active_password,
@@ -865,6 +1027,36 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
raise exceptions.MgmtDriverParamInvalid(param='cluster_cidr')
else:
additional_param['master_node']['cluster_cidr'] = '10.96.0.0/12'
# get private_registry_connection_info param
pr_connection_info = additional_param.get(
'private_registry_connection_info')
if pr_connection_info:
# check private_registry_connection_info param
for pr_info in pr_connection_info:
pr_connection_type = str(pr_info.get('connection_type', ''))
pr_server = pr_info.get('server')
# check connection_type param exists
if not pr_connection_type:
LOG.error("The connection_type "
"in the additionalParams does not exist.")
raise exceptions.MgmtDriverNotFound(
param="connection_type")
# check server param exists
if not pr_server:
LOG.error("The server "
"in the additionalParams does not exist.")
raise exceptions.MgmtDriverNotFound(param="server")
# check connection_type value
# NOTE: "connection_type" values are "0" for HTTP and
# "1" for HTTPS.
if not (pr_connection_type == "0"
or pr_connection_type == "1"):
LOG.error("The connection_type "
"in the additionalParams is invalid.")
raise exceptions.MgmtDriverParamInvalid(
param="connection_type")
# check grants exists
if grant:
self.SET_ZONE_ID_FLAG = True
@@ -887,9 +1079,11 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
access_info, vnf_instance, grant)
server, bearer_token, ssl_ca_cert, project_name, masternode_ip_list = \
self._install_k8s_cluster(context, vnf_instance,
proxy, script_path,
master_vm_dict_list,
worker_vm_dict_list,
helm_inst_script_path,
pr_connection_info)
# register vim with kubernetes cluster info
self._create_vim(context, vnf_instance, server,
@@ -1294,6 +1488,13 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
heatclient = hc.HeatClient(vim_connection_info.access_info)
scale_out_id_list = kwargs.get('scale_out_id_list')
# get private_registry_connection_info param
pr_connection_info = k8s_cluster_installation_param.get(
'private_registry_connection_info')
# get private registries of type HTTP
http_private_registries = self._get_http_private_registries(
pr_connection_info)
# get master_ip
master_ssh_ip_list = []
master_nic_ip_list = []
@@ -1416,8 +1617,16 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
add_worker_ssh_ip_list.index(worker_ip)]
self._install_worker_node(
commander, proxy, ha_flag, worker_nic_ip,
cluster_ip, kubeadm_token, ssl_ca_cert_hash,
http_private_registries)
commander.close_session()
# connect to private registries
if pr_connection_info:
self._connect_to_private_registries(
vnf_package_path, pr_connection_info,
worker_username, worker_password, worker_ip)
if self.SET_NODE_LABEL_FLAG:
commander, _ = self._connect_ssh_scale(
master_ssh_ip_list, master_username,
@@ -1925,7 +2134,8 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
fixed_master_infos, proxy,
master_username, master_password, vnf_package_path,
script_path, cluster_ip, pod_cidr, cluster_cidr,
kubeadm_token, ssl_ca_cert_hash, ha_flag, helm_info,
pr_connection_info, http_private_registries):
not_fixed_master_nic_ips = [
master_ips.get('master_nic_ip')
for master_ips in not_fixed_master_infos.values()]
@@ -1994,6 +2204,14 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
kubeadm_token=kubeadm_token,
ssl_ca_cert_hash=ssl_ca_cert_hash,
certificate_key=certificate_key)
# if connecting to the private registries over HTTP,
# add "export HTTP_PRIVATE_REGISTRIES" command
if http_private_registries:
ssh_command = \
"export HTTP_PRIVATE_REGISTRIES=\"{}\";{}".format(
http_private_registries, ssh_command)
self._execute_command(
commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0)
if helm_info:
@@ -2018,11 +2236,19 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3)
commander.close_session()
# connect to private registries
if pr_connection_info:
self._connect_to_private_registries(
vnf_package_path, pr_connection_info,
master_username, master_password,
fixed_master_info.get('master_ssh_ip'))
def _fix_worker_node(
self, fixed_worker_infos,
hosts_str, worker_username, worker_password,
vnf_package_path, script_path, proxy, cluster_ip,
kubeadm_token, ssl_ca_cert_hash, ha_flag,
pr_connection_info, http_private_registries):
for fixed_worker_name, fixed_worker in fixed_worker_infos.items():
commander = self._init_commander_and_send_install_scripts(
worker_username, worker_password,
@@ -2031,11 +2257,19 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
self._install_worker_node(
commander, proxy, ha_flag,
fixed_worker.get('worker_nic_ip'),
cluster_ip, kubeadm_token, ssl_ca_cert_hash,
http_private_registries)
self._set_node_ip_in_hosts(
commander, 'heal_end', hosts_str=hosts_str)
commander.close_session()
# connect to private registries
if pr_connection_info:
self._connect_to_private_registries(
vnf_package_path, pr_connection_info,
worker_username, worker_password,
fixed_worker.get('worker_ssh_ip'))
def _heal_and_join_k8s_node(
self, heatclient, stack_id, target_physical_resource_ids,
vnf_additional_params, master_resource_name, master_username,
@@ -2141,19 +2375,29 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
hosts_str = self._get_all_hosts(
not_fixed_master_infos, fixed_master_infos,
not_fixed_worker_infos, fixed_worker_infos)
# get private_registry_connection_info param
pr_connection_info = k8s_cluster_installation_param.get(
'private_registry_connection_info')
# get private registries of type HTTP
http_private_registries = self._get_http_private_registries(
pr_connection_info)
if flag_master:
self._fix_master_node(
not_fixed_master_infos, hosts_str,
fixed_master_infos, proxy,
master_username, master_password, vnf_package_path,
script_path, cluster_ip, pod_cidr, cluster_cidr,
kubeadm_token, ssl_ca_cert_hash, ha_flag, helm_info,
pr_connection_info, http_private_registries)
if flag_worker:
self._fix_worker_node(
fixed_worker_infos,
hosts_str, worker_username, worker_password,
vnf_package_path, script_path, proxy, cluster_ip,
kubeadm_token, ssl_ca_cert_hash, ha_flag,
pr_connection_info, http_private_registries)
if self.SET_NODE_LABEL_FLAG:
for fixed_worker_name, fixed_worker in fixed_worker_infos.items():

View File

@@ -0,0 +1,467 @@
# Copyright (C) 2021 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
import eventlet
from oslo_log import log as logging
import paramiko
from tacker.common import cmd_executer
from tacker.common import exceptions
from tacker import objects
from tacker.vnflcm import utils as vnflcm_utils
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver
LOG = logging.getLogger(__name__)
# CLI timeout period
PR_CONNECT_TIMEOUT = 30
PR_CMD_TIMEOUT_DEFAULT = 600
PR_CMD_TIMEOUT_INSTALL = 2700
# retry interval(sec)
PR_CMD_RETRY_INTERVAL = 30
# number of status check retries while waiting for Docker to run
PR_NUM_OF_RETRY_WAIT_DOCKER = 5
# number of status check retries while waiting for the private registry to run
PR_NUM_OF_RETRY_WAIT_PR = 5
# Command type
CMD_TYPE_COMMON = "common"
# Default host port
DEFAULT_HOST_PORT = '5000'
class PrivateRegistryMgmtDriver(
vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
def get_type(self):
return "mgmt-drivers-private-registry"
def get_name(self):
return "mgmt-drivers-private-registry"
def get_description(self):
return "Tacker Private registry VNF Mgmt Driver"
def _get_cp_ip_address(self, vnf_instance, vim_connection_info, cp_name):
heatclient = hc.HeatClient(vim_connection_info.access_info)
stack_id = vnf_instance.instantiated_vnf_info.instance_id
# get IP address from heat
resource_info = heatclient.resources.get(
stack_id=stack_id, resource_name=cp_name)
fixed_ips = resource_info.attributes.get("fixed_ips")
if fixed_ips:
cp_ip_address = fixed_ips[0].get("ip_address")
else:
cp_ip_address = ""
# check result
if not cp_ip_address:
err_msg = "Failed to get IP address for Private registry VM"
LOG.error(err_msg)
raise exceptions.MgmtDriverOtherError(error_message=err_msg)
LOG.debug("Getting IP address succeeded. "
"(CP name: {}, IP address: {})".format(cp_name, cp_ip_address))
return cp_ip_address
def _execute_command(self, commander, ssh_command,
timeout=PR_CMD_TIMEOUT_DEFAULT,
type=CMD_TYPE_COMMON, retry=0):
eventlet.monkey_patch()
while retry >= 0:
try:
with eventlet.Timeout(timeout, True):
LOG.debug("execute command: {}".format(ssh_command))
result = commander.execute_command(
ssh_command, input_data=None)
break
except eventlet.timeout.Timeout:
err_msg = ("It is time out, When execute command: "
"{}.".format(ssh_command))
retry -= 1
if retry < 0:
LOG.error(err_msg)
commander.close_session()
raise exceptions.MgmtDriverOtherError(
error_message=err_msg)
err_msg += " Retry after {} seconds.".format(
PR_CMD_RETRY_INTERVAL)
LOG.debug(err_msg)
time.sleep(PR_CMD_RETRY_INTERVAL)
if type == CMD_TYPE_COMMON:
stderr = result.get_stderr()
if stderr:
err_msg = ("Failed to execute command: {}, "
"stderr: {}".format(ssh_command, stderr))
LOG.error(err_msg)
commander.close_session()
raise exceptions.MgmtDriverOtherError(error_message=err_msg)
return result.get_stdout()
def _wait_docker_running(self, commander, err_msg,
retry=PR_NUM_OF_RETRY_WAIT_DOCKER):
while retry >= 0:
ssh_command = ("sudo systemctl status docker "
"| grep Active | grep -c running")
result = self._execute_command(commander, ssh_command)
count_result = result[0].replace("\n", "")
if count_result == "0":
retry -= 1
if retry < 0:
LOG.error(err_msg)
commander.close_session()
raise exceptions.MgmtDriverOtherError(
error_message=err_msg)
LOG.debug("Docker service is not running. "
"Check again after {} seconds.".format(
PR_CMD_RETRY_INTERVAL))
time.sleep(PR_CMD_RETRY_INTERVAL)
else:
LOG.debug("Docker service is running.")
break
def _wait_private_registry_running(self, commander,
retry=PR_NUM_OF_RETRY_WAIT_PR):
while retry >= 0:
ssh_command = ("sudo docker inspect "
"--format=\'{{.State.Status}}\' "
"private_registry")
result = self._execute_command(commander, ssh_command)
status = result[0].replace("\n", "")
if status == "running":
LOG.debug("Private registry container is running.")
break
retry -= 1
if retry < 0:
err_msg = "Failed to run Private registry container"
LOG.error(err_msg)
commander.close_session()
raise exceptions.MgmtDriverOtherError(
error_message=err_msg)
LOG.debug("Private registry container is not running. "
"Check again after {} seconds.".format(
PR_CMD_RETRY_INTERVAL))
time.sleep(PR_CMD_RETRY_INTERVAL)
def _check_pr_installation_params(self, pr_installation_params):
if not pr_installation_params:
LOG.error("The private_registry_installation_param "
"in the additionalParams does not exist.")
raise exceptions.MgmtDriverNotFound(
param="private_registry_installation_param")
ssh_cp_name = pr_installation_params.get("ssh_cp_name")
ssh_username = pr_installation_params.get("ssh_username")
ssh_password = pr_installation_params.get("ssh_password")
if not ssh_cp_name:
LOG.error("The ssh_cp_name "
"in the additionalParams does not exist.")
raise exceptions.MgmtDriverNotFound(param="ssh_cp_name")
if not ssh_username:
LOG.error("The ssh_username "
"in the additionalParams does not exist.")
raise exceptions.MgmtDriverNotFound(param="ssh_username")
if not ssh_password:
LOG.error("The ssh_password "
"in the additionalParams does not exist.")
raise exceptions.MgmtDriverNotFound(param="ssh_password")
def _install_private_registry(self, context, vnf_instance,
vim_connection_info,
pr_installation_params):
LOG.debug("Start private registry installation. "
"installation param: {}".format(pr_installation_params))
# check parameters
self._check_pr_installation_params(pr_installation_params)
ssh_cp_name = pr_installation_params.get("ssh_cp_name")
ssh_username = pr_installation_params.get("ssh_username")
ssh_password = pr_installation_params.get("ssh_password")
image_path = pr_installation_params.get("image_path")
port_no = pr_installation_params.get("port_no")
proxy = pr_installation_params.get("proxy")
# get IP address from cp name
ssh_ip_address = self._get_cp_ip_address(
vnf_instance, vim_connection_info, ssh_cp_name)
# initialize RemoteCommandExecutor
retry = 4
while retry > 0:
try:
commander = cmd_executer.RemoteCommandExecutor(
user=ssh_username, password=ssh_password,
host=ssh_ip_address, timeout=PR_CONNECT_TIMEOUT)
break
except (exceptions.NotAuthorized, paramiko.SSHException,
paramiko.ssh_exception.NoValidConnectionsError) as e:
LOG.debug(e)
retry -= 1
if retry == 0:
err_msg = "Failed to use SSH to connect to the registry " \
"server: {}".format(ssh_ip_address)
LOG.error(err_msg)
raise exceptions.MgmtDriverOtherError(
error_message=err_msg)
time.sleep(PR_CMD_RETRY_INTERVAL)
# check OS and architecture
ssh_command = ("cat /etc/os-release "
"| grep \"PRETTY_NAME=\" "
"| grep -c \"Ubuntu 20.04\"; arch | grep -c x86_64")
result = self._execute_command(commander, ssh_command)
os_check_result = result[0].replace("\n", "")
arch_check_result = result[1].replace("\n", "")
if os_check_result == "0" or arch_check_result == "0":
err_msg = ("Failed to install. "
"Your OS does not support at present. "
"It only supports Ubuntu 20.04 (x86_64)")
LOG.error(err_msg)
commander.close_session()
raise exceptions.MgmtDriverOtherError(error_message=err_msg)
# get proxy params
http_proxy = ""
https_proxy = ""
no_proxy = ""
if proxy:
http_proxy = proxy.get("http_proxy")
https_proxy = proxy.get("https_proxy")
no_proxy = proxy.get("no_proxy")
# execute apt-get install command
ssh_command = ""
if http_proxy or https_proxy:
# set apt's proxy config
ssh_command = "echo -e \""
if http_proxy:
ssh_command += ("Acquire::http::Proxy "
"\\\"{}\\\";\\n".format(http_proxy))
if https_proxy:
ssh_command += ("Acquire::https::Proxy "
"\\\"{}\\\";\\n".format(https_proxy))
ssh_command += ("\" | sudo tee /etc/apt/apt.conf.d/proxy.conf "
">/dev/null && ")
ssh_command += (
"sudo apt-get update && "
"export DEBIAN_FRONTEND=noninteractive;"
"sudo -E apt-get install -y apt-transport-https "
"ca-certificates curl gnupg-agent software-properties-common")
self._execute_command(commander, ssh_command, PR_CMD_TIMEOUT_INSTALL)
# execute add-apt-repository command
ssh_command = ""
if http_proxy:
ssh_command += "export http_proxy=\"{}\";".format(http_proxy)
if https_proxy:
ssh_command += "export https_proxy=\"{}\";".format(https_proxy)
if no_proxy:
ssh_command += "export no_proxy=\"{}\";".format(no_proxy)
ssh_command += (
"export APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn;"
"curl -fsSL https://download.docker.com/linux/ubuntu/gpg "
"| sudo -E apt-key add - && "
"sudo add-apt-repository \"deb [arch=amd64] "
"https://download.docker.com/linux/ubuntu "
"$(lsb_release -cs) stable\"")
self._execute_command(commander, ssh_command, PR_CMD_TIMEOUT_INSTALL)
# install docker
ssh_command = (
"sudo apt-get update && "
"export DEBIAN_FRONTEND=noninteractive;"
"sudo -E apt-get install -y "
"docker-ce=5:19.03.11~3-0~ubuntu-focal "
"docker-ce-cli containerd.io")
self._execute_command(commander, ssh_command, PR_CMD_TIMEOUT_INSTALL)
# wait for the Docker service running
err_msg = "Failed to install Docker(Docker service is not running)"
self._wait_docker_running(commander, err_msg)
# set Docker's proxy config
if http_proxy or https_proxy or no_proxy:
proxy_env_list = []
if http_proxy:
proxy_env = "\\\"HTTP_PROXY={}\\\"".format(http_proxy)
proxy_env_list.append(proxy_env)
if https_proxy:
proxy_env = "\\\"HTTPS_PROXY={}\\\"".format(https_proxy)
proxy_env_list.append(proxy_env)
if no_proxy:
proxy_env = "\\\"NO_PROXY={}\\\"".format(no_proxy)
proxy_env_list.append(proxy_env)
proxy_env = " ".join(proxy_env_list)
ssh_command = (
"sudo mkdir -p /etc/systemd/system/docker.service.d && "
"echo -e \"[Service]\\nEnvironment={}\" | sudo tee "
"/etc/systemd/system/docker.service.d/https-proxy.conf "
">/dev/null && "
"sudo systemctl daemon-reload && "
"sudo systemctl restart docker".format(proxy_env))
self._execute_command(commander, ssh_command)
# wait for the Docker service running
err_msg = ("Failed to restart Docker"
"(Docker service is not running)")
self._wait_docker_running(commander, err_msg)
# pull or load the Docker image named "registry"
if not image_path:
# pull the Docker image
ssh_command = "sudo docker pull registry"
self._execute_command(commander, ssh_command)
else:
vnf_package_path = vnflcm_utils._get_vnf_package_path(
context, vnf_instance.vnfd_id)
local_image_path = os.path.join(
vnf_package_path, image_path)
# check existence of local image file
if not os.path.exists(local_image_path):
LOG.error("The image_path in the additionalParams is invalid. "
"File does not exist.")
commander.close_session()
raise exceptions.MgmtDriverParamInvalid(param="image_path")
# transfer the Docker image file to Private registry VM
image_file_name = os.path.basename(image_path)
remote_image_path = os.path.join("/tmp", image_file_name)
transport = paramiko.Transport(ssh_ip_address, 22)
transport.connect(username=ssh_username, password=ssh_password)
sftp_client = paramiko.SFTPClient.from_transport(transport)
sftp_client.put(local_image_path, remote_image_path)
transport.close()
# load the Docker image
ssh_command = "sudo docker load -i {}".format(remote_image_path)
self._execute_command(commander, ssh_command)
# check Docker images list
ssh_command = "sudo docker images | grep -c registry"
result = self._execute_command(commander, ssh_command)
count_result = result[0].replace("\n", "")
if count_result == "0":
err_msg = "Failed to pull or load the Docker image named registry"
LOG.error(err_msg)
commander.close_session()
raise exceptions.MgmtDriverOtherError(error_message=err_msg)
# run the Private registry container
if port_no is None:
port = DEFAULT_HOST_PORT
else:
port = str(port_no)
ssh_command = (
"sudo docker run -d -p {}:5000 "
"-v /private_registry:/var/lib/registry "
"--restart=always "
"--name private_registry "
"registry:latest".format(port))
self._execute_command(commander, ssh_command)
# wait for the Private registry container running
self._wait_private_registry_running(commander)
commander.close_session()
LOG.debug("Private registry installation complete.")
def instantiate_start(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
pass
def instantiate_end(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
# get vim_connection_info
vim_info = vnflcm_utils._get_vim(context,
instantiate_vnf_request.vim_connection_info)
vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
# get parameters for private registry installation
pr_installation_params = instantiate_vnf_request.additional_params.get(
"private_registry_installation_param")
self._install_private_registry(
context, vnf_instance, vim_connection_info, pr_installation_params)
def terminate_start(self, context, vnf_instance,
terminate_vnf_request, grant,
grant_request, **kwargs):
pass
def terminate_end(self, context, vnf_instance,
terminate_vnf_request, grant,
grant_request, **kwargs):
pass
def scale_start(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
pass
def scale_end(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
pass
def heal_start(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
pass
def heal_end(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
# NOTE: Private registry VNF has only one VNFC.
# Therefore, VNFC that is repaired by entire Heal and
# VNFC that is repaired by specifying VNFC instance are the same VNFC.
# get vim_connection_info
vim_info = vnflcm_utils._get_vim(context,
vnf_instance.vim_connection_info)
vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
# get parameters for private registry installation
pr_installation_params = (
vnf_instance.instantiated_vnf_info.additional_params.get(
"private_registry_installation_param"))
self._install_private_registry(
context, vnf_instance, vim_connection_info, pr_installation_params)
def change_external_connectivity_start(
self, context, vnf_instance,
change_ext_conn_request, grant,
grant_request, **kwargs):
pass
def change_external_connectivity_end(
self, context, vnf_instance,
change_ext_conn_request, grant,
grant_request, **kwargs):
pass

View File

@@ -0,0 +1,202 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: ETSI NFV SOL 001 common types definitions version 2.6.1
metadata:
template_name: etsi_nfv_sol001_common_types
template_author: ETSI_NFV
template_version: 2.6.1
data_types:
tosca.datatypes.nfv.L2AddressData:
derived_from: tosca.datatypes.Root
description: Describes the information on the MAC addresses to be assigned to a connection point.
properties:
mac_address_assignment:
type: boolean
description: Specifies if the address assignment is the responsibility of management and orchestration function or not. If it is set to True, it is the management and orchestration function responsibility
required: true
tosca.datatypes.nfv.L3AddressData:
derived_from: tosca.datatypes.Root
description: Provides information about Layer 3 level addressing scheme and parameters applicable to a CP
properties:
ip_address_assignment:
type: boolean
description: Specifies if the address assignment is the responsibility of management and orchestration function or not. If it is set to True, it is the management and orchestration function responsibility
required: true
floating_ip_activated:
type: boolean
description: Specifies if the floating IP scheme is activated on the Connection Point or not
required: true
ip_address_type:
type: string
description: Defines address type. The address type should be aligned with the address type supported by the layer_protocols properties of the parent VnfExtCp
required: false
constraints:
- valid_values: [ ipv4, ipv6 ]
number_of_ip_address:
type: integer
description: Minimum number of IP addresses to be assigned
required: false
constraints:
- greater_than: 0
tosca.datatypes.nfv.AddressData:
derived_from: tosca.datatypes.Root
description: Describes information about the addressing scheme and parameters applicable to a CP
properties:
address_type:
type: string
description: Describes the type of the address to be assigned to a connection point. The content type shall be aligned with the address type supported by the layerProtocol property of the connection point
required: true
constraints:
- valid_values: [ mac_address, ip_address ]
l2_address_data:
type: tosca.datatypes.nfv.L2AddressData
description: Provides the information on the MAC addresses to be assigned to a connection point.
required: false
l3_address_data:
type: tosca.datatypes.nfv.L3AddressData
description: Provides the information on the IP addresses to be assigned to a connection point
required: false
tosca.datatypes.nfv.ConnectivityType:
derived_from: tosca.datatypes.Root
description: describes additional connectivity information of a virtualLink
properties:
layer_protocols:
type: list
description: Identifies the protocol a virtualLink gives access to (ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire).The top layer protocol of the virtualLink protocol stack shall always be provided. The lower layer protocols may be included when there are specific requirements on these layers.
required: true
entry_schema:
type: string
constraints:
- valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ]
flow_pattern:
type: string
description: Identifies the flow pattern of the connectivity
required: false
constraints:
- valid_values: [ line, tree, mesh ]
tosca.datatypes.nfv.LinkBitrateRequirements:
derived_from: tosca.datatypes.Root
description: describes the requirements in terms of bitrate for a virtual link
properties:
root:
type: integer # in bits per second
description: Specifies the throughput requirement in bits per second of the link (e.g. bitrate of E-Line, root bitrate of E-Tree, aggregate capacity of E-LAN).
required: true
constraints:
- greater_or_equal: 0
leaf:
type: integer # in bits per second
description: Specifies the throughput requirement in bits per second of leaf connections to the link when applicable to the connectivity type (e.g. for E-Tree and E LAN branches).
required: false
constraints:
- greater_or_equal: 0
tosca.datatypes.nfv.CpProtocolData:
derived_from: tosca.datatypes.Root
description: Describes and associates the protocol layer that a CP uses together with other protocol and connection point information
properties:
associated_layer_protocol:
type: string
required: true
description: One of the values of the property layer_protocols of the CP
constraints:
- valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ]
address_data:
type: list
description: Provides information on the addresses to be assigned to the CP
entry_schema:
type: tosca.datatypes.nfv.AddressData
required: false
tosca.datatypes.nfv.VnfProfile:
derived_from: tosca.datatypes.Root
description: describes a profile for instantiating VNFs of a particular NS DF according to a specific VNFD and VNF DF.
properties:
instantiation_level:
type: string
description: Identifier of the instantiation level of the VNF DF to be used for instantiation. If not present, the default instantiation level as declared in the VNFD shall be used.
required: false
min_number_of_instances:
type: integer
description: Minimum number of instances of the VNF based on this VNFD that is permitted to exist for this VnfProfile.
required: true
constraints:
- greater_or_equal: 0
max_number_of_instances:
type: integer
description: Maximum number of instances of the VNF based on this VNFD that is permitted to exist for this VnfProfile.
required: true
constraints:
- greater_or_equal: 0
tosca.datatypes.nfv.Qos:
derived_from: tosca.datatypes.Root
description: describes QoS data for a given VL used in a VNF deployment flavour
properties:
latency:
type: scalar-unit.time #Number
description: Specifies the maximum latency
required: true
constraints:
- greater_than: 0 s
packet_delay_variation:
type: scalar-unit.time #Number
description: Specifies the maximum jitter
required: true
constraints:
- greater_or_equal: 0 s
packet_loss_ratio:
type: float
description: Specifies the maximum packet loss ratio
required: false
constraints:
- in_range: [ 0.0, 1.0 ]
capability_types:
tosca.capabilities.nfv.VirtualLinkable:
derived_from: tosca.capabilities.Node
description: A node type that includes the VirtualLinkable capability indicates that it can be pointed by tosca.relationships.nfv.VirtualLinksTo relationship type
relationship_types:
tosca.relationships.nfv.VirtualLinksTo:
derived_from: tosca.relationships.DependsOn
description: Represents an association relationship between the VduCp and VnfVirtualLink node types
valid_target_types: [ tosca.capabilities.nfv.VirtualLinkable ]
node_types:
tosca.nodes.nfv.Cp:
derived_from: tosca.nodes.Root
description: Provides information regarding the purpose of the connection point
properties:
layer_protocols:
type: list
description: Identifies which protocol the connection point uses for connectivity purposes
required: true
entry_schema:
type: string
constraints:
- valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ]
role: #Name in ETSI NFV IFA011 v0.7.3: cpRole
type: string
description: Identifies the role of the port in the context of the traffic flow patterns in the VNF or parent NS
required: false
constraints:
- valid_values: [ root, leaf ]
description:
type: string
description: Provides human-readable information on the purpose of the connection point
required: false
protocol:
type: list
description: Provides information on the addresses to be assigned to the connection point(s) instantiated from this Connection Point Descriptor
required: false
entry_schema:
type: tosca.datatypes.nfv.CpProtocolData
trunk_mode:
type: boolean
description: Provides information about whether the CP instantiated from this Cp is in Trunk mode (802.1Q or other), When operating in "trunk mode", the Cp is capable of carrying traffic for several VLANs. Absence of this property implies that trunkMode is not configured for the Cp i.e. It is equivalent to boolean value "false".
required: false

View File

@@ -0,0 +1,101 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Sample Private registry VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- sample_vnfd_types.yaml
topology_template:
inputs:
id:
type: string
vendor:
type: string
version:
type: version
descriptor_id:
type: string
descriptor_version:
type: string
provider:
type: string
product_name:
type: string
software_version:
type: string
vnfm_info:
type: list
entry_schema:
type: string
flavour_id:
type: string
flavour_description:
type: string
substitution_mappings:
node_type: company.provider.VNF
properties:
flavour_id: simple
requirements:
virtual_link_external: [ CP1, virtual_link ]
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_description: A simple flavour
interfaces:
Vnflcm:
instantiate_end:
implementation: mgmt-drivers-private-registry
heal_end:
implementation: mgmt-drivers-private-registry
artifacts:
mgmt-drivers-private-registry:
description: Management driver for Docker private registry
type: tosca.artifacts.Implementation.Python
file: Scripts/private_registry_mgmt.py
PrivateRegistryVDU:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: PrivateRegistryVDU
description: Private registry compute node
vdu_profile:
min_number_of_instances: 1
max_number_of_instances: 1
sw_image_data:
name: Image for Private registry
version: '20.04'
checksum:
algorithm: sha-512
hash: fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452
container_format: bare
disk_format: qcow2
min_disk: 0 GB
size: 2 GB
artifacts:
sw_image:
type: tosca.artifacts.nfv.SwImage
file: ../Files/images/ubuntu-20.04-server-cloudimg-amd64.img
capabilities:
virtual_compute:
properties:
virtual_memory:
virtual_mem_size: 4 GB
virtual_cpu:
num_virtual_cpu: 2
virtual_local_storage:
- size_of_storage: 45 GB
CP1:
type: tosca.nodes.nfv.VduCp
properties:
layer_protocols: [ ipv4 ]
order: 0
requirements:
- virtual_binding: PrivateRegistryVDU

View File

@@ -0,0 +1,31 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Sample Private registry VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- sample_vnfd_types.yaml
- sample_vnfd_df_simple.yaml
topology_template:
inputs:
selected_flavour:
type: string
description: VNF deployment flavour selected by the consumer. It is provided in the API
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_id: { get_input: selected_flavour }
descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d70a1189
provider: Company
product_name: Sample Private registry VNF
software_version: '1.0'
descriptor_version: '1.0'
vnfm_info:
- Tacker
requirements:
#- virtual_link_external # mapped in lower-level templates
#- virtual_link_internal # mapped in lower-level templates

View File

@@ -0,0 +1,63 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: VNF type definition
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
node_types:
company.provider.VNF:
derived_from: tosca.nodes.nfv.VNF
properties:
id:
type: string
description: ID of this VNF
default: vnf_id
vendor:
type: string
description: name of the vendor who generate this VNF
default: vendor
version:
type: version
description: version of the software for this VNF
default: 1.0
descriptor_id:
type: string
constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d70a1189 ] ]
default: b1bb0ce7-ebca-4fa7-95ed-4840d70a1189
descriptor_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
provider:
type: string
constraints: [ valid_values: [ 'Company' ] ]
default: 'Company'
product_name:
type: string
constraints: [ valid_values: [ 'Sample Private registry VNF' ] ]
default: 'Sample Private registry VNF'
software_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
vnfm_info:
type: list
entry_schema:
type: string
constraints: [ valid_values: [ Tacker ] ]
default: [ Tacker ]
flavour_id:
type: string
constraints: [ valid_values: [ simple ] ]
default: simple
flavour_description:
type: string
default: "This is the default flavour description"
requirements:
- virtual_link_external:
capability: tosca.capabilities.nfv.VirtualLinkable
interfaces:
Vnflcm:
type: tosca.interfaces.nfv.Vnflcm

View File

@@ -0,0 +1,12 @@
TOSCA-Meta-File-Version: 1.0
Created-by: Dummy User
CSAR-Version: 1.1
Entry-Definitions: Definitions/sample_vnfd_top.yaml
Name: Files/images/ubuntu-20.04-server-cloudimg-amd64.img
Content-Type: application/x-iso9066-image
Name: Scripts/private_registry_mgmt.py
Content-Type: text/x-python
Algorithm: SHA-256
Hash: <Calculated value here>