diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index ce9863f56..4f5631ad3 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -68,6 +68,7 @@ Use cases vnffg_usage_guide_advanced.rst vnfm_usage_guide.rst placement_policy_usage_guide.rst + mgmt_driver_deploy_k8s_usage_guide.rst Feature Documentation --------------------- diff --git a/doc/source/user/mgmt_driver_deploy_k8s_usage_guide.rst b/doc/source/user/mgmt_driver_deploy_k8s_usage_guide.rst new file mode 100644 index 000000000..23adef60e --- /dev/null +++ b/doc/source/user/mgmt_driver_deploy_k8s_usage_guide.rst @@ -0,0 +1,2319 @@ +======================================================= +How to use Mgmt Driver for deploying Kubernetes Cluster +======================================================= + +Overview +-------- + +1. Mgmt Driver Introduction +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Mgmt Driver enables Users to configure their VNF before and/or after +its VNF Lifecycle Management operation. Users can customize the logic +of Mgmt Driver by implementing their own Mgmt Driver and these +customizations are specified by "interface" definition in +`NFV-SOL001 v2.6.1`_. +This user guide aims to deploy Kubernetes cluster via +Mgmt Driver which is customized by user. + +2. Use Cases +^^^^^^^^^^^^ +In the present user guide, two cases are supported with the sample Mgmt Driver +and VNF Package providing two deployment flavours in VNFD: + +* simple: Deploy one master node with worker nodes. In this + case, it supports to scale worker node and heal worker node. +* complex: Deploy three(or more) master nodes with worker nodes. In + this case, it supports to scale worker node and heal worker + node and master node. + +In all the above cases, ``kubeadm`` is used for deploying Kubernetes in +the sample script. + +1. Simple : Single Master Node +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The simple Kubernetes cluster contains one master node as controller node. +You can deploy it use the sample script we provided. The diagram below shows +simple Kubernetes cluster architecture: + +.. code-block:: console + + +-------------------------------+ + | Kubernetes cluster | + | +---------------+ | + | | +---------+ | | + | | | k8s-api | | | + | | +---------+ | | + | | +---------+ | | + | | | etcd | | | + | | +---------+ | | + | | Master VM | | + | +---------------+ | + | | + | | + | +----------+ +----------+ | + | | +------+ | | +------+ | | + | | | Pod | | | | Pod | | | + | | +------+ | | +------+ | | + | | Worker VM| | Worker VM| | + | +----------+ +----------+ | + | | + +-------------------------------+ + +2. Complex : High Availability(HA) Configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Kubernetes is known for its resilience and reliability. This is possible +by ensuring that the cluster does not have any single points of failure. +Because of this, to have a highly availability(HA) cluster, you need to have +multiple master nodes. We provide the sample script which can be used to +deploy an HA Kubernetes cluster. The diagram below shows HA Kubernetes +cluster architecture: + +.. 
code-block:: console + + +-----------------------------------------------------------+ + | High availability(HA) Kubernetes cluster | + | +-------------------------------------+ | + | | | | + | | +---------------+ +---------+ | | + | | | VIP - Active | | HAProxy | | | + | | | |----->| (Active)|------+ | + | | |(keep - alived)| +---------+ | | +-----------+ | + | | | | +---------+ | | | | | + | | +---------------+ | k8s-api |<-----+ | | | + | | ^ +---------+ | | | | | + | | | +---------+ | | | | | + | | VRRP | +--->| etcd | | | | | | + | | | | +---------+ | | | | | + | | | | Master01 VM | | | | | + | +------------|------|-----------------+ | | | | + | | | | | | | + | +------------|------|-----------------+ | | | | + | | v | | | |Worker01 VM| | + | | +---------------+ | +---------+ | | | | | + | | | VIP - Standby | | | HAProxy | | | +-----------+ | + | | | | | |(Standby)| | | | + | | |(keep - alived)| | +---------+ | | | + | | | | | +---------+ | | | + | | +---------------+ | | k8s-api |<-----+ | + | | ^ | +---------+ | | | + | | | | +---------+ | | | + | | VRRP | +--->| etcd | | | +-----------+ | + | | | | +---------+ | | | | | + | | | | Master02 VM | | | | | + | +------------|------|-----------------+ | | | | + | | | | | | | + | +------------|------|-----------------+ | | | | + | | v | | | | | | + | | +---------------+ | +---------+ | | | | | + | | | VIP - Standby | | | HAProxy | | | | | | + | | | | | |(Standby)| | | | | | + | | |(keep - alived)| | +---------+ | | | | | + | | | | | +---------+ | | |Worker02 VM| | + | | +---------------+ | | k8s-api |<-----+ | | | + | | | +---------+ | +-----------+ | + | | | +---------+ | | + | | +--->| etcd | | | + | | +---------+ | | + | | Master03 VM | | + | +-------------------------------------+ | + +-----------------------------------------------------------+ + +Mgmt Driver supports the construction of an HA master node through the +``instantiate_end`` process as follows: + +1. Identify the VMs created by OpenStackInfraDriver(which is + used to create OpenStack resources). +2. Invoke the script to configure for HAProxy_ (a reliable solution + offering high availability, load balancing, and proxying for + TCP and HTTP-based applications) to start signal distribution + to Master nodes. +3. Install all Master-nodes first, followed by Worker-nodes by + invoking the script setting up the new Kubernetes cluster. + +Preparations +------------ +If you use the sample script to deploy your Kubernetes cluster, you need +to ensure that the virtual machine(VM) you created on the OpenStack can +access the external network. If you installed the tacker +service through ``devstack``, the following is an optional way to set the +network configuration. + +.. note:: + In case of installed using ``devstack``, please execute all the + following commands under the ``stack`` user. You can use + ``sudo su stack`` command to change your user. + +1. OpenStack Router +^^^^^^^^^^^^^^^^^^^ + +1. Create an OpenStack Router +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +To ensure your VMs can access the external network, a router between +public network and internal network may be required. It can be created +by OpenStack dashboard or cli command. The following steps will +create a router between the ``public`` network and the internal ``net0`` +network. The cli command is shown below: + +.. 
code-block:: console + + $ openstack router create router-net0 + +-------------------------+--------------------------------------+ + | Field | Value | + +-------------------------+--------------------------------------+ + | admin_state_up | UP | + | availability_zone_hints | | + | availability_zones | | + | created_at | 2021-02-17T04:49:09Z | + | description | | + | distributed | False | + | external_gateway_info | null | + | flavor_id | None | + | ha | False | + | id | 66fcada3-e101-4136-ad5a-ed4f0f2a7ac1 | + | name | router-net0 | + | project_id | 4e7c90a9c086427fbfc817ed6b372d97 | + | revision_number | 1 | + | routes | | + | status | ACTIVE | + | tags | | + | updated_at | 2021-02-17T04:49:09Z | + +-------------------------+--------------------------------------+ + $ openstack router set --external-gateway public router-net0 + $ openstack router show router-net0 + +-------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Field | Value | + +-------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | admin_state_up | UP | + | availability_zone_hints | | + | availability_zones | nova | + | created_at | 2021-02-17T04:49:09Z | + | description | | + | distributed | False | + | external_gateway_info | {"network_id": "70459da3-e4ba-44a1-959c-ee1540bf532f", "external_fixed_ips": [{"subnet_id": "0fe68555-8d3a-4fcb-83e2-602744eab106", "ip_address": "192.168.10.4"}, {"subnet_id": "d1bebebe-dde4-486a-8bca-eb9939aec972", | + | | "ip_address": "2001:db8::2f0"}], "enable_snat": true} | + | flavor_id | None | + | ha | False | + | id | 66fcada3-e101-4136-ad5a-ed4f0f2a7ac1 | + | interfaces_info | [] | + | name | router-net0 | + | project_id | 4e7c90a9c086427fbfc817ed6b372d97 | + | revision_number | 3 | + | routes | | + | status | ACTIVE | + | tags | | + | updated_at | 2021-02-17T04:51:59Z | + +-------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + $ openstack router add subnet router-net0 subnet0 + $ openstack router show router-net0 + +-------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Field | Value | + +-------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | admin_state_up | UP | + | availability_zone_hints | | + | availability_zones | nova | + | created_at | 2021-02-17T04:49:09Z | + | description | | + | distributed | False | + | external_gateway_info | {"network_id": "70459da3-e4ba-44a1-959c-ee1540bf532f", "external_fixed_ips": [{"subnet_id": "0fe68555-8d3a-4fcb-83e2-602744eab106", "ip_address": "192.168.10.4"}, {"subnet_id": "d1bebebe-dde4-486a-8bca-eb9939aec972", | + | | "ip_address": "2001:db8::2f0"}], 
"enable_snat": true} | + | flavor_id | None | + | ha | False | + | id | 66fcada3-e101-4136-ad5a-ed4f0f2a7ac1 | + | interfaces_info | [{"port_id": "0d2abb5d-7b01-4227-b5b4-325d153dfe4a", "ip_address": "10.10.0.1", "subnet_id": "70e60dee-b654-49ee-9692-147de8f07844"}] | + | name | router-net0 | + | project_id | 4e7c90a9c086427fbfc817ed6b372d97 | + | revision_number | 4 | + | routes | | + | status | ACTIVE | + | tags | | + | updated_at | 2021-02-17T04:54:35Z | + +-------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +Through the above command, you can get the gateway ip between the internal +net0 network and the external network. Here is ``192.168.10.4`` in the +``external_gateway_info``. The ``net0`` network's cidr is ``10.10.0.0/24``. + +2. Set Route Rule in Controller Node +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +According to the gateway ip obtained in step 1., you should add a route +rule in controller node of OpenStack. The command is shown below: + +.. code-block:: console + + $ sudo route add -net 10.10.0.0/24 gw 192.168.10.4 + +3. Set the Security Group +~~~~~~~~~~~~~~~~~~~~~~~~~ + +In order to access the k8s cluster, you need to set the security group rules. +You can create a new security group or add the rules to +the ``default`` security group. The minimum settings are shown below using +cli command: + +- get the nfv project's default security group id + +.. code-block:: console + + $ auth='--os-username nfv_user --os-project-name nfv --os-password devstack --os-auth-url http://127.0.0.1/identity --os-project-domain-name Default --os-user-domain-name Default' + $ nfv_project_id=`openstack project list $auth | grep -w '| nfv' | awk '{print $2}'` + $ default_id=`openstack security group list $auth | grep -w 'default' | grep $nfv_project_id | awk '{print $2}'` + +- add new security group rule into default security group using the id above + +.. code-block:: console + + #ssh 22 port + $ openstack security group rule create --protocol tcp --dst-port 22 $default_id $auth + #all tcp + $ openstack security group rule create --protocol tcp $default_id $auth + #all icmp + $ openstack security group rule create --protocol icmp $default_id $auth + #all udp + $ openstack security group rule create --protocol udp $default_id $auth + #dns 53 port + $ openstack security group rule create --protocol tcp --dst-port 53 $default_id $auth + #k8s port + $ openstack security group rule create --protocol tcp --dst-port 6443 $default_id $auth + $ openstack security group rule create --protocol tcp --dst-port 16443 $default_id $auth + $ openstack security group rule create --protocol tcp --dst-port 2379:2380 $default_id $auth + $ openstack security group rule create --protocol tcp --dst-port 10250:10255 $default_id $auth + $ openstack security group rule create --protocol tcp --dst-port 30000:32767 $default_id $auth + +2. Ubuntu Image +^^^^^^^^^^^^^^^ + +In this user guide, Ubuntu image is used for master/worker node. +To ensure that Mgmt Driver can access to VMs via SSH, +some configurations are required. + +1. Download Ubuntu Image +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can download the ubuntu image(version 20.04) from the official website. +The command is shown below: + +.. 
code-block:: console + + $ wget -P /opt/stack/tacker/samples/mgmt_driver https://cloud-images.ubuntu.com/releases/focal/release/ubuntu-20.04-server-cloudimg-amd64.img + +2. Install the libguestfs-tools +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you use the sample script to deploy the Kubernetes cluster, you need +to ensure the VM created by your image allows you to login using username +and password via SSH. However, the VM created by the ubuntu image downloaded +from official website does not allow you to login using username and +password via SSH. So you need to modify the ubuntu image. The following +is a way to modify the image using guestfish tool or you can modify +it using your own way. The way to install the tool is shown below: + +.. code-block:: console + + $ sudo apt-get install libguestfs-tools + $ guestfish --version + guestfish 1.36.13 + +3. Set the Image's Configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The guestfish tool can modify image's configuration using its own command. +The command is shown below: + +.. code-block:: console + + $ cd /opt/stack/tacker/samples/mgmt_driver + $ sudo guestfish -a ubuntu-20.04-server-cloudimg-amd64.img -i sh "sed -i 's/lock\_passwd\: True/lock\_passwd\: false/g' /etc/cloud/cloud.cfg" + $ sudo guestfish -a ubuntu-20.04-server-cloudimg-amd64.img -i sh "sed -i '/[ ][ ][ ][ ][ ]lock\_passwd\: false/a\ plain\_text\_passwd\: ubuntu' /etc/cloud/cloud.cfg" + $ sudo guestfish -a ubuntu-20.04-server-cloudimg-amd64.img -i sh "sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config" + $ sha512sum ubuntu-20.04-server-cloudimg-amd64.img + fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452 + +.. note:: + The hash of the ubuntu image is different after modifying, so you + should calculate it by yourself. And the value should be written + into the ``sample_kubernetes_df_simple.yaml`` and + ``sample_kubernetes_df_complex.yaml`` defined in + ``Create and Upload VNF Package``. + +3. Set Tacker Configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +First, copy the sample script that was stored in +``tacker/samples/mgmt_driver/kubernetes_mgmt.py`` into the directory of +``tacker/tacker/vnfm/mgmt_drivers``. + +.. code-block:: console + + $ cp /opt/stack/tacker/samples/mgmt_driver/kubernetes_mgmt.py /opt/stack/tacker/tacker/vnfm/mgmt_drivers/ + +1. Set the setup.cfg +~~~~~~~~~~~~~~~~~~~~ + +You have to register ``kubernetes_mgmt.py`` in the operation environment +of the tacker. +The sample script(``kubernetes_mgmt.py``) uses the +``mgmt-drivers-kubernetes`` field to register in Mgmt Driver. + +.. code-block:: console + + $ vi /opt/stack/tacker/setup.cfg + ... + tacker.tacker.mgmt.drivers = + noop = tacker.vnfm.mgmt_drivers.noop:VnfMgmtNoop + openwrt = tacker.vnfm.mgmt_drivers.openwrt.openwrt:VnfMgmtOpenWRT + vnflcm_noop = tacker.vnfm.mgmt_drivers.vnflcm_noop:VnflcmMgmtNoop + mgmt-drivers-kubernetes = tacker.vnfm.mgmt_drivers.kubernetes_mgmt:KubernetesMgmtDriver + ... + +2. Set the tacker.conf +~~~~~~~~~~~~~~~~~~~~~~ + +Then find the ``vnflcm_mgmt_driver`` field in the ``tacker.conf``. +Add the ``mgmt-drivers-kubernetes`` defined in step 1 to it, +and separate by commas. + +.. code-block:: console + + $ vi /etc/tacker/tacker.conf + ... + [tacker] + ... + vnflcm_mgmt_driver = vnflcm_noop,mgmt-drivers-kubernetes + ... + +3. Update the tacker.egg-info +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +After the above two steps, the configuration has +not yet taken effect. 
+You also need to execute the ``setup.py`` script to regenerate +the contents of the ``tacker.egg-info`` directory. + +.. code-block:: console + + $ cd /opt/stack/tacker/ + $ python setup.py build + running build + running build_py + running egg_info + writing requirements to tacker.egg-info/requires.txt + writing tacker.egg-info/PKG-INFO + writing top-level names to tacker.egg-info/top_level.txt + writing dependency_links to tacker.egg-info/dependency_links.txt + writing entry points to tacker.egg-info/entry_points.txt + writing pbr to tacker.egg-info/pbr.json + [pbr] Processing SOURCES.txt + [pbr] In git context, generating filelist from git + warning: no files found matching 'AUTHORS' + warning: no files found matching 'ChangeLog' + warning: no previously-included files matching '*.pyc' found anywhere in distribution + writing manifest file 'tacker.egg-info/SOURCES.txt' + +Then you can use Mgmt Driver to deploy Kubernetes cluster after +restarting the service of ``tacker`` and ``tacker-conductor``. + +.. code-block:: console + + $ sudo systemctl stop devstack@tacker + $ sudo systemctl restart devstack@tacker-conductor + $ sudo systemctl start devstack@tacker + +Create and Upload VNF Package +----------------------------- + +VNF Package is a ZIP file including VNFD, software images for VM, and other +artifact resources such as scripts and config files. The directory structure +and file contents are defined in `NFV-SOL004 v2.6.1`_. +According to `NFV-SOL004 v2.6.1`_, VNF Package should be the ZIP file format +with the `TOSCA-Simple-Profile-YAML-v1.2`_ Specifications. +In this user guide, the CSAR with TOSCA-Metadata directory is used to deploy +Kubernetes cluster. + +.. note:: + + For more detailed definitions of VNF Package, you can see `VNF Package`_. + +1. Directory Structure +^^^^^^^^^^^^^^^^^^^^^^ +The sample structure of VNF Package for both simple case and complex case +is shown below. + +.. note:: + + You can also find them in the ``samples/mgmt_driver/kubernetes_vnf_package/`` directory of the tacker. + +The directory structure: + +* **TOSCA-Metadata/TOSCA.meta** +* **Definitions/** +* **Files/images/** +* **Scripts/** +* **BaseHOT/** +* **UserData/** + +.. code-block:: console + + !----TOSCA-Metadata + !---- TOSCA.meta + !----Definitions + !---- etsi_nfv_sol001_common_types.yaml + !---- etsi_nfv_sol001_vnfd_types.yaml + !---- sample_kubernetes_top.vnfd.yaml + !---- sample_kubernetes_types.yaml + !---- sample_kubernetes_df_simple.yaml + !---- sample_kubernetes_df_complex.yaml + !----Files + !---- images + !---- ubuntu-20.04-server-cloudimg-amd64.img + !----Scripts + !---- install_k8s_cluster.sh + !---- kubernetes_mgmt.py + !----BaseHOT + !---- simple + !---- nested + !---- simple_nested_master.yaml + !---- simple_nested_worker.yaml + !---- simple_hot_top.yaml + !---- complex + !---- nested + !---- complex_nested_master.yaml + !---- complex_nested_worker.yaml + !---- complex_hot_top.yaml + !----UserData + !---- __init__.py + !---- k8s_cluster_user_data.py + +TOSCA-Metadata/TOSCA.meta +~~~~~~~~~~~~~~~~~~~~~~~~~ + +According to `TOSCA-Simple-Profile-YAML-v1.2`_ specifications, the +``TOSCA.meta`` metadata file is described in `TOSCA-1.0-specification`_. +The files under ``Scripts`` directory are artifact files, therefore, you +should add their location and digest into ``TOSCA.meta`` metadata file. +The sample file is shown below: + +* `TOSCA.meta`_ + +Definitions/ +~~~~~~~~~~~~ +All VNFD YAML files are located here. 
In this guide, there are two types +of definition files, ETSI NFV types definition file and User defined types +definition file. + +ETSI NFV provides two types of definition files [#f1]_ which +contain all defined type definitions in `NFV-SOL001 v2.6.1`_. +You can download them from official website. + +* `etsi_nfv_sol001_common_types.yaml`_ +* `etsi_nfv_sol001_vnfd_types.yaml`_ + +You can extend their own types definition from `NFV-SOL001 v2.6.1`_. In most +cases, you need to extend ``tosca.nodes.nfv.VNF`` to define your VNF node +types. In this guide, ``sample_kubernetes_df_simple.yaml`` is used in simple +case, ``sample_kubernetes_df_complex.yaml`` is used in complex case. The two +files can be distinguished by ``deployment_flavour``. The sample files are +shown below: + +* `sample_kubernetes_top.vnfd.yaml`_ + +* `sample_kubernetes_types.yaml`_ + +* `sample_kubernetes_df_simple.yaml`_ + +* `sample_kubernetes_df_complex.yaml`_ + +Files/images/ +~~~~~~~~~~~~~ + +VNF Software Images are located here. These files are also described in +``TOSCA.meta``. The image used for deploying Kubernetes cluster is +``ubuntu-20.04-server-cloudimg-amd64.img`` downloaded in +``Download Image``. + +Scripts/ +~~~~~~~~ + +There are two script files for deploying Kubernetes cluster. +``install_k8s_cluster.sh`` is used to install k8s cluster on +VM created by tacker. ``kubernetes_mgmt.py`` is a Mgmt Driver +file to be executed before or after instantiate, terminate, +scale and heal. You can obtain these scripts in the directory +at the same level as this guide. + +* `install_k8s_cluster.sh`_ +* `kubernetes_mgmt.py`_ + +BaseHOT/ +~~~~~~~~ + +Base HOT file is a Native cloud orchestration template, HOT in this context, +which is commonly used for LCM operations in different VNFs. It is the +responsibility of the user to prepare this file, and it is necessary to make +it consistent with VNFD placed under the **Definitions/** directory. + +In this guide, you must use user data to deploy the Kubernetes cluster, so the +BaseHot directory must be included. + +You must place the directory corresponding to **deployment_flavour** stored in +the **Definitions/** under the **BaseHOT/** directory, and store the +Base HOT files in it. + +In this guide, there are two cases(simple and complex) in this VNF Package, so +there are two directories under **BaseHOT/** directory. The sample files are +shown below: + +simple +:::::: + +* `nested/simple_nested_master.yaml`_ + +* `nested/simple_nested_worker.yaml`_ + +* `simple_hot_top.yaml`_ + +complex +::::::: + +* `nested/complex_nested_master.yaml`_ + +* `nested/complex_nested_worker.yaml`_ + +* `complex_hot_top.yaml`_ + +UserData/ +~~~~~~~~~ + +LCM operation user data is a script that returns key/value data as +Heat input parameters used for Base HOT. The sample file is shown below: + +* `k8s_cluster_user_data.py`_ + +2. Create VNF Package +^^^^^^^^^^^^^^^^^^^^^ + +Execute the following CLI command to create VNF Package. + +.. code-block:: console + + $ openstack vnf package create + + +Result: + +.. 
code-block:: console + + $ openstack vnf package create + +-------------------+-------------------------------------------------------------------------------------------------+ + | Field | Value | + +-------------------+-------------------------------------------------------------------------------------------------+ + | ID | 03a8eb3e-a981-434e-a548-82d9b90161d7 | + | Links | { | + | | "self": { | + | | "href": "/vnfpkgm/v1/vnf_packages/03a8eb3e-a981-434e-a548-82d9b90161d7" | + | | }, | + | | "packageContent": { | + | | "href": "/vnfpkgm/v1/vnf_packages/03a8eb3e-a981-434e-a548-82d9b90161d7/package_content" | + | | } | + | | } | + | Onboarding State | CREATED | + | Operational State | DISABLED | + | Usage State | NOT_IN_USE | + | User Defined Data | {} | + +-------------------+-------------------------------------------------------------------------------------------------+ + +3. Upload VNF Package +^^^^^^^^^^^^^^^^^^^^^ + +Before you instantiate VNF, you must create a zip file of VNF Package +and upload it. + +Execute the following command to make a zip file. + +.. code-block:: console + + $ zip sample_kubernetes_csar.zip -r Definitions/ Files/ TOSCA-Metadata/ BaseHOT/ UserData/ Scripts/ + +Execute the following CLI command to upload VNF Package. + +.. code-block:: console + + $ openstack vnf package upload --path ./sample_kubernetes_csar.zip 03a8eb3e-a981-434e-a548-82d9b90161d7 + + +Result: + +.. code-block:: console + + Upload request for VNF package 03a8eb3e-a981-434e-a548-82d9b90161d7 has been accepted. + + +After that, execute the following CLI command and confirm that +VNF Package uploading was successful. + +* Confirm that the 'Onboarding State' is 'ONBOARDED'. +* Confirm that the 'Operational State' is 'ENABLED'. +* Confirm that the 'Usage State' is 'NOT_IN_USE'. +* Take a note of the 'VNFD ID' because you will need it in the next + 'Deploy Kubernetes cluster'. + +.. 
code-block:: console + + $ openstack vnf package show 03a8eb3e-a981-434e-a548-82d9b90161d7 + +----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Field | Value | + +----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Additional Artifacts | [ | + | | { | + | | "artifactPath": "Scripts/install_k8s_cluster.sh", | + | | "checksum": { | + | | "algorithm": "SHA-256", | + | | "hash": "7f1f4518a3db7b386a473aebf0aa2561eaa94073ac4c95b9d3e7b3fb5bba3017" | + | | }, | + | | "metadata": {} | + | | }, | + | | { | + | | "artifactPath": "Scripts/kubernetes_mgmt.py", | + | | "checksum": { | + | | "algorithm": "SHA-256", | + | | "hash": "3d8fc578cca5eec0fb625fc3f5eeaa67c34c2a5f89329ed9307f343cfc25cdc4" | + | | }, | + | | "metadata": {} | + | | } | + | | ] | + | Checksum | { | + | | "hash": "d853ca27df5ad5270516adc8ec3cef6ebf982f09f2291eb150c677691d2c793e454e0feb61f211a2b4b8b6df899ab2f2c808684ae1f9100081e5375f8bfcec3d", | + | | "algorithm": "sha512" | + | | } | + | ID | 03a8eb3e-a981-434e-a548-82d9b90161d7 | + | Links | { | + | | "self": { | + | | "href": "/vnfpkgm/v1/vnf_packages/03a8eb3e-a981-434e-a548-82d9b90161d7" | + | | }, | + | | "packageContent": { | + | | "href": "/vnfpkgm/v1/vnf_packages/03a8eb3e-a981-434e-a548-82d9b90161d7/package_content" | + | | } | + | | } | + | Onboarding State | ONBOARDED | + | Operational State | ENABLED | + | Software Images | [ | + | | { | + | | "size": 2000000000, | + | | "version": "20.04", | + | | "name": "Image for masterNode kubernetes", | + | | "createdAt": "2021-02-18 08:49:39+00:00", | + | | "id": "masterNode", | + | | "containerFormat": "bare", | + | | "minDisk": 0, | + | | "imagePath": "", | + | | "minRam": 0, | + | | "diskFormat": "qcow2", | + | | "provider": "", | + | | "checksum": { | + | | "algorithm": "sha-512", | + | | "hash": "fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452" | + | | }, | + | | "userMetadata": {} | + | | }, | + | | { | + | | "size": 2000000000, | + | | "version": "20.04", | + | | "name": "Image for workerNode kubernetes", | + | | "createdAt": "2021-02-18 08:49:40+00:00", | + | | "id": "workerNode", | + | | "containerFormat": "bare", | + | | "minDisk": 0, | + | | "imagePath": "", | + | | "minRam": 0, | + | | "diskFormat": "qcow2", | + | | "provider": "", | + | | "checksum": { | + | | "algorithm": "sha-512", | + | | "hash": "fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452" | + | | }, | + | | "userMetadata": {} | + | | }, | + | | { | + | | "size": 2000000000, | + | | "version": "20.04", | + | | "name": "Image for workerNode kubernetes", | + | | "createdAt": "2021-02-18 08:49:39+00:00", | + | | "id": "workerNode", | + | | "containerFormat": "bare", | + | | "minDisk": 0, | + | | "imagePath": "", | + | | "minRam": 0, | + | | "diskFormat": "qcow2", | + | | "provider": "", | + | | "checksum": { | + | | "algorithm": "sha-512", | + | | "hash": "fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452" | + | | }, | + | | "userMetadata": {} | + | | }, | + | | { | + | | "size": 2000000000, | + | | "version": "20.04", | + | | "name": "Image for masterNode kubernetes", | + 
| | "createdAt": "2021-02-18 08:49:39+00:00", | + | | "id": "masterNode", | + | | "containerFormat": "bare", | + | | "minDisk": 0, | + | | "imagePath": "", | + | | "minRam": 0, | + | | "diskFormat": "qcow2", | + | | "provider": "", | + | | "checksum": { | + | | "algorithm": "sha-512", | + | | "hash": "fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452" | + | | }, | + | | "userMetadata": {} | + | | } | + | | ] | + | Usage State | NOT_IN_USE | + | User Defined Data | {} | + | VNF Product Name | Sample VNF | + | VNF Provider | Company | + | VNF Software Version | 1.0 | + | VNFD ID | b1db0ce7-ebca-1fb7-95ed-4840d70a1163 | + | VNFD Version | 1.0 | + +----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + +Deploy Kubernetes Cluster +------------------------- + +1. Single Master Node +^^^^^^^^^^^^^^^^^^^^^ + +A single master Kubernetes cluster can be installed and set up in +"instantiate_end" operation, which allows you to execute any +scripts after its instantiation, and it's enabled with Mgmt Driver +support. The instantiated Kubernetes cluster only supports one +master node and multiple worker nodes. The instantiated Kubernetes +cluster will be automatically registered as VIM. Then you can use +the VIM to deploy CNF. + +If you want to deploy a single master Kubernetes cluster, you can +use VNF Package with 'simple' flavour created in +``Create and Upload VNF Package``. +The most important thing is that you must create the parameter file which +is used to instantiate correctly. The following are the methods of creating +the parameter file and cli commands of OpenStack. + +1. Create the Parameter File +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Create a ``simple_kubernetes_param_file.json`` file with the following format. +This is the file that defines the parameters for an instantiate request. +These parameters will be set in the body of the instantiate request. + +Required parameter: + +* flavourId +* additionalParams + +.. note:: + [This is UserData specific part] + additionalParams is a parameter that can be described by KeyValuePairs. + By setting the following two parameters in this parameter, + instantiate using LCM operation user data becomes possible. + For file_name.py and class_name, set the file name and class name + described in Prerequisites. + + * lcm-operation-user-data: ./UserData/file_name.py + * lcm-operation-user-data-class: class_name + +Optional parameters: + +* instantiationLevelId +* extVirtualLinks +* extManagedVirtualLinks +* vimConnectionInfo + +In this guide, the VMs need to have extCPs to be accessed via SSH by Tacker. +Therefore, ``extVirtualLinks`` parameter is required. You can skip +``vimConnectionInfo`` only when you have the default VIM described in +`cli-legacy-vim`_. + +**Explanation of the parameters for deploying a Kubernetes cluster** + +For deploying Kubernetes cluster, you must set the +``k8s_cluster_installation_param`` key in additionalParams. +The KeyValuePairs is shown in table below: + +.. 
code-block:: + + ## List of additionalParams.k8s_cluster_installation_param(specified by user) + +------------------+-----------+---------------------------------------------+-------------------+ + | parameter | data type | description | required/optional | + +------------------+-----------+---------------------------------------------+-------------------+ + | script_path | String | The path where the Kubernetes installation | required | + | | | script stored in the VNF Package | | + +------------------+-----------+---------------------------------------------+-------------------+ + | vim_name | String | The vim name of deployed Kubernetes cluster | optional | + | | | registered by tacker | | + +------------------+-----------+---------------------------------------------+-------------------+ + | master_node | dict | Information for the VM of the master node | required | + | | | group | | + +------------------+-----------+---------------------------------------------+-------------------+ + | worker_node | dict | Information for the VM of the worker node | required | + | | | group | | + +------------------+-----------+---------------------------------------------+-------------------+ + | proxy | dict | Information for proxy setting on VM | optional | + +------------------+-----------+---------------------------------------------+-------------------+ + + ## master_node dict + +------------------+-----------+---------------------------------------------+-------------------+ + | parameter | data type | description | required/optional | + +------------------+-----------+---------------------------------------------+-------------------+ + | aspect_id | String | The resource name of the master node group, | optional | + | | | and is same as the `aspect` in `vnfd`. If | | + | | | you use user data, it must be set | | + +------------------+-----------+---------------------------------------------+-------------------+ + | ssh_cp_name | String | Resource name of port corresponding to the | required | + | | | master node's ssh ip | | + +------------------+-----------+---------------------------------------------+-------------------+ + | nic_cp_name | String | Resource name of port corresponding to the | required | + | | | master node's nic ip(which used for | | + | | | deploying Kubernetes cluster) | | + +------------------+-----------+---------------------------------------------+-------------------+ + | username | String | Username for VM access | required | + +------------------+-----------+---------------------------------------------+-------------------+ + | password | String | Password for VM access | required | + +------------------+-----------+---------------------------------------------+-------------------+ + | pod_cidr | String | CIDR for pod | optional | + +------------------+-----------+---------------------------------------------+-------------------+ + | cluster_cidr | String | CIDR for service | optional | + +------------------+-----------+---------------------------------------------+-------------------+ + | cluster_cp_name | String | Resource name of the Port corresponding to | required | + | | | cluster ip | | + +------------------+-----------+---------------------------------------------+-------------------+ + | cluster_fip_name | String | Resource name of the Port corresponding to | optional | + | | | cluster ip used for reigstering vim. 
If you | | + | | | use floating ip as ssh ip, it must be set | | + +------------------+-----------+---------------------------------------------+-------------------+ + + ## worker_node dict + +------------------+-----------+---------------------------------------------+-------------------+ + | parameter | data type | description | required/optional | + +------------------+-----------+---------------------------------------------+-------------------+ + | aspect_id | String | The resource name of the worker node group, | optional | + | | | and is same as the `aspect` in `vnfd`. If | | + | | | you use user data, it must be set | | + +------------------+-----------+---------------------------------------------+-------------------+ + | ssh_cp_name | String | Resource name of port corresponding to the | required | + | | | worker node's ssh ip | | + +------------------+-----------+---------------------------------------------+-------------------+ + | nic_cp_name | String | Resource name of port corresponding to the | required | + | | | worker node's nic ip(which used for | | + | | | deploying Kubernetes cluster) | | + +------------------+-----------+---------------------------------------------+-------------------+ + | username | String | Username for VM access | required | + +------------------+-----------+---------------------------------------------+-------------------+ + | password | String | Password for VM access | required | + +------------------+-----------+---------------------------------------------+-------------------+ + + ## proxy dict + +------------------+-----------+---------------------------------------------+-------------------+ + | parameter | data type | description | required/optional | + +------------------+-----------+---------------------------------------------+-------------------+ + | http_proxy | string | Http proxy server address | optional | + +------------------+-----------+---------------------------------------------+-------------------+ + | https_proxy | string | Https proxy server address | optional | + +------------------+-----------+---------------------------------------------+-------------------+ + | no_proxy | string | User-customized, proxy server-free IP | optional | + | | | address or segment | | + +------------------+-----------+---------------------------------------------+-------------------+ + | k8s_node_cidr | string | CIDR for Kubernetes node, all its ip will be| optional | + | | | set into no_proxy | | + +------------------+-----------+---------------------------------------------+-------------------+ + +simple_kubernetes_param_file.json + +.. 
code-block:: + + + { + "flavourId": "simple", + "vimConnectionInfo": [{ + "id": "3cc2c4ff-525c-48b4-94c9-29247223322f", + "vimId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", #Set the uuid of the VIM to use + "vimType": "openstack" + }], + "additionalParams": { + "k8s_cluster_installation_param": { + "script_path": "Scripts/install_k8s_cluster.sh", + "vim_name": "kubernetes_vim", + "master_node": { + "aspect_id": "master_instance", + "ssh_cp_name": "masterNode_CP1", + "nic_cp_name": "masterNode_CP1", + "username": "ubuntu", + "password": "ubuntu", + "pod_cidr": "192.168.3.0/16", + "cluster_cidr": "10.199.187.0/24", + "cluster_cp_name": "masterNode_CP1" + }, + "worker_node": { + "aspect_id": "worker_instance", + "ssh_cp_name": "workerNode_CP2", + "nic_cp_name": "workerNode_CP2", + "username": "ubuntu", + "password": "ubuntu" + }, + "proxy": { + "http_proxy": "http://user1:password1@host1:port1", + "https_proxy": "https://user2:password2@host2:port2", + "no_proxy": "192.168.246.0/24,10.0.0.1", + "k8s_node_cidr": "10.10.0.0/24" + } + }, + "lcm-operation-user-data": "./UserData/k8s_cluster_user_data.py", + "lcm-operation-user-data-class": "KubernetesClusterUserData" + }, + "extVirtualLinks": [{ + "id": "net0_master", + "resourceId": "71a3fbd1-f31e-4c2c-b0e2-26267d64a9ee", #Set the uuid of the network to use + "extCps": [{ + "cpdId": "masterNode_CP1", + "cpConfig": [{ + "cpProtocolData": [{ + "layerProtocol": "IP_OVER_ETHERNET" + }] + }] + }] + }, { + "id": "net0_worker", + "resourceId": "71a3fbd1-f31e-4c2c-b0e2-26267d64a9ee", #Set the uuid of the network to use + "extCps": [{ + "cpdId": "workerNode_CP2", + "cpConfig": [{ + "cpProtocolData": [{ + "layerProtocol": "IP_OVER_ETHERNET" + }] + }] + }] + }] + } + + +2. Execute the Instantiation Operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Execute the following CLI command to instantiate the VNF instance. + +.. code-block:: console + + $ openstack vnflcm create b1db0ce7-ebca-1fb7-95ed-4840d70a1163 + +--------------------------+---------------------------------------------------------------------------------------------+ + | Field | Value | + +--------------------------+---------------------------------------------------------------------------------------------+ + | ID | 3f32428d-e8ce-4d6a-9be9-4c7f3a02ac72 | + | Instantiation State | NOT_INSTANTIATED | + | Links | { | + | | "self": { | + | | "href": "/vnflcm/v1/vnf_instances/3f32428d-e8ce-4d6a-9be9-4c7f3a02ac72" | + | | }, | + | | "instantiate": { | + | | "href": "/vnflcm/v1/vnf_instances/3f32428d-e8ce-4d6a-9be9-4c7f3a02ac72/instantiate" | + | | } | + | | } | + | VNF Instance Description | None | + | VNF Instance Name | vnf-3f32428d-e8ce-4d6a-9be9-4c7f3a02ac72 | + | VNF Product Name | Sample VNF | + | VNF Provider | Company | + | VNF Software Version | 1.0 | + | VNFD ID | b1db0ce7-ebca-1fb7-95ed-4840d70a1163 | + | VNFD Version | 1.0 | + | vnfPkgId | | + +--------------------------+---------------------------------------------------------------------------------------------+ + $ openstack vnflcm instantiate 3f32428d-e8ce-4d6a-9be9-4c7f3a02ac72 ./simple_kubernetes_param_file.json + Instantiate request for VNF Instance 3f32428d-e8ce-4d6a-9be9-4c7f3a02ac72 has been accepted. 
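    # Optional check: instantiation runs asynchronously and installing the
    # Kubernetes cluster can take a while. When it completes, the cluster is
    # expected to be registered as a VIM under the name given in "vim_name"
    # ("kubernetes_vim" in this parameter file), which can be confirmed with:
    $ openstack vim list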
+ $ openstack vnflcm show 3f32428d-e8ce-4d6a-9be9-4c7f3a02ac72 + +--------------------------+-------------------------------------------------------------------------------------------+ + | Field | Value | + +--------------------------+-------------------------------------------------------------------------------------------+ + | ID | 3f32428d-e8ce-4d6a-9be9-4c7f3a02ac72 | + | Instantiated Vnf Info | { | + | | "flavourId": "simple", | + | | "vnfState": "STARTED", | + | | "scaleStatus": [ | + | | { | + | | "aspectId": "master_instance", | + | | "scaleLevel": 0 | + | | }, | + | | { | + | | "aspectId": "worker_instance", | + | | "scaleLevel": 0 | + | | } | + | | ], | + | | "extCpInfo": [ | + | | { | + | | "id": "d6ed7fd0-c26e-4e1e-81ab-71dc8c6d8293", | + | | "cpdId": "masterNode_CP1", | + | | "extLinkPortId": null, | + | | "associatedVnfcCpId": "1f830544-57ef-4f93-bdb5-b59e465f58d8", | + | | "cpProtocolInfo": [ | + | | { | + | | "layerProtocol": "IP_OVER_ETHERNET" | + | | } | + | | ] | + | | }, | + | | { | + | | "id": "ba0f7de5-32b3-48dd-944d-341990ede0cb", | + | | "cpdId": "workerNode_CP2", | + | | "extLinkPortId": null, | + | | "associatedVnfcCpId": "9244012d-ad53-4685-912b-f6413ae38493", | + | | "cpProtocolInfo": [ | + | | { | + | | "layerProtocol": "IP_OVER_ETHERNET" | + | | } | + | | ] | + | | } | + | | ], | + | | "extVirtualLinkInfo": [ | + | | { | + | | "id": "b396126a-6a95-4a24-94ae-67b58f5bd9c2", | + | | "resourceHandle": { | + | | "vimConnectionId": null, | + | | "resourceId": "71a3fbd1-f31e-4c2c-b0e2-26267d64a9ee", | + | | "vimLevelResourceType": null | + | | } | + | | }, | + | | { | + | | "id": "10dfbb44-a8ff-435b-98f8-70539e71af8c", | + | | "resourceHandle": { | + | | "vimConnectionId": null, | + | | "resourceId": "71a3fbd1-f31e-4c2c-b0e2-26267d64a9ee", | + | | "vimLevelResourceType": null | + | | } | + | | } | + | | ], | + | | "vnfcResourceInfo": [ | + | | { | + | | "id": "1f830544-57ef-4f93-bdb5-b59e465f58d8", | + | | "vduId": "masterNode", | + | | "computeResource": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "a0eccaee-ff7b-4c70-8c11-ba79c8d4deb6", | + | | "vimLevelResourceType": "OS::Nova::Server" | + | | }, | + | | "storageResourceIds": [], | + | | "vnfcCpInfo": [ | + | | { | + | | "id": "9fe655ab-1d35-4d22-a6f3-9a07fa797884", | + | | "cpdId": "masterNode_CP1", | + | | "vnfExtCpId": null, | + | | "vnfLinkPortId": "e66a44a4-965f-49dd-b168-ff4cc2485c34", | + | | "cpProtocolInfo": [ | + | | { | + | | "layerProtocol": "IP_OVER_ETHERNET" | + | | } | + | | ] | + | | } | + | | ] | + | | }, | + | | { | + | | "id": "9244012d-ad53-4685-912b-f6413ae38493", | + | | "vduId": "workerNode", | + | | "computeResource": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "5b3ff765-7a9f-447a-a06d-444e963b74c9", | + | | "vimLevelResourceType": "OS::Nova::Server" | + | | }, | + | | "storageResourceIds": [], | + | | "vnfcCpInfo": [ | + | | { | + | | "id": "59176610-fc1c-4abe-9648-87a9b8b79640", | + | | "cpdId": "workerNode_CP2", | + | | "vnfExtCpId": null, | + | | "vnfLinkPortId": "977b8775-350d-4ef0-95e5-552c4c4099f3", | + | | "cpProtocolInfo": [ | + | | { | + | | "layerProtocol": "IP_OVER_ETHERNET" | + | | } | + | | ] | + | | } | + | | ] | + | | }, | + | | { | + | | "id": "974a4b98-5d07-44d4-9e13-a8ed21805111", | + | | "vduId": "workerNode", | + | | "computeResource": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "63402e5a-67c9-4f5c-b03f-b21f4a88507f", | + | | 
"vimLevelResourceType": "OS::Nova::Server" | + | | }, | + | | "storageResourceIds": [], | + | | "vnfcCpInfo": [ | + | | { | + | | "id": "523b1328-9704-4ac1-986f-99c9b46ee1c4", | + | | "cpdId": "workerNode_CP2", | + | | "vnfExtCpId": null, | + | | "vnfLinkPortId": "eba708c4-14de-4d96-bc82-ed0abd95780b", | + | | "cpProtocolInfo": [ | + | | { | + | | "layerProtocol": "IP_OVER_ETHERNET" | + | | } | + | | ] | + | | } | + | | ] | + | | } | + | | ], | + | | "vnfVirtualLinkResourceInfo": [ | + | | { | + | | "id": "96d15ae5-a1d8-4867-aaee-a4372de8bc0e", | + | | "vnfVirtualLinkDescId": "b396126a-6a95-4a24-94ae-67b58f5bd9c2", | + | | "networkResource": { | + | | "vimConnectionId": null, | + | | "resourceId": "71a3fbd1-f31e-4c2c-b0e2-26267d64a9ee", | + | | "vimLevelResourceType": "OS::Neutron::Net" | + | | }, | + | | "vnfLinkPorts": [ | + | | { | + | | "id": "e66a44a4-965f-49dd-b168-ff4cc2485c34", | + | | "resourceHandle": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "b5ed388b-de4e-4de8-a24a-f1b70c5cce94", | + | | "vimLevelResourceType": "OS::Neutron::Port" | + | | }, | + | | "cpInstanceId": "9fe655ab-1d35-4d22-a6f3-9a07fa797884" | + | | } | + | | ] | + | | }, | + | | { | + | | "id": "c67b6f41-fd7a-45b2-b69a-8de9623dc16b", | + | | "vnfVirtualLinkDescId": "10dfbb44-a8ff-435b-98f8-70539e71af8c", | + | | "networkResource": { | + | | "vimConnectionId": null, | + | | "resourceId": "71a3fbd1-f31e-4c2c-b0e2-26267d64a9ee", | + | | "vimLevelResourceType": "OS::Neutron::Net" | + | | }, | + | | "vnfLinkPorts": [ | + | | { | + | | "id": "977b8775-350d-4ef0-95e5-552c4c4099f3", | + | | "resourceHandle": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "0002bba0-608b-4e2c-bd4d-23f1717f017c", | + | | "vimLevelResourceType": "OS::Neutron::Port" | + | | }, | + | | "cpInstanceId": "59176610-fc1c-4abe-9648-87a9b8b79640" | + | | }, | + | | { | + | | "id": "eba708c4-14de-4d96-bc82-ed0abd95780b", | + | | "resourceHandle": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "facc9eae-6f2d-4cfb-89c2-27841eea771c", | + | | "vimLevelResourceType": "OS::Neutron::Port" | + | | }, | + | | "cpInstanceId": "523b1328-9704-4ac1-986f-99c9b46ee1c4" | + | | } | + | | ] | + | | } | + | | ], | + | | "vnfcInfo": [ | + | | { | + | | "id": "1405984c-b174-4f33-8cfa-851d54ab95ce", | + | | "vduId": "masterNode", | + | | "vnfcState": "STARTED" | + | | }, | + | | { | + | | "id": "08b3f00e-a133-4262-8edb-03e2484ce870", | + | | "vduId": "workerNode", | + | | "vnfcState": "STARTED" | + | | }, | + | | { | + | | "id": "027502d6-d072-4819-a502-cb7cc688ec16", | + | | "vduId": "workerNode", | + | | "vnfcState": "STARTED" | + | | } | + | | ], | + | | "additionalParams": { | + | | "lcm-operation-user-data": "./UserData/k8s_cluster_user_data.py", | + | | "lcm-operation-user-data-class": "KubernetesClusterUserData", | + | | "k8sClusterInstallationParam": { | + | | "vimName": "kubernetes_vim", | + | | "proxy": { | + | | "noProxy": "192.168.246.0/24,10.0.0.1", | + | | "httpProxy": "http://user1:password1@host1:port1", | + | | "httpsProxy": "https://user2:password2@host2:port2", | + | | "k8sNodeCidr": "10.10.0.0/24" | + | | }, | + | | "masterNode": { | + | | "password": "ubuntu", | + | | "podCidr": "192.168.3.0/16", | + | | "username": "ubuntu", | + | | "aspectId": "master_instance", | + | | "nicCpName": "masterNode_CP1", | + | | "sshCpName": "masterNode_CP1", | + | | "clusterCidr": "10.199.187.0/24", | + | | "clusterCpName": 
"masterNode_CP1" | + | | }, | + | | "scriptPath": "Scripts/install_k8s_cluster.sh", | + | | "workerNode": { | + | | "password": "ubuntu", | + | | "username": "ubuntu", | + | | "aspectId": "worker_instance", | + | | "nicCpName": "workerNode_CP2", | + | | "sshCpName": "workerNode_CP2" | + | | } | + | | } | + | | } | + | | } | + | Instantiation State | INSTANTIATED | + | Links | { | + | | "self": { | + | | "href": "/vnflcm/v1/vnf_instances/3f32428d-e8ce-4d6a-9be9-4c7f3a02ac72" | + | | }, | + | | "terminate": { | + | | "href": "/vnflcm/v1/vnf_instances/3f32428d-e8ce-4d6a-9be9-4c7f3a02ac72/terminate" | + | | }, | + | | "heal": { | + | | "href": "/vnflcm/v1/vnf_instances/3f32428d-e8ce-4d6a-9be9-4c7f3a02ac72/heal" | + | | } | + | | } | + | VIM Connection Info | [ | + | | { | + | | "id": "9ab53adf-ca70-47b2-8877-1858cfb53618", | + | | "vimId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "vimType": "openstack", | + | | "interfaceInfo": {}, | + | | "accessInfo": {} | + | | }, | + | | { | + | | "id": "ef2c6b0c-c930-4d6c-9fe4-7c143e80ad94", | + | | "vimId": "2aeef9af-6a5b-4122-8510-21dbc71bc7cb", | + | | "vimType": "kubernetes", | + | | "interfaceInfo": null, | + | | "accessInfo": { | + | | "authUrl": "https://10.10.0.35:6443" | + | | } | + | | } | + | | ] | + | VNF Instance Description | None | + | VNF Instance Name | vnf-3f32428d-e8ce-4d6a-9be9-4c7f3a02ac72 | + | VNF Product Name | Sample VNF | + | VNF Provider | Company | + | VNF Software Version | 1.0 | + | VNFD ID | b1db0ce7-ebca-1fb7-95ed-4840d70a1163 | + | VNFD Version | 1.0 | + | vnfPkgId | | + +--------------------------+-------------------------------------------------------------------------------------------+ + +2. Multi-master Nodes +^^^^^^^^^^^^^^^^^^^^^ + +When you install the Kubernetes cluster in an HA configuration, +at least three Master nodes are configured in the Kubernetes cluster. +On each Master node, a load balancer (HAProxy) and etcd will be built. +Those described above are performed by "instantiate_end" operation with Mgmt Driver. + +If you want to deploy a multi-master Kubernetes cluster, you can +use VNF Package with ``complex`` flavour created in +``Create and Upload VNF Package``. +The following are the methods of creating +the parameter file and cli commands of OpenStack. + +1. Create the Parameter File +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The parameters in parameter file are the same as those in +``1. Single master node``. It should be noted that +since you need to create a group (at least three) master nodes, you +must set the ``aspect_id``. At the same time, HA cluster needs a representative +IP to access, so the ``cluster_cp_name`` must be set to the port name of the +virtual ip created in BaseHot. In this guide, +``cluster_cp_name`` is ``vip_CP``. The ``complex_kubernetes_param_file.json`` +is shown below. + +complex_kubernetes_param_file.json + +.. 
code-block:: + + + { + "flavourId": "complex", + "vimConnectionInfo": [{ + "id": "3cc2c4ff-525c-48b4-94c9-29247223322f", + "vimId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", #Set the uuid of the VIM to use + "vimType": "openstack" + }], + "additionalParams": { + "k8s_cluster_installation_param": { + "script_path": "Scripts/install_k8s_cluster.sh", + "vim_name": "kubernetes_vim_complex", + "master_node": { + "aspect_id": "master_instance", + "ssh_cp_name": "masterNode_CP1", + "nic_cp_name": "masterNode_CP1", + "username": "ubuntu", + "password": "ubuntu", + "pod_cidr": "192.168.3.0/16", + "cluster_cidr": "10.199.187.0/24", + "cluster_cp_name": "vip_CP" + }, + "worker_node": { + "aspect_id": "worker_instance", + "ssh_cp_name": "workerNode_CP2", + "nic_cp_name": "workerNode_CP2", + "username": "ubuntu", + "password": "ubuntu" + }, + "proxy": { + "http_proxy": "http://user1:password1@host1:port1", + "https_proxy": "https://user2:password2@host2:port2", + "no_proxy": "192.168.246.0/24,10.0.0.1", + "k8s_node_cidr": "10.10.0.0/24" + } + }, + "lcm-operation-user-data": "./UserData/k8s_cluster_user_data.py", + "lcm-operation-user-data-class": "KubernetesClusterUserData" + }, + "extVirtualLinks": [{ + "id": "net0_master", + "resourceId": "71a3fbd1-f31e-4c2c-b0e2-26267d64a9ee", #Set the uuid of the network to use + "extCps": [{ + "cpdId": "masterNode_CP1", + "cpConfig": [{ + "cpProtocolData": [{ + "layerProtocol": "IP_OVER_ETHERNET" + }] + }] + }] + }, { + "id": "net0_worker", + "resourceId": "71a3fbd1-f31e-4c2c-b0e2-26267d64a9ee", #Set the uuid of the network to use + "extCps": [{ + "cpdId": "workerNode_CP2", + "cpConfig": [{ + "cpProtocolData": [{ + "layerProtocol": "IP_OVER_ETHERNET" + }] + }] + }] + }] + } + +2. Execute the Instantiation Operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The VNF Package has been uploaded in +``Create and Upload VNF Package``. +So you just execute the following cli command on OpenStack Controller Node. + +.. code-block:: console + + $ openstack vnflcm create b1db0ce7-ebca-1fb7-95ed-4840d70a1163 + +--------------------------+---------------------------------------------------------------------------------------------+ + | Field | Value | + +--------------------------+---------------------------------------------------------------------------------------------+ + | ID | c5215213-af4b-4080-95ab-377920474e1a | + | Instantiation State | NOT_INSTANTIATED | + | Links | { | + | | "self": { | + | | "href": "/vnflcm/v1/vnf_instances/c5215213-af4b-4080-95ab-377920474e1a" | + | | }, | + | | "instantiate": { | + | | "href": "/vnflcm/v1/vnf_instances/c5215213-af4b-4080-95ab-377920474e1a/instantiate" | + | | } | + | | } | + | VNF Instance Description | None | + | VNF Instance Name | vnf-c5215213-af4b-4080-95ab-377920474e1a | + | VNF Product Name | Sample VNF | + | VNF Provider | Company | + | VNF Software Version | 1.0 | + | VNFD ID | b1db0ce7-ebca-1fb7-95ed-4840d70a1163 | + | VNFD Version | 1.0 | + | vnfPkgId | | + +--------------------------+---------------------------------------------------------------------------------------------+ + + $ openstack vnflcm instantiate c5215213-af4b-4080-95ab-377920474e1a ./complex_kubernetes_param_file.json + Instantiate request for VNF Instance c5215213-af4b-4080-95ab-377920474e1a has been accepted. 
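    # Optional check: after the HA cluster is instantiated, log in to one of
    # the master nodes with the username/password from the parameter file
    # ("ubuntu"/"ubuntu" here) and verify that all three master nodes and the
    # worker nodes have joined the cluster. Replace <master node ssh ip> with
    # the address assigned in your environment.
    $ ssh ubuntu@<master node ssh ip>
    $ kubectl get nodes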
+ $ openstack vnflcm show c5215213-af4b-4080-95ab-377920474e1a + +--------------------------+-------------------------------------------------------------------------------------------+ + | Field | Value | + +--------------------------+-------------------------------------------------------------------------------------------+ + | ID | c5215213-af4b-4080-95ab-377920474e1a | + | Instantiated Vnf Info | { | + | | "flavourId": "complex", | + | | "vnfState": "STARTED", | + | | "scaleStatus": [ | + | | { | + | | "aspectId": "master_instance", | + | | "scaleLevel": 0 | + | | }, | + | | { | + | | "aspectId": "worker_instance", | + | | "scaleLevel": 0 | + | | } | + | | ], | + | | "extCpInfo": [ | + | | { | + | | "id": "a36f667a-f0f8-4ac8-a120-b19569d7bd72", | + | | "cpdId": "masterNode_CP1", | + | | "extLinkPortId": null, | + | | "associatedVnfcCpId": "bbce9656-f051-434f-8c4a-660ac23e91f6", | + | | "cpProtocolInfo": [ | + | | { | + | | "layerProtocol": "IP_OVER_ETHERNET" | + | | } | + | | ] | + | | }, | + | | { | + | | "id": "67f38bd4-ae0b-4257-82eb-09a3c2dfd470", | + | | "cpdId": "workerNode_CP2", | + | | "extLinkPortId": null, | + | | "associatedVnfcCpId": "b4af0652-74b8-47bd-bcf6-94769bdbf756", | + | | "cpProtocolInfo": [ | + | | { | + | | "layerProtocol": "IP_OVER_ETHERNET" | + | | } | + | | ] | + | | } | + | | ], | + | | "extVirtualLinkInfo": [ | + | | { | + | | "id": "24e3e9ae-0df4-49d6-9ee4-e21dfe359baf", | + | | "resourceHandle": { | + | | "vimConnectionId": null, | + | | "resourceId": "71a3fbd1-f31e-4c2c-b0e2-26267d64a9ee", | + | | "vimLevelResourceType": null | + | | } | + | | }, | + | | { | + | | "id": "2283b96d-64f8-4403-9b21-643aa1058e86", | + | | "resourceHandle": { | + | | "vimConnectionId": null, | + | | "resourceId": "71a3fbd1-f31e-4c2c-b0e2-26267d64a9ee", | + | | "vimLevelResourceType": null | + | | } | + | | } | + | | ], | + | | "vnfcResourceInfo": [ | + | | { | + | | "id": "bbce9656-f051-434f-8c4a-660ac23e91f6", | + | | "vduId": "masterNode", | + | | "computeResource": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "a0eccaee-ff7b-4c70-8c11-ba79c8d4deb6", | + | | "vimLevelResourceType": "OS::Nova::Server" | + | | }, | + | | "storageResourceIds": [], | + | | "vnfcCpInfo": [ | + | | { | + | | "id": "9fe655ab-1d35-4d22-a6f3-9a07fa797884", | + | | "cpdId": "masterNode_CP1", | + | | "vnfExtCpId": null, | + | | "vnfLinkPortId": "e66a44a4-965f-49dd-b168-ff4cc2485c34", | + | | "cpProtocolInfo": [ | + | | { | + | | "layerProtocol": "IP_OVER_ETHERNET" | + | | } | + | | ] | + | | } | + | | ] | + | | }, | + | | { | + | | "id": "8bee8301-eb14-4c5c-bab8-a1b244d4d954", | + | | "vduId": "masterNode", | + | | "computeResource": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "4a40d65c-3440-4c44-858a-72a66324a11a", | + | | "vimLevelResourceType": "OS::Nova::Server" | + | | }, | + | | "storageResourceIds": [], | + | | "vnfcCpInfo": [ | + | | { | + | | "id": "65c9f35a-08a2-4875-bd85-af419f26b19d", | + | | "cpdId": "masterNode_CP1", | + | | "vnfExtCpId": null, | + | | "vnfLinkPortId": "26fa4b33-ad07-4982-ad97-18b66abba541", | + | | "cpProtocolInfo": [ | + | | { | + | | "layerProtocol": "IP_OVER_ETHERNET" | + | | } | + | | ] | + | | } | + | | ] | + | | }, | + | | { | + | | "id": "28ac0cb9-3bc1-4bc2-8be2-cf60f51b7b7a", | + | | "vduId": "masterNode", | + | | "computeResource": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "12708197-9724-41b8-b48c-9eb6862331dc", | + | | 
"vimLevelResourceType": "OS::Nova::Server" | + | | }, | + | | "storageResourceIds": [], | + | | "vnfcCpInfo": [ | + | | { | + | | "id": "d51f3b54-a9ed-46be-8ffe-64b5d07d1a7b", | + | | "cpdId": "masterNode_CP1", | + | | "vnfExtCpId": null, | + | | "vnfLinkPortId": "b71dc885-8e3e-4ccd-ac6f-feff332fd395", | + | | "cpProtocolInfo": [ | + | | { | + | | "layerProtocol": "IP_OVER_ETHERNET" | + | | } | + | | ] | + | | } | + | | ] | + | | }, | + | | { | + | | "id": "b4af0652-74b8-47bd-bcf6-94769bdbf756", | + | | "vduId": "workerNode", | + | | "computeResource": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "5b3ff765-7a9f-447a-a06d-444e963b74c9", | + | | "vimLevelResourceType": "OS::Nova::Server" | + | | }, | + | | "storageResourceIds": [], | + | | "vnfcCpInfo": [ | + | | { | + | | "id": "59176610-fc1c-4abe-9648-87a9b8b79640", | + | | "cpdId": "workerNode_CP2", | + | | "vnfExtCpId": null, | + | | "vnfLinkPortId": "977b8775-350d-4ef0-95e5-552c4c4099f3", | + | | "cpProtocolInfo": [ | + | | { | + | | "layerProtocol": "IP_OVER_ETHERNET" | + | | } | + | | ] | + | | } | + | | ] | + | | }, | + | | { | + | | "id": "974a4b98-5d07-44d4-9e13-a8ed21805111", | + | | "vduId": "workerNode", | + | | "computeResource": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "63402e5a-67c9-4f5c-b03f-b21f4a88507f", | + | | "vimLevelResourceType": "OS::Nova::Server" | + | | }, | + | | "storageResourceIds": [], | + | | "vnfcCpInfo": [ | + | | { | + | | "id": "523b1328-9704-4ac1-986f-99c9b46ee1c4", | + | | "cpdId": "workerNode_CP2", | + | | "vnfExtCpId": null, | + | | "vnfLinkPortId": "eba708c4-14de-4d96-bc82-ed0abd95780b", | + | | "cpProtocolInfo": [ | + | | { | + | | "layerProtocol": "IP_OVER_ETHERNET" | + | | } | + | | ] | + | | } | + | | ] | + | | } | + | | ], | + | | "vnfVirtualLinkResourceInfo": [ | + | | { | + | | "id": "96d15ae5-a1d8-4867-aaee-a4372de8bc0e", | + | | "vnfVirtualLinkDescId": "24e3e9ae-0df4-49d6-9ee4-e21dfe359baf", | + | | "networkResource": { | + | | "vimConnectionId": null, | + | | "resourceId": "71a3fbd1-f31e-4c2c-b0e2-26267d64a9ee", | + | | "vimLevelResourceType": "OS::Neutron::Net" | + | | }, | + | | "vnfLinkPorts": [ | + | | { | + | | "id": "e66a44a4-965f-49dd-b168-ff4cc2485c34", | + | | "resourceHandle": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "b5ed388b-de4e-4de8-a24a-f1b70c5cce94", | + | | "vimLevelResourceType": "OS::Neutron::Port" | + | | }, | + | | "cpInstanceId": "9fe655ab-1d35-4d22-a6f3-9a07fa797884" | + | | }, | + | | { | + | | "id": "26fa4b33-ad07-4982-ad97-18b66abba541", | + | | "resourceHandle": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "dfab524f-dec9-4247-973c-a0e22475f950", | + | | "vimLevelResourceType": "OS::Neutron::Port" | + | | }, | + | | "cpInstanceId": "65c9f35a-08a2-4875-bd85-af419f26b19d" | + | | }, | + | | { | + | | "id": "b71dc885-8e3e-4ccd-ac6f-feff332fd395", | + | | "resourceHandle": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "45733936-0a9e-4eaa-a71f-3a77cb034581", | + | | "vimLevelResourceType": "OS::Neutron::Port" | + | | }, | + | | "cpInstanceId": "d51f3b54-a9ed-46be-8ffe-64b5d07d1a7b" | + | | } | + | | ] | + | | }, | + | | { | + | | "id": "c67b6f41-fd7a-45b2-b69a-8de9623dc16b", | + | | "vnfVirtualLinkDescId": "2283b96d-64f8-4403-9b21-643aa1058e86", | + | | "networkResource": { | + | | "vimConnectionId": null, | + | | "resourceId": 
"71a3fbd1-f31e-4c2c-b0e2-26267d64a9ee", | + | | "vimLevelResourceType": "OS::Neutron::Net" | + | | }, | + | | "vnfLinkPorts": [ | + | | { | + | | "id": "977b8775-350d-4ef0-95e5-552c4c4099f3", | + | | "resourceHandle": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "0002bba0-608b-4e2c-bd4d-23f1717f017c", | + | | "vimLevelResourceType": "OS::Neutron::Port" | + | | }, | + | | "cpInstanceId": "59176610-fc1c-4abe-9648-87a9b8b79640" | + | | }, | + | | { | + | | "id": "eba708c4-14de-4d96-bc82-ed0abd95780b", | + | | "resourceHandle": { | + | | "vimConnectionId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "resourceId": "facc9eae-6f2d-4cfb-89c2-27841eea771c", | + | | "vimLevelResourceType": "OS::Neutron::Port" | + | | }, | + | | "cpInstanceId": "523b1328-9704-4ac1-986f-99c9b46ee1c4" | + | | } | + | | ] | + | | } | + | | ], | + | | "vnfcInfo": [ | + | | { | + | | "id": "3ca607b9-f270-4077-8af8-d5d244f8893b", | + | | "vduId": "masterNode", | + | | "vnfcState": "STARTED" | + | | }, | + | | { | + | | "id": "c2b19ef1-f748-4175-9f3a-6792a9ee7a62", | + | | "vduId": "masterNode", | + | | "vnfcState": "STARTED" | + | | }, | + | | { | + | | "id": "59f5fd29-d20f-426f-a1a6-526757205cb4", | + | | "vduId": "masterNode", | + | | "vnfcState": "STARTED" | + | | }, | + | | { | + | | "id": "08b3f00e-a133-4262-8edb-03e2484ce870", | + | | "vduId": "workerNode", | + | | "vnfcState": "STARTED" | + | | }, | + | | { | + | | "id": "027502d6-d072-4819-a502-cb7cc688ec16", | + | | "vduId": "workerNode", | + | | "vnfcState": "STARTED" | + | | } | + | | ], | + | | "additionalParams": { | + | | "lcm-operation-user-data": "./UserData/k8s_cluster_user_data.py", | + | | "lcm-operation-user-data-class": "KubernetesClusterUserData", | + | | "k8sClusterInstallationParam": { | + | | "vimName": "kubernetes_vim_complex", | + | | "proxy": { | + | | "noProxy": "192.168.246.0/24,10.0.0.1", | + | | "httpProxy": "http://user1:password1@host1:port1", | + | | "httpsProxy": "https://user2:password2@host2:port2", | + | | "k8sNodeCidr": "10.10.0.0/24" | + | | }, | + | | "masterNode": { | + | | "password": "ubuntu", | + | | "podCidr": "192.168.3.0/16", | + | | "username": "ubuntu", | + | | "aspectId": "master_instance", | + | | "nicCpName": "masterNode_CP1", | + | | "sshCpName": "masterNode_CP1", | + | | "clusterCidr": "10.199.187.0/24", | + | | "clusterCpName": "vip_CP" | + | | }, | + | | "scriptPath": "Scripts/install_k8s_cluster.sh", | + | | "workerNode": { | + | | "password": "ubuntu", | + | | "username": "ubuntu", | + | | "aspectId": "worker_instance", | + | | "nicCpName": "workerNode_CP2", | + | | "sshCpName": "workerNode_CP2" | + | | } | + | | } | + | | } | + | | } | + | Instantiation State | INSTANTIATED | + | Links | { | + | | "self": { | + | | "href": "/vnflcm/v1/vnf_instances/c5215213-af4b-4080-95ab-377920474e1a" | + | | }, | + | | "terminate": { | + | | "href": "/vnflcm/v1/vnf_instances/c5215213-af4b-4080-95ab-377920474e1a/terminate" | + | | }, | + | | "heal": { | + | | "href": "/vnflcm/v1/vnf_instances/c5215213-af4b-4080-95ab-377920474e1a/heal" | + | | } | + | | } | + | VIM Connection Info | [ | + | | { | + | | "id": "9ab53adf-ca70-47b2-8877-1858cfb53618", | + | | "vimId": "05ef7ca5-7e32-4a6b-a03d-52f811f04496", | + | | "vimType": "openstack", | + | | "interfaceInfo": {}, | + | | "accessInfo": {} | + | | }, | + | | { | + | | "id": "2e56da35-f343-4f9e-8f04-7722f8edbe7a", | + | | "vimId": "3e04bb8e-2dbd-4c32-9575-d2937f3aa931", | + | | "vimType": "kubernetes", | + | | "interfaceInfo": null, 
| + | | "accessInfo": { | + | | "authUrl": "https://10.10.0.80:16443" | + | | } | + | | } | + | | ] | + | VNF Instance Description | None | + | VNF Instance Name | vnf-c5215213-af4b-4080-95ab-377920474e1a | + | VNF Product Name | Sample VNF | + | VNF Provider | Company | + | VNF Software Version | 1.0 | + | VNFD ID | b1db0ce7-ebca-1fb7-95ed-4840d70a1163 | + | VNFD Version | 1.0 | + | vnfPkgId | | + +--------------------------+-------------------------------------------------------------------------------------------+ + +Scale Kubernetes Worker Nodes +----------------------------- + +According to `NFV-SOL001 v2.6.1`_, `scale_start` and `scale_end` +operation allows users to execute any scripts in the scale +operation, and scaling operations on the worker nodes in +Kubernetes cluster is supported with Mgmt Driver. + +After instantiating a Kubernetes cluster, +if you want to delete one or more worker node in Kubernetes cluster, +you can execute `scale in` operation. If you want to add new worker +nodes in Kubernetes cluster, you can execute `scale out` operation. +The following are the methods of creating +the parameter file and cli commands of OpenStack. + +1. Create the Parameter File +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The following is scale parameter to "POST /vnf_instances/{id}/scale" as +``ScaleVnfRequest`` data type in ETSI `NFV-SOL003 v2.6.1`_: + +.. code-block:: + + +------------------+---------------------------------------------------------+ + | Attribute name | Parameter description | + +------------------+---------------------------------------------------------+ + | type | User specify scaling operation type: | + | | "SCALE_IN" or "SCALE_OUT" | + +------------------+---------------------------------------------------------+ + | aspectId | User specify target aspectId, aspectId is defined in | + | | above VNFD and user can know by | + | | ``InstantiatedVnfInfo.ScaleStatus`` that contained in | + | | the response of "GET /vnf_instances/{id}" | + +------------------+---------------------------------------------------------+ + | numberOfSteps | Number of scaling steps | + +------------------+---------------------------------------------------------+ + | additionalParams | Not needed | + +------------------+---------------------------------------------------------+ + +Following are two samples of scaling request body: + +.. code-block:: console + + { + "type": "SCALE_OUT", + "aspectId": "worker_instance", + "numberOfSteps": "1" + } + +.. code-block:: console + + { + "type": "SCALE_IN", + "aspectId": "worker_instance", + "numberOfSteps": "1" + } + +.. note:: + Only the worker node can be scaled out(in). The current function does + not support scale master node. + +2. Execute the Scale Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Before you execute `scale` command, you must ensure that your VNF instance +is already instantiated. +The VNF Package should be uploaded in ``Create and Upload VNF Package`` +and the Kubernetes cluster should be deployed with the process in +``Deploy Kubernetes Cluster``. + +When executing the scale operation of worker nodes, the following Heat API +is called from Tacker. + +* stack update + +The steps to confirm whether scaling is successful are shown below: + +1. Execute Heat CLI command and check the number of resource list in +'worker_instance' of the stack +before and after scaling. + +2. Login to master node of Kubernetes cluster and check the number of +worker nodes before and after scaling. 
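+
+As a quick cross-check of the two steps above, the following sketch (which
+assumes the stack name and master node IP used in the examples below) counts
+the nested worker stacks with the Heat CLI and the worker nodes registered in
+the Kubernetes cluster:
+
+.. code-block:: console
+
+  $ openstack stack resource list vnf-c5215213-af4b-4080-95ab-377920474e1a -n 2 --filter type=base_hot_nested_worker.yaml -f value -c resource_name | wc -l
+  $ ssh ubuntu@10.10.0.80 kubectl get nodes --no-headers | grep -c worker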
+ +To confirm the number of worker nodes after scaling, you can find the +increased or decreased number of stack resource with Heat CLI. Also +the number of registered worker nodes in the Kubernetes cluster +should be updated. +See `Heat CLI reference`_ for details on Heat CLI commands. + +Stack information before scaling: + +.. code-block:: console + + $ openstack stack resource list vnf-c5215213-af4b-4080-95ab-377920474e1a -n 2 --filter type=base_hot_nested_worker.yaml -c resource_name -c physical_resource_id -c resource_type -c resource_status + +---------------+--------------------------------------+-----------------------------+-----------------+ + | resource_name | physical_resource_id | resource_type | resource_status | + +---------------+--------------------------------------+-----------------------------+-----------------+ + | lwljovool2wg | 07b79bbe-d0b2-4df0-8775-6202142b6054 | base_hot_nested_worker.yaml | CREATE_COMPLETE | + | n6nnjta4f4rv | 56c9ec6f-5e52-44db-9d0d-57e3484e763f | base_hot_nested_worker.yaml | CREATE_COMPLETE | + +---------------+--------------------------------------+-----------------------------+-----------------+ + +worker node in Kubernetes cluster before scaling: + +.. code-block:: console + + $ ssh ubuntu@10.10.0.80 + $ kubectl get node + NAME STATUS ROLES AGE VERSION + master59 Ready control-plane,master 1h25m v1.20.4 + master78 Ready control-plane,master 1h1m v1.20.4 + master31 Ready control-plane,master 35m v1.20.4 + worker18 Ready 10m v1.20.4 + worker20 Ready 4m v1.20.4 + +Scaling out execution of the vnf_instance: + +.. code-block:: console + + $ openstack vnflcm scale --type "SCALE_OUT" --aspect-id worker_instance --number-of-steps 1 c5215213-af4b-4080-95ab-377920474e1a + Scale request for VNF Instance c5215213-af4b-4080-95ab-377920474e1a has been accepted. + +Stack information after scaling out: + +.. code-block:: console + + $ openstack stack resource list vnf-c5215213-af4b-4080-95ab-377920474e1a -n 2 --filter type=base_hot_nested_worker.yaml -c resource_name -c physical_resource_id -c resource_type -c resource_status + +---------------+--------------------------------------+-----------------------------+-----------------+ + | resource_name | physical_resource_id | resource_type | resource_status | + +---------------+--------------------------------------+-----------------------------+-----------------+ + | lwljovool2wg | 07b79bbe-d0b2-4df0-8775-6202142b6054 | base_hot_nested_worker.yaml | UPDATE_COMPLETE | + | n6nnjta4f4rv | 56c9ec6f-5e52-44db-9d0d-57e3484e763f | base_hot_nested_worker.yaml | UPDATE_COMPLETE | + | z5nky6qcodlq | f9ab73ff-3ad7-40d2-830a-87bd0c45af32 | base_hot_nested_worker.yaml | CREATE_COMPLETE | + +---------------+--------------------------------------+-----------------------------+-----------------+ + +worker node in Kubernetes cluster after scaling out: + +.. code-block:: console + + $ ssh ubuntu@10.10.0.80 + $ kubectl get node + NAME STATUS ROLES AGE VERSION + master59 Ready control-plane,master 1h35m v1.20.4 + master78 Ready control-plane,master 1h11m v1.20.4 + master31 Ready control-plane,master 45m v1.20.4 + worker18 Ready 20m v1.20.4 + worker20 Ready 14m v1.20.4 + worker45 Ready 4m v1.20.4 + +Scaling in execution of the vnf_instance: + +.. code-block:: console + + $ openstack vnflcm scale --type "SCALE_IN" --aspect-id worker_instance --number-of-steps 1 c5215213-af4b-4080-95ab-377920474e1a + Scale request for VNF Instance c5215213-af4b-4080-95ab-377920474e1a has been accepted. + +.. 
note:: + This example shows the output of "SCALE_IN" after its "SCALE_OUT" operation. + +Stack information after scaling in: + +.. code-block:: console + + $ openstack stack resource list vnf-c5215213-af4b-4080-95ab-377920474e1a -n 2 --filter type=base_hot_nested_worker.yaml -c resource_name -c physical_resource_id -c resource_type -c resource_status + +---------------+--------------------------------------+-----------------------------+-----------------+ + | resource_name | physical_resource_id | resource_type | resource_status | + +---------------+--------------------------------------+-----------------------------+-----------------+ + | n6nnjta4f4rv | 56c9ec6f-5e52-44db-9d0d-57e3484e763f | base_hot_nested_worker.yaml | UPDATE_COMPLETE | + | z5nky6qcodlq | f9ab73ff-3ad7-40d2-830a-87bd0c45af32 | base_hot_nested_worker.yaml | UPDATE_COMPLETE | + +---------------+--------------------------------------+-----------------------------+-----------------+ + +worker node in Kubernetes cluster after scaling in: + +.. code-block:: console + + $ ssh ubuntu@10.10.0.80 + $ kubectl get node + NAME STATUS ROLES AGE VERSION + master59 Ready control-plane,master 1h38m v1.20.4 + master78 Ready control-plane,master 1h14m v1.20.4 + master31 Ready control-plane,master 48m v1.20.4 + worker20 Ready 17m v1.20.4 + worker45 Ready 7m v1.20.4 + +Heal Kubernetes Master/Worker Nodes +----------------------------------- + +According to `NFV-SOL001 v2.6.1`_, `heal_start` and `heal_end` +operation allows users to execute any scripts in the heal +operation, and healing operations on the master nodes and +worker nodes in Kubernetes cluster is supported +with Mgmt Driver. + +After instantiating a Kubernetes cluster, +if one of your node in Kubernetes cluster is not running properly, +you can heal it. The healing of entire Kubernetes cluster is also +supported. The following are the methods of creating +the parameter file and cli commands of OpenStack. + +1. Create the Parameter File +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following is heal parameter to "POST /vnf_instances/{id}/heal" as +``HealVnfRequest`` data type. It is not the same in SOL002 and SOL003. + +In `NFV-SOL002 v2.6.1`_: + +.. code-block:: + + +------------------+---------------------------------------------------------+ + | Attribute name | Parameter description | + +------------------+---------------------------------------------------------+ + | vnfcInstanceId | User specify heal target, user can know "vnfcInstanceId"| + | | by ``InstantiatedVnfInfo.vnfcResourceInfo`` that | + | | contained in the response of "GET /vnf_instances/{id}". | + +------------------+---------------------------------------------------------+ + | cause | Not needed | + +------------------+---------------------------------------------------------+ + | additionalParams | Not needed | + +------------------+---------------------------------------------------------+ + | healScript | Not needed | + +------------------+---------------------------------------------------------+ + +In `NFV-SOL003 v2.6.1`_: + +.. 
code-block:: + + +------------------+---------------------------------------------------------+ + | Attribute name | Parameter description | + +------------------+---------------------------------------------------------+ + | cause | Not needed | + +------------------+---------------------------------------------------------+ + | additionalParams | Not needed | + +------------------+---------------------------------------------------------+ + + +``cause``, and ``additionalParams`` +are supported for both of SOL002 and SOL003. + +If the vnfcInstanceId parameter is null, this means that healing operation is +required for the entire Kubernetes cluster, which is the case in SOL003. + +Following is a sample of healing request body for SOL002: + + +.. code-block:: + + { + "vnfcInstanceId": "bbce9656-f051-434f-8c4a-660ac23e91f6" + } + +.. note:: + In chapter of ``Deploy Kubernetes cluster``, the result of VNF instance + instantiated has shown in CLI command `openstack vnflcm show VNF INSTANCE ID`. + + You can get the vnfcInstanceId from ``Instantiated Vnf Info`` in above result. + The ``vnfcResourceInfo.id`` is vnfcInstanceId. + + The ``physical_resource_id`` mentioned below is + the same as ``vnfcResourceInfo.computeResource.resourceId``. + +2. Execute the Heal Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +1. Heal a Master Node +~~~~~~~~~~~~~~~~~~~~~ + +When healing specified with VNFC instances, +Heat APIs are called from Tacker. + +* stack resource mark unhealthy +* stack update + +The steps to confirm whether healing is successful are shown below: + +1. Execute Heat CLI command and check physical_resource_id and +resource_status of master node before and after healing. + +2. Login to master node of Kubernetes cluster and check the age +of master node before and after healing. + +To confirm that healing the master node is successful, you can find +the physical_resource_id of this resource of +'master_instance resource list' has changed with Heat CLI. Also +the age of master node healed should be updated in Kubernetes cluster. + +.. note:: + Note that 'vnfc-instance-id' managed by Tacker and + 'physical-resource-id' managed by Heat are different. + +master node information before healing: + +.. code-block:: console + + $ openstack stack resource list vnf-c5215213-af4b-4080-95ab-377920474e1a -n 2 --filter type=OS::Nova::Server -c resource_name -c physical_resource_id -c resource_type -c resource_status + +---------------+--------------------------------------+------------------+-----------------+ + | resource_name | physical_resource_id | resource_type | resource_status | + +---------------+--------------------------------------+------------------+-----------------+ + | workerNode | 5b3ff765-7a9f-447a-a06d-444e963b74c9 | OS::Nova::Server | CREATE_COMPLETE | + | workerNode | 63402e5a-67c9-4f5c-b03f-b21f4a88507f | OS::Nova::Server | CREATE_COMPLETE | + | masterNode | a0eccaee-ff7b-4c70-8c11-ba79c8d4deb6 | OS::Nova::Server | CREATE_COMPLETE | + | masterNode | 4a40d65c-3440-4c44-858a-72a66324a11a | OS::Nova::Server | CREATE_COMPLETE | + | masterNode | 12708197-9724-41b8-b48c-9eb6862331dc | OS::Nova::Server | CREATE_COMPLETE | + +---------------+--------------------------------------+------------------+-----------------+ + +master node in Kubernetes cluster before healing: + +.. 
code-block:: console + + $ ssh ubuntu@10.10.0.80 + $ kubectl get node + NAME STATUS ROLES AGE VERSION + master59 Ready control-plane,master 1h38m v1.20.4 + master78 Ready control-plane,master 1h14m v1.20.4 + master31 Ready control-plane,master 48m v1.20.4 + worker20 Ready 17m v1.20.4 + worker45 Ready 7m v1.20.4 + +We heal the master node with ``physical_resource_id`` +``a0eccaee-ff7b-4c70-8c11-ba79c8d4deb6``, its ``vnfc_instance_id`` +is ``bbce9656-f051-434f-8c4a-660ac23e91f6``. + +Healing master node execution of the vnf_instance: + +.. code-block:: console + + $ openstack vnflcm heal c5215213-af4b-4080-95ab-377920474e1a --vnfc-instance bbce9656-f051-434f-8c4a-660ac23e91f6 + Heal request for VNF Instance 9e086f34-b3c9-4986-b5e5-609a5ac4c1f9 has been accepted. + +master node information after healing: + +.. code-block:: console + + $ openstack stack resource list vnf-c5215213-af4b-4080-95ab-377920474e1a -n 2 --filter type=OS::Nova::Server -c resource_name -c physical_resource_id -c resource_type -c resource_status + +---------------+--------------------------------------+------------------+-----------------+ + | resource_name | physical_resource_id | resource_type | resource_status | + +---------------+--------------------------------------+------------------+-----------------+ + | workerNode | 5b3ff765-7a9f-447a-a06d-444e963b74c9 | OS::Nova::Server | CREATE_COMPLETE | + | workerNode | 63402e5a-67c9-4f5c-b03f-b21f4a88507f | OS::Nova::Server | CREATE_COMPLETE | + | masterNode | aaecc9b4-8ce5-4f1c-a90b-3571fd4bfb5f | OS::Nova::Server | CREATE_COMPLETE | + | masterNode | 4a40d65c-3440-4c44-858a-72a66324a11a | OS::Nova::Server | CREATE_COMPLETE | + | masterNode | 12708197-9724-41b8-b48c-9eb6862331dc | OS::Nova::Server | CREATE_COMPLETE | + +---------------+--------------------------------------+------------------+-----------------+ + +master node in Kubernetes cluster after healing: + +.. code-block:: console + + $ ssh ubuntu@10.10.0.80 + $ kubectl get node + NAME STATUS ROLES AGE VERSION + master78 Ready control-plane,master 1h36m v1.20.4 + master31 Ready control-plane,master 1h10m v1.20.4 + worker20 Ready 39m v1.20.4 + worker45 Ready 29m v1.20.4 + master59 Ready control-plane,master 2m v1.20.4 + +2. Heal a Worker Node +~~~~~~~~~~~~~~~~~~~~~ + +Healing a worker node is the same as Healing a master node. +You just replace the vnfc_instance_id in healing command. + +worker node information before healing: + +.. code-block:: console + + $ openstack stack resource list vnf-c5215213-af4b-4080-95ab-377920474e1a -n 2 --filter type=OS::Nova::Server -c resource_name -c physical_resource_id -c resource_type -c resource_status + +---------------+--------------------------------------+------------------+-----------------+ + | resource_name | physical_resource_id | resource_type | resource_status | + +---------------+--------------------------------------+------------------+-----------------+ + | workerNode | 5b3ff765-7a9f-447a-a06d-444e963b74c9 | OS::Nova::Server | CREATE_COMPLETE | + | workerNode | 63402e5a-67c9-4f5c-b03f-b21f4a88507f | OS::Nova::Server | CREATE_COMPLETE | + | masterNode | aaecc9b4-8ce5-4f1c-a90b-3571fd4bfb5f | OS::Nova::Server | CREATE_COMPLETE | + | masterNode | 4a40d65c-3440-4c44-858a-72a66324a11a | OS::Nova::Server | CREATE_COMPLETE | + | masterNode | 12708197-9724-41b8-b48c-9eb6862331dc | OS::Nova::Server | CREATE_COMPLETE | + +---------------+--------------------------------------+------------------+-----------------+ + +worker node in Kubernetes cluster before healing: + +.. 
code-block:: console + + $ ssh ubuntu@10.10.0.80 + $ kubectl get node + NAME STATUS ROLES AGE VERSION + master78 Ready control-plane,master 1h36m v1.20.4 + master31 Ready control-plane,master 1h10m v1.20.4 + worker20 Ready 39m v1.20.4 + worker45 Ready 29m v1.20.4 + master59 Ready control-plane,master 2m v1.20.4 + +We heal the worker node with ``physical_resource_id`` +``5b3ff765-7a9f-447a-a06d-444e963b74c9``, its ``vnfc_instance_id`` +is ``b4af0652-74b8-47bd-bcf6-94769bdbf756``. + +Healing worker node execution of the vnf_instance: + +.. code-block:: console + + $ openstack vnflcm heal c5215213-af4b-4080-95ab-377920474e1a --vnfc-instance b4af0652-74b8-47bd-bcf6-94769bdbf756 + Heal request for VNF Instance 9e086f34-b3c9-4986-b5e5-609a5ac4c1f9 has been accepted. + +worker node information after healing: + +.. code-block:: console + + $ openstack stack resource list vnf-c5215213-af4b-4080-95ab-377920474e1a -n 2 --filter type=OS::Nova::Server -c resource_name -c physical_resource_id -c resource_type -c resource_status + +---------------+--------------------------------------+------------------+-----------------+ + | resource_name | physical_resource_id | resource_type | resource_status | + +---------------+--------------------------------------+------------------+-----------------+ + | workerNode | 5b3ff765-7a9f-447a-a06d-444e963b74c9 | OS::Nova::Server | CREATE_COMPLETE | + | workerNode | c94f8952-bf2e-4a08-906e-67cee771112b | OS::Nova::Server | CREATE_COMPLETE | + | masterNode | aaecc9b4-8ce5-4f1c-a90b-3571fd4bfb5f | OS::Nova::Server | CREATE_COMPLETE | + | masterNode | 4a40d65c-3440-4c44-858a-72a66324a11a | OS::Nova::Server | CREATE_COMPLETE | + | masterNode | 12708197-9724-41b8-b48c-9eb6862331dc | OS::Nova::Server | CREATE_COMPLETE | + +---------------+--------------------------------------+------------------+-----------------+ + +worker node in Kubernetes cluster after healing: + +.. code-block:: console + + $ ssh ubuntu@10.10.0.80 + $ kubectl get node + NAME STATUS ROLES AGE VERSION + master78 Ready control-plane,master 1h46m v1.20.4 + master31 Ready control-plane,master 1h20m v1.20.4 + worker45 Ready 39m v1.20.4 + master59 Ready control-plane,master 10m v1.20.4 + worker20 Ready 2m v1.20.4 + +3. Heal the Entire Kubernetes Cluster +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When healing of the entire VNF, the following APIs are executed +from Tacker to Heat. + +* stack delete +* stack create + +1. Execute Heat CLI command and check 'ID' and 'Stack Status' of the stack +before and after healing. + +2. All the information of Kubernetes cluster will be +changed. + +This is to confirm that stack 'ID' has changed +before and after healing. + +Stack information before healing: + +.. code-block:: console + + $ openstack stack list -c 'ID' -c 'Stack Name' -c 'Stack Status' + +--------------------------------------+------------------------------------------+-----------------+ + | ID | Stack Name | Stack Status | + +--------------------------------------+------------------------------------------+-----------------+ + | f485f3f2-8181-4ed5-b927-e582b5aa9b14 | vnf-c5215213-af4b-4080-95ab-377920474e1a | CREATE_COMPLETE | + +--------------------------------------+------------------------------------------+-----------------+ + +Kubernetes cluster information before healing: + +.. 
code-block:: console + + $ ssh ubuntu@10.10.0.80 + $ kubectl get node + NAME STATUS ROLES AGE VERSION + master59 Ready control-plane,master 1h38m v1.20.4 + master78 Ready control-plane,master 1h14m v1.20.4 + master31 Ready control-plane,master 48m v1.20.4 + worker20 Ready 17m v1.20.4 + worker45 Ready 7m v1.20.4 + +Healing execution of the entire VNF: + +.. code-block:: console + + $ openstack vnflcm heal c5215213-af4b-4080-95ab-377920474e1a + Heal request for VNF Instance c5215213-af4b-4080-95ab-377920474e1a has been accepted. + +Stack information after healing: + +.. code-block:: console + + $ openstack stack list -c 'ID' -c 'Stack Name' -c 'Stack Status' + +--------------------------------------+------------------------------------------+-----------------+ + | ID | Stack Name | Stack Status | + +--------------------------------------+------------------------------------------+-----------------+ + | 03aaadbe-bf5a-44a0-84b0-8f2a18f8a844 | vnf-c5215213-af4b-4080-95ab-377920474e1a | CREATE_COMPLETE | + +--------------------------------------+------------------------------------------+-----------------+ + +Kubernetes cluster information after healing: + +.. code-block:: console + + $ ssh ubuntu@10.10.0.93 + $ kubectl get node + NAME STATUS ROLES AGE VERSION + master46 Ready control-plane,master 1h25m v1.20.4 + master37 Ready control-plane,master 1h1m v1.20.4 + master14 Ready control-plane,master 35m v1.20.4 + worker101 Ready 10m v1.20.4 + worker214 Ready 4m v1.20.4 + +Limitations +----------- +1. If you deploy a single master node Kubernetes cluster, + you cannot heal the master node. +2. This user guide provides a VNF Package in format of UserData. + You can also use TOSCA based VNF Package in the manner of SOL001 + v2.6.1, but it only supports single master case and the scaling + operation is not supported. + +Reference +--------- + +.. [#f1] https://forge.etsi.org/rep/nfv/SOL001 +.. _TOSCA-Simple-Profile-YAML-v1.2 : http://docs.oasis-open.org/tosca/TOSCA-Simple-Profile-YAML/v1.2/TOSCA-Simple-Profile-YAML-v1.2.html +.. _VNF Package: https://docs.openstack.org/tacker/latest/user/vnf-package.html +.. _TOSCA-1.0-specification : http://docs.oasis-open.org/tosca/TOSCA/v1.0/os/TOSCA-v1.0-os.pdf +.. _NFV-SOL001 v2.6.1 : https://www.etsi.org/deliver/etsi_gs/NFV-SOL/001_099/001/02.06.01_60/gs_NFV-SOL001v020601p.pdf +.. _NFV-SOL002 v2.6.1 : https://www.etsi.org/deliver/etsi_gs/NFV-SOL/001_099/002/02.06.01_60/gs_NFV-SOL002v020601p.pdf +.. _NFV-SOL003 v2.6.1 : https://www.etsi.org/deliver/etsi_gs/NFV-SOL/001_099/003/02.06.01_60/gs_NFV-SOL003v020601p.pdf +.. _NFV-SOL004 v2.6.1 : https://www.etsi.org/deliver/etsi_gs/NFV-SOL/001_099/004/02.06.01_60/gs_NFV-SOL004v020601p.pdf +.. _etsi_nfv_sol001_common_types.yaml : https://forge.etsi.org/rep/nfv/SOL001/raw/v2.6.1/etsi_nfv_sol001_common_types.yaml +.. _etsi_nfv_sol001_vnfd_types.yaml : https://forge.etsi.org/rep/nfv/SOL001/raw/v2.6.1/etsi_nfv_sol001_vnfd_types.yaml +.. _cli-legacy-vim : https://docs.openstack.org/tacker/latest/cli/cli-legacy-vim.html#register-vim +.. _HAProxy: https://www.haproxy.org/ +.. _Heat CLI reference : https://docs.openstack.org/python-openstackclient/latest/cli/plugin-commands/heat.html +.. _TOSCA.meta: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_vnf_package/TOSCA-Metadata/TOSCA.meta +.. _sample_kubernetes_top.vnfd.yaml: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_top.vnfd.yaml +.. 
_sample_kubernetes_types.yaml: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_types.yaml +.. _sample_kubernetes_df_simple.yaml: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_df_simple.yaml +.. _sample_kubernetes_df_complex.yaml: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_df_complex.yaml +.. _install_k8s_cluster.sh: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/install_k8s_cluster.sh +.. _kubernetes_mgmt.py: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_mgmt.py +.. _nested/simple_nested_master.yaml: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/simple/nested/simple_nested_master.yaml +.. _nested/simple_nested_worker.yaml: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/simple/nested/simple_nested_worker.yaml +.. _simple_hot_top.yaml: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/simple/simple_hot_top.yaml +.. _nested/complex_nested_master.yaml: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/complex/nested/complex_nested_master.yaml +.. _nested/complex_nested_worker.yaml: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/complex/nested/complex_nested_worker.yaml +.. _complex_hot_top.yaml: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/complex/complex_hot_top.yaml +.. _k8s_cluster_user_data.py: https://opendev.org/openstack/tacker/src/branch/master/samples/mgmt_driver/kubernetes_vnf_package/UserData/k8s_cluster_user_data.py diff --git a/releasenotes/notes/mgmt-driver-for-deploying-kubernetes-cluster-9ce82cc550b50065.yaml b/releasenotes/notes/mgmt-driver-for-deploying-kubernetes-cluster-9ce82cc550b50065.yaml new file mode 100644 index 000000000..ffd3367b9 --- /dev/null +++ b/releasenotes/notes/mgmt-driver-for-deploying-kubernetes-cluster-9ce82cc550b50065.yaml @@ -0,0 +1,32 @@ +--- +features: + - | + MgmtDriver function configures applications provided by VNF vendors. + VNF vendors can customize configuration methods for applications via + MgmtDriver. These customizations are specified by "interface" definition + in ETSI NFV-SOL001 v2.6.1. We provide the sample of MgmtDriver and + scripts which can be used to deploy a Kubernetes cluster. The sample + script for deploying Kubernetes cluster can be used in two cases. + One is to deploy one master node with worker nodes. Under this case, + it supports to scale worker node and heal worker node. The other is to + deploy a high availability cluster, there are three(or more) master + nodes with worker nodes. Under this case, it supports to scale worker + node and to heal worker node and master node. In all the above cases, + kubeadm is used for deploying Kubernetes in the sample script. We also + provide a user guide to help users understand how to use this feature. + + Instantiate single master node kubernetes cluster: + The Kubernetes cluster can be instantiated with VNF Lifecycle + Management Interface in ETSI NFV-SOL 003 v2.6.1. 
+ + Instantiate multi-master nodes kubernetes cluster: + A Kubenrnetes cluster with a high availability (HA) configuration + can be deployed. + + Scale kubernetes worker node: + Scaling operations on the Worker-nodes for the VNF including + Kubernetes cluster is supported with MgmtDriver. + + Heal kubernetes master and worker nodes: + Healing operations on the Master-nodes and Worker-nodes for the + VNF including Kubernetes cluster is supported with MgmtDriver. \ No newline at end of file diff --git a/samples/mgmt_driver/create_admin_token.yaml b/samples/mgmt_driver/create_admin_token.yaml new file mode 100644 index 000000000..07060e923 --- /dev/null +++ b/samples/mgmt_driver/create_admin_token.yaml @@ -0,0 +1,23 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: admin + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: admin + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile \ No newline at end of file diff --git a/samples/mgmt_driver/install_k8s_cluster.sh b/samples/mgmt_driver/install_k8s_cluster.sh new file mode 100644 index 000000000..3e6ab2417 --- /dev/null +++ b/samples/mgmt_driver/install_k8s_cluster.sh @@ -0,0 +1,782 @@ +#!/bin/bash +set -o xtrace +############################################################################### +# +# This script will install and setting for the Kubernetes Cluster on Ubuntu. +# It's confirmed operation on Ubuntu of below. +# +# * OS type : Ubuntu(64 bit) +# * OS version : 20.04 LTS +# * OS architecture : amd64 (x86_64) +# * Disk/Ram size : 15GB/2GB +# * Pre setup user : ubuntu +# +############################################################################### + +#============================================================================== +# Usage Definition +#============================================================================== +function usage { + sudo cat <<_EOT_ +$(basename ${0}) is script to construct the kubernetes cluster. + +Usage: + $(basename ${0}) [-d] [-o] [-m ] + [-w ] [-i ] + [-a ] + [-t ] [-s ] [-k ] + +Description: + This script is to construct the kubernetes cluster on a virtual machine. + It can install and configure a Master node or each Worker Node + as specify arguments. + +Options: + -m Install and setup all master nodes(use "," to separate, the first master ip is main master ip) + -w Install and setup worker node + -i master cluster IP address (e.g. 192.168.120.100) + -a Kubernetes api cluster CIDR (e.g. 10.96.0.0/12) + -p Kubernetes pod network CIDR (e.g. 
192.168.0.0/16) + -d Display the execution result in debug mode + -o Output the execution result to the log file + -t The first master's token name + -s The first master's token hash + -k The first master‘s certificate key + --help, -h Print this + +_EOT_ + exit 1 +} + +declare -g INSTALL_MODE="" +declare -g DEBUG_MODE="False" +declare -g OUTPUT_LOGFILE="False" +# master/worker ip +declare -g MASTER_IPADDRS=${MASTER_IPADDRS:-} +declare -a -g MASTER_IPS=${MASTER_IPS:-} +declare -g MASTER_IP=${MASTER_IP:-} +declare -g WORKER_IPADDR=${WORKER_IPADDR:-} +declare -g TOKEN_NAME=${TOKEN_NAME:-} +declare -g TOKEN_HASH=${TOKEN_HASH:-} +declare -g CERT_KEY=${CERT_KEY:-} +declare -g K8S_API_CLUSTER_CIDR=${K8S_API_CLUSTER_CIDR:-10.96.0.0/12} +declare -g K8S_POD_CIDR=${K8S_POD_CIDR:-192.168.0.0/16} + +if [ "$OPTIND" = 1 ]; then + while getopts dom:w:i:a:p:t:s:k:h OPT; do + case $OPT in + m) + MASTER_IPADDRS=$OPTARG # 192.168.120.17,192.168.120.18,192.168.120.19 + INSTALL_MODE="master" # master + MASTER_IPS=(${MASTER_IPADDRS//,/ }) + MASTER_IP=${MASTER_IPS[0]} + ;; + w) + WORKER_IPADDR=$OPTARG # 192.168.120.2 + INSTALL_MODE="worker" # worker + ;; + i) + MASTER_CLUSTER_IP=$OPTARG # master cluster ip: 192.168.120.100 + ;; + a) + K8S_API_CLUSTER_CIDR=$OPTARG # cluster cidr: 10.96.0.0/12 + ;; + p) + K8S_POD_CIDR=$OPTARG # pod network cidr: 192.168.0.0/16 + ;; + d) + DEBUG_MODE="True" # start debug + ;; + o) + OUTPUT_LOGFILE="True" # output log file + ;; + t) + TOKEN_NAME=$OPTARG # token name + ;; + s) + TOKEN_HASH=$OPTARG # token hash + ;; + k) + CERT_KEY=$OPTARG # certificate key + ;; + h) + echo "h option. display help" + usage + ;; + \?) + echo "Try to enter the h option." 1>&2 + ;; + esac + done +else + echo "No installed getopts-command." 1>&2 + exit 1 +fi + +# check parameter entered by user +if [ "$DEBUG_MODE" == "True" ]; then + echo "*** DEBUG MODE ***" + set -x +fi + +if [ "$OUTPUT_LOGFILE" == "True" ]; then + echo "*** OUTPUT LOGFILE MODE ***" + exec > /tmp/k8s_install_`date +%Y%m%d%H%M%S`.log 2>&1 +fi + +# Application Variables +#---------------------- +# haproxy +declare -g CURRENT_HOST_IP=${CURRENT_HOST_IP:-} +declare -g MASTER_CLUSTER_PORT=16443 +# kubeadm join +declare -g KUBEADM_JOIN_WORKER_RESULT=${KUBEADM_JOIN_WORKER_RESULT:-} + + +# Functions +#========== + +# Set OS common functions +#------------------------ + +# Set public DNS +function set_public_dns { + sudo sed -i -e 's/^#DNS=/DNS=8.8.8.8 8.8.4.4/g' /etc/systemd/resolved.conf + sudo systemctl restart systemd-resolved.service +} + +function set_hostname { + tmp_master_ipaddr3=`echo ${MASTER_IP} | sudo sed -e "s/.[0-9]\{1,3\}$//"` + local tmp_result="" + if [[ "$INSTALL_MODE" =~ "master" ]]; then + for _ip in `ip -4 addr | grep -oP '(?<=inet\s)\d+(\.\d+){3}'`; do + _tmp_ip=`echo ${_ip} |sudo sed -e "s/.[0-9]\{1,3\}$//"` + if [[ $_tmp_ip == $tmp_master_ipaddr3 ]]; then + CURRENT_HOST_IP=$_ip + tmp_result=`echo $_ip|cut -d"." -f4` + break + fi + done + sudo /usr/bin/hostnamectl set-hostname master$tmp_result + elif [[ "$INSTALL_MODE" == "worker" ]]; then + CURRENT_HOST_IP=$WORKER_IPADDR + tmp_result=`echo $CURRENT_HOST_IP|cut -d"." -f4` + sudo /usr/bin/hostnamectl set-hostname worker$tmp_result + else + echo "error. please execute sh install_k8s_cluster.sh -h." 
+ exit 0 + fi +} + +function set_sudoers { + echo "ubuntu ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/ubuntu +} + +function set_hosts { + hostname=`hostname` + sudo sed -i -e 's/127.0.0.1localhost/127.0.0.1 localhost master/g' \ + /etc/hosts + sudo sed -i -e "s/127.0.1.1 $hostname/127.0.1.1 $hostname master/g" \ + /etc/hosts +} + +function invalidate_swap { + sudo sed -i -e '/swap/s/^/#/' /etc/fstab + swapoff -a +} + + +# Install Haproxy +#---------------- +function install_haproxy { + REPOS_UPDATED=False apt_get_update + apt_get install haproxy +} + +function modify_haproxy_conf { + cat </dev/null +global + log /dev/log local0 + log /dev/log local1 notice + chroot /var/lib/haproxy + stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners + stats timeout 30s + user haproxy + group haproxy + daemon + + # Default SSL material locations + ca-base /etc/ssl/certs + crt-base /etc/ssl/private + + # Default ciphers to use on SSL-enabled listening sockets. + # For more information, see ciphers(1SSL). This list is from: + # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ + # An alternative list with additional directives can be obtained from + # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy + ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS + ssl-default-bind-options no-sslv3 + +defaults + log global + mode http + option httplog + option dontlognull + timeout connect 5000 + timeout client 50000 + timeout server 50000 + errorfile 400 /etc/haproxy/errors/400.http + errorfile 403 /etc/haproxy/errors/403.http + errorfile 408 /etc/haproxy/errors/408.http + errorfile 500 /etc/haproxy/errors/500.http + errorfile 502 /etc/haproxy/errors/502.http + errorfile 503 /etc/haproxy/errors/503.http + errorfile 504 /etc/haproxy/errors/504.http + +frontend kubernetes-apiserver + mode tcp + bind *:$MASTER_CLUSTER_PORT + option tcplog + default_backend kubernetes-apiserver + +backend kubernetes-apiserver + mode tcp + balance roundrobin +EOF + for master_ip in ${MASTER_IPS[@]}; do + split_ips=(${master_ip//./ }) + cat </dev/null + server master${split_ips[3]} $master_ip:6443 check +EOF + done + cat </dev/null +listen stats + bind *:1080 + stats auth admin:awesomePassword + stats refresh 5s + stats realm HAProxy\ Statistics + stats uri /admin?stats +EOF + +} + +function start_haproxy { + sudo systemctl enable haproxy + sudo systemctl start haproxy + sudo systemctl status haproxy | grep Active + result=$(ss -lnt |grep -E "16443|1080") + if [[ -z $result ]]; then + sudo systemctl restart haproxy + fi +} + + +# Install Keepalived +#------------------- +function install_keepalived { + REPOS_UPDATED=False apt_get_update + apt_get install keepalived +} +function modify_keepalived_conf { + local priority + local ip_name + local index=0 + for master_ip in ${MASTER_IPS[@]}; do + if [[ "$CURRENT_HOST_IP" == "$master_ip" ]]; then + priority=$(expr 103 - $index) + fi + index=$(expr $index + 1) + done + + ip_name=$(ip a s | grep $CURRENT_HOST_IP | awk '{print $NF}') + + cat </dev/null +vrrp_script chk_haproxy { + script "killall -0 haproxy" + interval 3 fall 3 +} +vrrp_instance VRRP1 { + state MASTER + interface $ip_name + virtual_router_id 51 + priority $priority + advert_int 1 + virtual_ipaddress { + $MASTER_CLUSTER_IP/24 + } + track_script { + chk_haproxy + } +} +EOF +} + +function start_keepalived { + sudo systemctl enable keepalived.service + sudo systemctl start 
keepalived.service + sudo systemctl status keepalived.service | grep Active + result=$(sudo systemctl status keepalived.service | \ + grep Active | grep "running") + if [[ "$result" == "" ]]; then + exit 0 + fi +} + +# Install Docker +#--------------- +function install_docker { + arch=$(sudo dpkg --print-architecture) + REPOS_UPDATED=False apt_get_update + DEBIAN_FRONTEND=noninteractive sudo apt-get install -y \ + apt-transport-https ca-certificates curl gnupg-agent \ + software-properties-common + result=`curl -fsSL https://download.docker.com/linux/ubuntu/gpg | \ + sudo apt-key add -` + if [[ $result != "OK" ]]; then + exit 0 + fi + sudo add-apt-repository \ + "deb [arch=${arch}] \ +https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" + apt_get update + DEBIAN_FRONTEND=noninteractive sudo apt-get install \ + docker-ce=5:19.03.11~3-0~ubuntu-focal \ + docker-ce-cli containerd.io << EOF +y +EOF +} + +function set_docker_proxy { + sudo mkdir -p /etc/systemd/system/docker.service.d + sudo touch /etc/systemd/system/docker.service.d/https-proxy.conf + + cat </dev/null +[Service] +Environment="HTTP_PROXY=${http_proxy//%40/@}" "HTTPS_PROXY=${https_proxy//%40/@}" "NO_PROXY=$no_proxy" +EOF + cat </dev/null +{ + "exec-opts": ["native.cgroupdriver=systemd"] +} +EOF + sudo systemctl daemon-reload + sudo systemctl restart docker + sleep 3 + result=$(sudo systemctl status docker | grep Active | grep "running") + if [[ -z "$result" ]]; then + exit 0 + fi + sleep 7 + sudo docker run hello-world +} + + +# Install Kubernetes +#------------------- +function set_k8s_components { + REPOS_UPDATED=False apt_get_update + sudo apt-get install -y apt-transport-https curl + result=`curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | \ + sudo apt-key add -` + if [[ $result != "OK" ]]; then + exit 0 + fi + echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | \ + sudo tee -a /etc/apt/sources.list.d/kubernetes.list + apt_get update + apt_get install -y kubelet kubeadm kubectl + sudo apt-mark hold kubelet kubeadm kubectl + echo "starting kubelet, wait 30s ..." + sleep 30 + sudo systemctl status kubelet | grep Active +} + +function init_master { + if [[ "$MASTER_IPADDRS" =~ "," ]]; then + sudo kubeadm init --pod-network-cidr=$K8S_POD_CIDR \ + --service-cidr=$K8S_API_CLUSTER_CIDR \ + --control-plane-endpoint "$MASTER_CLUSTER_IP:16443" --upload-certs + else + sudo kubeadm init --pod-network-cidr=$K8S_POD_CIDR \ + --service-cidr=$K8S_API_CLUSTER_CIDR \ + --control-plane-endpoint "$MASTER_CLUSTER_IP:6443" --upload-certs + fi + sleep 3 + sudo mkdir -p $HOME/.kube + sudo /bin/cp -f /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + sleep 20 +} + +function install_pod_network { + curl https://docs.projectcalico.org/manifests/calico.yaml -O + echo "waiting install pod network..." 
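+    # Keep re-applying the Calico manifest until kubectl reports its resources
+    # as "created" or "unchanged"; between attempts the admin kubeconfig is
+    # copied again in case the API server was not ready yet.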
+ while true; do + result=$(kubectl apply -f calico.yaml) + if [[ "$result" =~ "created" ]] || \ + [[ "$result" =~ "unchanged" ]]; then + echo "$result" + break + fi + sudo rm -rf $HOME/.kube + sudo mkdir -p $HOME/.kube + sudo /bin/cp -f /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + sleep 10 + done +} + +function add_master_node { + sudo kubeadm join $MASTER_CLUSTER_IP:16443 \ + --token $TOKEN_NAME \ + --discovery-token-ca-cert-hash sha256:$TOKEN_HASH \ + --control-plane --certificate-key $CERT_KEY + sudo mkdir -p $HOME/.kube + sudo /bin/cp -f /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + echo "add node ..." + sleep 10 + kubectl get nodes -o wide + echo "add node successfully" +} + +function init_worker { + sudo kubeadm init --pod-network-cidr=$K8S_POD_CIDR \ + --service-cidr=$K8S_API_CLUSTER_CIDR + sleep 5 + sudo mkdir -p $HOME/.kube + sudo /bin/cp -f /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + sleep 10 +} + +function add_worker_node { + if [[ "$ha_flag" != "False" ]]; then + KUBEADM_JOIN_WORKER_RESULT=$(sudo kubeadm join \ + $MASTER_CLUSTER_IP:16443 --token $TOKEN_NAME \ + --discovery-token-ca-cert-hash sha256:$TOKEN_HASH) + else + KUBEADM_JOIN_WORKER_RESULT=$(sudo kubeadm join \ + $MASTER_CLUSTER_IP:6443 --token $TOKEN_NAME \ + --discovery-token-ca-cert-hash sha256:$TOKEN_HASH) + fi +} + +function check_k8s_resource { + cat </dev/null +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 2 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.7.9 + ports: + - containerPort: 80 +EOF + cat </dev/null +apiVersion: v1 +kind: Service +metadata: + name: nginx-service +spec: + type: NodePort + sessionAffinity: ClientIP + selector: + app: nginx + ports: + - port: 80 + nodePort: 30080 +EOF + kubectl apply -f test-nginx-deployment.yaml + kubectl apply -f test-nginx-service.yaml + echo "please wait 1m to create resources..." + sleep 60 + kubectl get pod,deployment,service -o wide + pod_name=`kubectl get pod | grep nginx-deployment | \ + head -1 | awk '{print $1}'` + result=`kubectl describe pod $pod_name | grep Warning` + echo $result + if [[ "$result" =~ "FailedScheduling" ]]; then + local node_role + for role in ${result[@]}; do + if [[ "$role" =~ "master" ]]; then + index=${#role}-2 + node_role=${role: 1:$index} + fi + done + split_ips=(${CURRENT_HOST_IP//./ }) + kubectl taint node master${split_ips[3]} $node_role:NoSchedule- + echo "please wait 500s to create resources successfully..." + sleep 500 + kubectl get pod,deployment,service -o wide + else + echo "please wait 500s to create resources successfully..." + sleep 500 + kubectl get pod,deployment,service -o wide + fi +} + +# Set common functions +# +# Refer: devstack project functions-common +#----------------------------------------- +function apt_get_update { + if [[ "$REPOS_UPDATED" == "True" ]]; then + return + fi + + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + + # time all the apt operations + time_start "apt-get-update" + + local update_cmd="sudo apt-get update" + if ! timeout 300 sh -c "while ! 
$update_cmd; do sleep 30; done"; then + die $LINENO "Failed to update apt repos, we're dead now" + fi + + REPOS_UPDATED=True + # stop the clock + time_stop "apt-get-update" +} + +function time_start { + local name=$1 + local start_time=${_TIME_START[$name]} + if [[ -n "$start_time" ]]; then + die $LINENO \ + "Trying to start the clock on $name, but it's already been started" + fi + + _TIME_START[$name]=$(date +%s%3N) +} + +function time_stop { + local name + local end_time + local elapsed_time + local total + local start_time + + name=$1 + start_time=${_TIME_START[$name]} + + if [[ -z "$start_time" ]]; then + die $LINENO \ + "Trying to stop the clock on $name, but it was never started" + fi + end_time=$(date +%s%3N) + elapsed_time=$(($end_time - $start_time)) + total=${_TIME_TOTAL[$name]:-0} + # reset the clock so we can start it in the future + _TIME_START[$name]="" + _TIME_TOTAL[$name]=$(($total + $elapsed_time)) +} + +function apt_get { + local xtrace result + xtrace=$(set +o | grep xtrace) # set +o xtrace + set +o xtrace + + [[ "$OFFLINE" = "True" || -z "$@" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + + # time all the apt operations + time_start "apt-get" + + $xtrace + + $sudo DEBIAN_FRONTEND=noninteractive \ + http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \ + no_proxy=${no_proxy:-} \ + apt-get --option "Dpkg::Options::=--force-confold" \ + --assume-yes "$@" < /dev/null + result=$? + + # stop the clock + time_stop "apt-get" + return $result +} + +# Choose install function based on install mode +#---------------------------------------------- +function main_master { + # prepare + set_public_dns + set_hostname + set_sudoers + set_hosts + invalidate_swap + if [[ "$MASTER_IPADDRS" =~ "," ]]; then + # haproxy + install_haproxy + modify_haproxy_conf + start_haproxy + + # keepalived + install_keepalived + modify_keepalived_conf + start_keepalived + fi + + # Docker + install_docker + set_docker_proxy + + # kubernetes + set_k8s_components + init_master + install_pod_network + +# check_k8s_resource + + clear + token=$(sudo kubeadm token create) + echo "token:$token" + server=$(kubectl cluster-info | \ + sed 's,\x1B\[[0-9;]*[a-zA-Z],,g' | \ + grep 'Kubernetes' |awk '{print $7}') + echo "server:$server" + cat /etc/kubernetes/pki/ca.crt + ssl_ca_cert_hash=$(openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | \ + openssl rsa -pubin -outform der 2>/dev/null | \ + openssl dgst -sha256 -hex | sudo sed 's/^.* //') + echo "ssl_ca_cert_hash:$ssl_ca_cert_hash" + cert_key=$(sudo kubeadm init phase upload-certs --upload-certs) + echo "certificate_key:$cert_key" +} + +function normal_master { + # prepare + set_public_dns + set_hostname + set_sudoers + set_hosts + invalidate_swap + + # haproxy + install_haproxy + modify_haproxy_conf + start_haproxy + + # keepalived + install_keepalived + modify_keepalived_conf + start_keepalived + + # Docker + install_docker + set_docker_proxy + + # kubernetes + set_k8s_components + add_master_node + +} + +function main_worker { + # prepare + set_public_dns + set_hostname + set_sudoers + set_hosts + invalidate_swap + + # Docker + install_docker + set_docker_proxy + + # kubernetes + set_k8s_components + add_worker_node +} + +# Pre preparations +# ________________ + +function check_OS { + . /etc/os-release + if [[ $PRETTY_NAME =~ "Ubuntu 20.04" ]]; then + os_architecture=`uname -a | grep 'x86_64'` + if [[ $os_architecture == "" ]]; then + echo "Your OS does not support at present." + echo "It only supports x86_64." 
+ fi + else + echo "Your OS does not support at present." + echo "It only supports Ubuntu 20.04.1 LTS." + fi +} + +function set_apt-conf_proxy { + sudo touch /etc/apt/apt.conf.d/proxy.conf + + cat </dev/null +Acquire::http::Proxy "${http_proxy}"; +Acquire::https::Proxy "${https_proxy}"; +EOF +} + +# Main +# ____ + +flag="False" +set_apt-conf_proxy +check_OS +if [[ "$INSTALL_MODE" =~ "master" ]]; then + echo "Start install to main master node" + for _ip in `ip -4 addr | grep -oP '(?<=inet\s)\d+(\.\d+){3}'`; do + if [[ $_ip == $MASTER_IP ]]; then + flag="True" + break + fi + done + if [[ "$flag" == "True" ]]; then + INSTALL_MODE="main_master" + main_master + else + INSTALL_MODE="normal_master" + normal_master + fi +elif [ "$INSTALL_MODE" == "worker" ]; then + echo "Start install to worker node" + main_worker +else + echo "The install mode does not support at present!" + exit 255 +fi + +if [[ "$INSTALL_MODE" =~ "master" ]]; then + result=$(kubectl get nodes -o wide | grep $CURRENT_HOST_IP) + if [[ -z "$result" ]];then + echo "Install Failed! The node does not in Kubernetes cluster." + exit 255 + else + echo "Install Success!" + fi +else + if [[ "$KUBEADM_JOIN_WORKER_RESULT" =~ \ + "This node has joined the cluster" ]]; then + echo "Install Success!" + else + echo "Install Failed! The node does not in Kubernetes cluster." + exit 255 + fi +fi +exit 0 diff --git a/samples/mgmt_driver/kubernetes_mgmt.py b/samples/mgmt_driver/kubernetes_mgmt.py new file mode 100644 index 000000000..13caaed6a --- /dev/null +++ b/samples/mgmt_driver/kubernetes_mgmt.py @@ -0,0 +1,1936 @@ +# Copyright (C) 2021 FUJITSU +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
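+
+# This sample Mgmt Driver builds a Kubernetes cluster for a VNF: it is called
+# by Tacker at LCM hook points (instantiation, scale_start/scale_end and
+# heal_start/heal_end), runs install_k8s_cluster.sh on the created VMs over
+# SSH, and registers the resulting cluster as a Kubernetes VIM.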
+ +import eventlet +import ipaddress +import json +import os +import re +import time + +from oslo_log import log as logging +from oslo_utils import uuidutils +import paramiko + +from tacker.common import cmd_executer +from tacker.common import exceptions +from tacker.db.db_base import CommonDbMixin +from tacker.db.nfvo import nfvo_db +from tacker.nfvo.nfvo_plugin import NfvoPlugin +from tacker import objects +from tacker.vnflcm import utils as vnflcm_utils +from tacker.vnfm.infra_drivers.openstack import heat_client as hc +from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver + +LOG = logging.getLogger(__name__) +K8S_CMD_TIMEOUT = 30 +K8S_INSTALL_TIMEOUT = 2700 + + +class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver): + FLOATING_IP_FLAG = False + + def get_type(self): + return 'mgmt-drivers-kubernetes' + + def get_name(self): + return 'mgmt-drivers-kubernetes' + + def get_description(self): + return 'Tacker Kubernetes VNFMgmt Driver' + + def instantiate_start(self, context, vnf_instance, + instantiate_vnf_request, grant, + grant_request, **kwargs): + pass + + def _check_is_cidr(self, cidr_str): + # instantiate: check cidr + try: + ipaddress.ip_network(cidr_str) + return True + except ValueError: + return False + + def _execute_command(self, commander, ssh_command, timeout, type, retry): + eventlet.monkey_patch() + while retry >= 0: + try: + with eventlet.Timeout(timeout, True): + result = commander.execute_command( + ssh_command, input_data=None) + break + except eventlet.timeout.Timeout: + LOG.debug('It is time out, When execute command: ' + '{}.'.format(ssh_command)) + retry -= 1 + if retry < 0: + LOG.error('It is time out, When execute command: ' + '{}.'.format(ssh_command)) + raise exceptions.MgmtDriverOtherError( + error_message='It is time out, When execute command: ' + '{}.'.format(ssh_command)) + time.sleep(30) + if type == 'common' or type == 'etcd': + err = result.get_stderr() + if err: + LOG.error(err) + raise exceptions.MgmtDriverRemoteCommandError(err_info=err) + elif type == 'drain': + for res in result.get_stdout(): + if 'drained' in res: + break + else: + err = result.get_stderr() + stdout = result.get_stdout() + LOG.debug(stdout) + LOG.debug(err) + elif type == 'certificate_key': + if '[upload-certs] Using certificate key:\n' not \ + in result.get_stdout(): + err = result.get_stderr() + LOG.error(err) + raise exceptions.MgmtDriverRemoteCommandError(err_info=err) + elif type == 'install': + err = result.get_stderr() + if 'Install Failed!\n' in result.get_stdout(): + LOG.error(err) + raise exceptions.MgmtDriverRemoteCommandError(err_info=err) + return result.get_stdout() + + def _create_vim(self, context, vnf_instance, server, bearer_token, + ssl_ca_cert, vim_name, project_name, master_vm_dict_list): + # ha: create vim + vim_info = { + 'vim': { + 'name': vim_name, + 'auth_url': server, + 'vim_project': { + 'name': project_name + }, + 'auth_cred': { + 'bearer_token': bearer_token, + 'ssl_ca_cert': ssl_ca_cert + }, + 'type': 'kubernetes', + 'tenant_id': context.project_id + } + } + if self.FLOATING_IP_FLAG: + if not master_vm_dict_list[0].get( + 'k8s_cluster', {}).get('cluster_fip'): + register_ip = master_vm_dict_list[0].get('ssh').get('ipaddr') + else: + register_ip = master_vm_dict_list[0].get( + 'k8s_cluster', {}).get('cluster_fip') + server = re.sub(r'(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})', + register_ip, server) + vim_info['vim']['auth_url'] = server + del vim_info['vim']['auth_cred']['ssl_ca_cert'] + try: + nfvo_plugin = NfvoPlugin() + 
created_vim_info = nfvo_plugin.create_vim(context, vim_info) + except Exception as e: + LOG.error("Failed to register kubernetes vim: {}".format(e)) + raise exceptions.MgmtDriverOtherError( + error_message="Failed to register kubernetes vim: {}".format( + e)) + id = uuidutils.generate_uuid() + vim_id = created_vim_info.get('id') + vim_type = 'kubernetes' + access_info = { + 'auth_url': server + } + vim_connection_info = objects.VimConnectionInfo( + id=id, vim_id=vim_id, vim_type=vim_type, + access_info=access_info, interface_info=None + ) + vim_connection_infos = vnf_instance.vim_connection_info + vim_connection_infos.append(vim_connection_info) + vnf_instance.vim_connection_info = vim_connection_infos + vnf_instance.save() + + def _get_ha_group_resources_list( + self, heatclient, stack_id, node, additional_params): + # ha: get group resources list + nest_resources_list = heatclient.resources.list(stack_id=stack_id) + group_stack_name = node.get("aspect_id") + if 'lcm-operation-user-data' in additional_params.keys() and \ + 'lcm-operation-user-data-class' in additional_params.keys(): + group_stack_name = group_stack_name + '_group' + group_stack_id = "" + for nest_resources in nest_resources_list: + if nest_resources.resource_name == group_stack_name: + group_stack_id = nest_resources.physical_resource_id + if not group_stack_id: + LOG.error('No stack id matching the group was found.') + raise exceptions.MgmtDriverOtherError( + error_message="No stack id matching the group was found") + group_resources_list = heatclient.resources.list( + stack_id=group_stack_id) + return group_resources_list + + def _get_cluster_ip(self, heatclient, resource_num, + node, stack_id, nest_stack_id): + cluster_cp_name = node.get('cluster_cp_name') + if not node.get('aspect_id'): + # num_master_node = 1, type=OS::Nova::Server + cluster_ip = heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=cluster_cp_name).attributes.get( + 'fixed_ips')[0].get('ip_address') + else: + # num_master_node > 1, type=OS::Heat::AutoScalingGroup + if resource_num > 1: + cluster_ip = heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=cluster_cp_name).attributes.get( + 'fixed_ips')[0].get('ip_address') + # num_master_node = 1, type=OS::Heat::AutoScalingGroup + else: + cluster_ip = heatclient.resources.get( + stack_id=stack_id, + resource_name=cluster_cp_name).attributes.get( + 'fixed_ips')[0].get('ip_address') + if not cluster_ip: + LOG.error('Failed to get the cluster ip.') + raise exceptions.MgmtDriverOtherError( + error_message="Failed to get the cluster ip") + return cluster_ip + + def _get_install_info_for_k8s_node(self, nest_stack_id, node, + additional_params, role, + access_info): + # instantiate: get k8s ssh ips + vm_dict_list = [] + stack_id = '' + heatclient = hc.HeatClient(access_info) + + # get ssh_ip and nic_ip and set ssh's values + if not node.get('aspect_id'): + ssh_ip = heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=node.get('ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + nic_ip = heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=node.get('nic_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + vm_dict = { + "ssh": { + "username": node.get("username"), + "password": node.get("password"), + "ipaddr": ssh_ip, + "nic_ip": nic_ip + } + } + vm_dict_list.append(vm_dict) + else: + group_resources_list = self._get_ha_group_resources_list( + heatclient, nest_stack_id, node, additional_params) + for group_resource in 
group_resources_list: + stack_id = group_resource.physical_resource_id + resource_name = node.get('ssh_cp_name') + resource_info = heatclient.resources.get( + stack_id=stack_id, + resource_name=resource_name) + if resource_info.attributes.get('floating_ip_address'): + self.FLOATING_IP_FLAG = True + ssh_ip = resource_info.attributes.get( + 'floating_ip_address') + nic_ip = heatclient.resources.get( + stack_id=stack_id, + resource_name=node.get('nic_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + else: + ssh_ip = heatclient.resources.get( + stack_id=stack_id, + resource_name=resource_name).attributes.get( + 'fixed_ips')[0].get('ip_address') + nic_ip = heatclient.resources.get( + stack_id=stack_id, + resource_name=node.get('nic_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + + vm_dict_list.append({ + "ssh": { + "username": node.get("username"), + "password": node.get("password"), + "ipaddr": ssh_ip, + "nic_ip": nic_ip + } + }) + + # get cluster_ip from master node + if role == 'master': + cluster_fip = '' + resource_num = len(vm_dict_list) + cluster_ip = self._get_cluster_ip(heatclient, + resource_num, node, stack_id, nest_stack_id) + if self.FLOATING_IP_FLAG and len(vm_dict_list) > 1: + cluster_fip = heatclient.resource_get( + nest_stack_id, + node.get('cluster_fip_name')).attributes.get( + 'floating_ip_address') + + # set k8s_cluster's values + for vm_dict in vm_dict_list: + vm_dict["k8s_cluster"] = { + "pod_cidr": node.get('pod_cidr'), + "cluster_cidr": node.get('cluster_cidr'), + "ipaddr": cluster_ip, + "cluster_fip": cluster_fip + } + return vm_dict_list + + def _get_hosts(self, master_vm_dict_list, worker_vm_dict_list): + # merge /etc/hosts + hosts = [] + for master_vm_dict in master_vm_dict_list: + hosts_master_ip = master_vm_dict.get('ssh', ()).get('nic_ip') + hosts.append(hosts_master_ip + ' ' + 'master' + + hosts_master_ip.split('.')[-1]) + for worker_vm_dict in worker_vm_dict_list: + hosts_worker_ip = worker_vm_dict.get('ssh', ()).get('nic_ip') + hosts.append(hosts_worker_ip + ' ' + 'worker' + + hosts_worker_ip.split('.')[-1]) + hosts_str = '\\n'.join(hosts) + return hosts_str + + def _init_commander_and_send_install_scripts(self, user, password, host, + vnf_package_path=None, script_path=None): + retry = 4 + while retry > 0: + try: + if vnf_package_path and script_path: + connect = paramiko.Transport(host, 22) + connect.connect(username=user, password=password) + sftp = paramiko.SFTPClient.from_transport(connect) + # put script file content to '/tmp/install_k8s_cluster.sh' + sftp.put(os.path.join(vnf_package_path, script_path), + "/tmp/install_k8s_cluster.sh") + sftp.put(os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "../../../samples/mgmt_driver/" + "create_admin_token.yaml"), + "/tmp/create_admin_token.yaml") + connect.close() + commander = cmd_executer.RemoteCommandExecutor( + user=user, password=password, host=host, + timeout=K8S_INSTALL_TIMEOUT) + return commander + except paramiko.SSHException as e: + LOG.debug(e) + retry -= 1 + if retry == 0: + LOG.error(e) + raise paramiko.SSHException() + time.sleep(30) + + def _get_vm_cidr_list(self, master_ip, proxy): + # ha and scale: get vm cidr list + vm_cidr_list = [] + if proxy.get('k8s_node_cidr'): + cidr = proxy.get('k8s_node_cidr') + else: + cidr = master_ip + '/24' + network_ips = ipaddress.ip_network(cidr, False) + for network_ip in network_ips: + vm_cidr_list.append(str(network_ip)) + return vm_cidr_list + + def _install_worker_node(self, commander, proxy, + ha_flag, nic_ip, 
cluster_ip, kubeadm_token, + ssl_ca_cert_hash): + if proxy.get('http_proxy') and proxy.get('https_proxy'): + ssh_command = \ + "export http_proxy={http_proxy};" \ + "export https_proxy={https_proxy};" \ + "export no_proxy={no_proxy};" \ + "export ha_flag={ha_flag};" \ + "bash /tmp/install_k8s_cluster.sh " \ + "-w {worker_ip} -i {cluster_ip} " \ + "-t {kubeadm_token} -s {ssl_ca_cert_hash}".format( + http_proxy=proxy.get('http_proxy'), + https_proxy=proxy.get('https_proxy'), + no_proxy=proxy.get('no_proxy'), + ha_flag=ha_flag, + worker_ip=nic_ip, cluster_ip=cluster_ip, + kubeadm_token=kubeadm_token, + ssl_ca_cert_hash=ssl_ca_cert_hash) + else: + ssh_command = \ + "export ha_flag={ha_flag};" \ + "bash /tmp/install_k8s_cluster.sh " \ + "-w {worker_ip} -i {cluster_ip} " \ + "-t {kubeadm_token} -s {ssl_ca_cert_hash}".format( + ha_flag=ha_flag, + worker_ip=nic_ip, cluster_ip=cluster_ip, + kubeadm_token=kubeadm_token, + ssl_ca_cert_hash=ssl_ca_cert_hash) + self._execute_command( + commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0) + + def _install_k8s_cluster(self, context, vnf_instance, + proxy, script_path, + master_vm_dict_list, worker_vm_dict_list): + # instantiate: pre /etc/hosts + hosts_str = self._get_hosts( + master_vm_dict_list, worker_vm_dict_list) + master_ssh_ips_str = ','.join([ + vm_dict.get('ssh', {}).get('nic_ip') + for vm_dict in master_vm_dict_list]) + ha_flag = "True" + if ',' not in master_ssh_ips_str: + ha_flag = "False" + + # get vnf package path and check script_path + vnf_package_path = vnflcm_utils._get_vnf_package_path( + context, vnf_instance.vnfd_id) + abs_script_path = os.path.join(vnf_package_path, script_path) + if not os.path.exists(abs_script_path): + LOG.error('The path of install script is invalid.') + raise exceptions.MgmtDriverOtherError( + error_message="The path of install script is invalid") + + # set no proxy + project_name = '' + if proxy.get("http_proxy") and proxy.get("https_proxy"): + vm_cidr_list = self._get_vm_cidr_list( + master_ssh_ips_str.split(',')[0], proxy) + master_cluster_ip = master_vm_dict_list[0].get( + "k8s_cluster", {}).get('ipaddr') + pod_cidr = master_vm_dict_list[0].get( + "k8s_cluster", {}).get("pod_cidr") + cluster_cidr = master_vm_dict_list[0].get( + "k8s_cluster", {}).get("cluster_cidr") + proxy["no_proxy"] = ",".join(list(filter(None, [ + proxy.get("no_proxy"), pod_cidr, cluster_cidr, + "127.0.0.1", "localhost", + master_cluster_ip] + vm_cidr_list))) + + # install k8s + active_username = "" + active_password = "" + active_host = "" + ssl_ca_cert_hash = "" + kubeadm_token = "" + # install master node + for vm_dict in master_vm_dict_list: + if vm_dict.get('ssh', {}).get('nic_ip') == \ + master_ssh_ips_str.split(',')[0]: + active_username = vm_dict.get('ssh', {}).get('username') + active_password = vm_dict.get('ssh', {}).get('password') + active_host = vm_dict.get('ssh', {}).get('ipaddr') + else: + # get certificate key from active master node + commander = cmd_executer.RemoteCommandExecutor( + user=active_username, password=active_password, + host=active_host, timeout=K8S_CMD_TIMEOUT) + ssh_command = "sudo kubeadm init phase upload-certs " \ + "--upload-certs" + result = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'certificate_key', 3) + certificate_key = result[-1].replace('\n', '') + + user = vm_dict.get('ssh', {}).get('username') + password = vm_dict.get('ssh', {}).get('password') + host = vm_dict.get('ssh', {}).get('ipaddr') + k8s_cluster = vm_dict.get('k8s_cluster', {}) + commander = 
self._init_commander_and_send_install_scripts( + user, password, host, + vnf_package_path, script_path) + + # set /etc/hosts for each node + ssh_command = "> /tmp/tmp_hosts" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "cp /etc/hosts /tmp/tmp_hosts" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "sed -i '$a{}' /tmp/tmp_hosts".format( + hosts_str) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "sudo mv /tmp/tmp_hosts /etc/hosts;" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + + # execute install k8s command on VM + if proxy.get('http_proxy') and proxy.get('https_proxy'): + if vm_dict.get('ssh', {}).get('nic_ip') == \ + master_ssh_ips_str.split(',')[0]: + ssh_command = \ + "export http_proxy={http_proxy};" \ + "export https_proxy={https_proxy};" \ + "export no_proxy={no_proxy};" \ + "bash /tmp/install_k8s_cluster.sh " \ + "-m {master_ip} -i {cluster_ip} " \ + "-p {pod_cidr} -a {k8s_cluster_cidr}".format( + http_proxy=proxy.get('http_proxy'), + https_proxy=proxy.get('https_proxy'), + no_proxy=proxy.get('no_proxy'), + master_ip=master_ssh_ips_str, + cluster_ip=k8s_cluster.get("ipaddr"), + pod_cidr=k8s_cluster.get('pod_cidr'), + k8s_cluster_cidr=k8s_cluster.get('cluster_cidr')) + else: + ssh_command = \ + "export http_proxy={http_proxy};" \ + "export https_proxy={https_proxy};" \ + "export no_proxy={no_proxy};" \ + "bash /tmp/install_k8s_cluster.sh " \ + "-m {master_ip} -i {cluster_ip} " \ + "-p {pod_cidr} -a {k8s_cluster_cidr} " \ + "-t {kubeadm_token} -s {ssl_ca_cert_hash} " \ + "-k {certificate_key}".format( + http_proxy=proxy.get('http_proxy'), + https_proxy=proxy.get('https_proxy'), + no_proxy=proxy.get('no_proxy'), + master_ip=master_ssh_ips_str, + cluster_ip=k8s_cluster.get("ipaddr"), + pod_cidr=k8s_cluster.get('pod_cidr'), + k8s_cluster_cidr=k8s_cluster.get('cluster_cidr'), + kubeadm_token=kubeadm_token, + ssl_ca_cert_hash=ssl_ca_cert_hash, + certificate_key=certificate_key) + else: + if vm_dict.get('ssh', {}).get('nic_ip') == \ + master_ssh_ips_str.split(',')[0]: + ssh_command = \ + "bash /tmp/install_k8s_cluster.sh " \ + "-m {master_ip} -i {cluster_ip} " \ + "-p {pod_cidr} -a {k8s_cluster_cidr}".format( + master_ip=master_ssh_ips_str, + cluster_ip=k8s_cluster.get("ipaddr"), + pod_cidr=k8s_cluster.get('pod_cidr'), + k8s_cluster_cidr=k8s_cluster.get('cluster_cidr')) + + else: + ssh_command = \ + "bash /tmp/install_k8s_cluster.sh " \ + "-m {master_ip} -i {cluster_ip} " \ + "-p {pod_cidr} -a {k8s_cluster_cidr} " \ + "-t {kubeadm_token} -s {ssl_ca_cert_hash} " \ + "-k {certificate_key}".format( + master_ip=master_ssh_ips_str, + cluster_ip=k8s_cluster.get("ipaddr"), + pod_cidr=k8s_cluster.get('pod_cidr'), + k8s_cluster_cidr=k8s_cluster.get('cluster_cidr'), + kubeadm_token=kubeadm_token, + ssl_ca_cert_hash=ssl_ca_cert_hash, + certificate_key=certificate_key) + results = self._execute_command( + commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0) + + # get install-information from active master node + if vm_dict.get('ssh', {}).get('nic_ip') == \ + master_ssh_ips_str.split(',')[0]: + for result in results: + if 'token:' in result: + kubeadm_token = result.replace( + 'token:', '').replace('\n', '') + if 'server:' in result: + server = result.replace( + 'server:', '').replace('\n', '') + if 'ssl_ca_cert_hash:' in result: + ssl_ca_cert_hash = result.replace( + 'ssl_ca_cert_hash:', '').replace('\n', '') + 
begin_index = results.index('-----BEGIN CERTIFICATE-----\n') + end_index = results.index('-----END CERTIFICATE-----\n') + ssl_ca_cert = ''.join(results[begin_index: end_index + 1]) + commander = cmd_executer.RemoteCommandExecutor( + user=user, password=password, host=host, + timeout=K8S_CMD_TIMEOUT) + ssh_command = "kubectl create -f /tmp/create_admin_token.yaml" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + time.sleep(30) + ssh_command = "kubectl get secret -n kube-system " \ + "| grep '^admin-token' " \ + "| awk '{print $1}' " \ + "| xargs -i kubectl describe secret {} " \ + "-n kube-system" \ + "| grep 'token:' | awk '{print $2}'" + bearer_token = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'common', 0)[0].replace('\n', '') + commander.close_session() + + # install worker node + for vm_dict in worker_vm_dict_list: + user = vm_dict.get('ssh', {}).get('username') + password = vm_dict.get('ssh', {}).get('password') + host = vm_dict.get('ssh', {}).get('ipaddr') + nic_ip = vm_dict.get('ssh', {}).get('nic_ip') + cluster_ip = master_vm_dict_list[0].get( + 'k8s_cluster', {}).get('ipaddr') + commander = self._init_commander_and_send_install_scripts( + user, password, host, + vnf_package_path, script_path) + + # set /etc/hosts for each node + ssh_command = "> /tmp/tmp_hosts" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "cp /etc/hosts /tmp/tmp_hosts" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "sed -i '$a{}' /tmp/tmp_hosts".format( + hosts_str) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "sudo mv /tmp/tmp_hosts /etc/hosts;" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + + # execute install k8s command on VM + self._install_worker_node( + commander, proxy, ha_flag, nic_ip, + cluster_ip, kubeadm_token, ssl_ca_cert_hash) + commander.close_session() + + return server, bearer_token, ssl_ca_cert, project_name + + def _check_values(self, additional_param): + for key, value in additional_param.items(): + if 'master_node' == key or 'worker_node' == key: + if not value.get('username'): + LOG.error('The username in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverNotFound(param='username') + if not value.get('password'): + LOG.error('The password in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverNotFound(param='password') + if not value.get('ssh_cp_name'): + LOG.error('The ssh_cp_name in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverNotFound( + param='ssh_cp_name') + if 'master_node' == key: + if not value.get('cluster_cp_name'): + LOG.error('The cluster_cp_name in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverNotFound( + param='cluster_cp_name') + + def _get_vim_connection_info(self, context, instantiate_vnf_req): + + vim_info = vnflcm_utils._get_vim(context, + instantiate_vnf_req.vim_connection_info) + + vim_connection_info = objects.VimConnectionInfo.obj_from_primitive( + vim_info, context) + + return vim_connection_info + + def instantiate_end(self, context, vnf_instance, + instantiate_vnf_request, grant, + grant_request, **kwargs): + # get vim_connect_info + if hasattr(instantiate_vnf_request, 'vim_connection_info'): + vim_connection_info = self._get_vim_connection_info( + context, instantiate_vnf_request) + else: + # In case of healing entire Kubernetes cluster, 
'heal_end' method + # will call this method using 'vnf_instance.instantiated_vnf_info' + # as the 'instantiate_vnf_request', but there is no + # 'vim_connection_info' in it, so we should get + # 'vim_connection_info' from 'vnf_instance'. + vim_connection_info = self._get_vim_connection_info( + context, vnf_instance) + additional_param = instantiate_vnf_request.additional_params.get( + 'k8s_cluster_installation_param', {}) + script_path = additional_param.get('script_path') + vim_name = additional_param.get('vim_name') + master_node = additional_param.get('master_node', {}) + worker_node = additional_param.get('worker_node', {}) + proxy = additional_param.get('proxy', {}) + # check script_path + if not script_path: + LOG.error('The script_path in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverNotFound(param='script_path') + # get pod_cidr and cluster_cidr + pod_cidr = additional_param.get('master_node', {}).get('pod_cidr') + cluster_cidr = additional_param.get( + 'master_node', {}).get('cluster_cidr') + # check pod_cidr's value + if pod_cidr: + if not self._check_is_cidr(pod_cidr): + LOG.error('The pod_cidr in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverParamInvalid(param='pod_cidr') + else: + additional_param['master_node']['pod_cidr'] = '192.168.0.0/16' + # check cluster_cidr's value + if cluster_cidr: + if not self._check_is_cidr(cluster_cidr): + LOG.error('The cluster_cidr in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverParamInvalid(param='cluster_cidr') + else: + additional_param['master_node']['cluster_cidr'] = '10.96.0.0/12' + # get stack_id + nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id + # set vim_name + if not vim_name: + vim_name = 'kubernetes_vim_' + vnf_instance.id + + # get vm list + access_info = vim_connection_info.access_info + master_vm_dict_list = \ + self._get_install_info_for_k8s_node( + nest_stack_id, master_node, + instantiate_vnf_request.additional_params, + 'master', access_info) + worker_vm_dict_list = self._get_install_info_for_k8s_node( + nest_stack_id, worker_node, + instantiate_vnf_request.additional_params, 'worker', access_info) + server, bearer_token, ssl_ca_cert, project_name = \ + self._install_k8s_cluster(context, vnf_instance, + proxy, script_path, master_vm_dict_list, + worker_vm_dict_list) + + # register vim with kubernetes cluster info + self._create_vim(context, vnf_instance, server, + bearer_token, ssl_ca_cert, vim_name, project_name, + master_vm_dict_list) + + def terminate_start(self, context, vnf_instance, + terminate_vnf_request, grant, + grant_request, **kwargs): + pass + + def _get_vim_by_name(self, context, k8s_vim_name): + common_db_api = CommonDbMixin() + result = common_db_api._get_by_name( + self, context, nfvo_db.Vim, k8s_vim_name) + + if not result: + LOG.debug("Cannot find kubernetes " + "vim with name: {}".format(k8s_vim_name)) + + return result + + def terminate_end(self, context, vnf_instance, + terminate_vnf_request, grant, + grant_request, **kwargs): + k8s_params = vnf_instance.instantiated_vnf_info.additional_params.get( + 'k8s_cluster_installation_param', {}) + k8s_vim_name = k8s_params.get('vim_name') + if not k8s_vim_name: + k8s_vim_name = 'kubernetes_vim_' + vnf_instance.id + + vim_info = self._get_vim_by_name( + context, k8s_vim_name) + if vim_info: + nfvo_plugin = NfvoPlugin() + nfvo_plugin.delete_vim(context, vim_info.id) + + def _get_username_pwd(self, vnf_request, vnf_instance, role): + # heal and scale: get user pwd + 
kwargs_additional_params = vnf_request.additional_params + additionalParams = \ + vnf_instance.instantiated_vnf_info.additional_params + if role == 'master': + if kwargs_additional_params and \ + kwargs_additional_params.get('master_node_username') and \ + kwargs_additional_params.get('master_node_password'): + username = \ + kwargs_additional_params.get('master_node_username') + password = \ + kwargs_additional_params.get('master_node_password') + else: + username = \ + additionalParams.get( + 'k8s_cluster_installation_param').get( + 'master_node').get('username') + password = \ + additionalParams.get( + 'k8s_cluster_installation_param').get( + 'master_node').get('password') + else: + if kwargs_additional_params and \ + kwargs_additional_params.get('worker_node_username') and \ + kwargs_additional_params.get('worker_node_username'): + username = \ + kwargs_additional_params.get('worker_node_username') + password = \ + kwargs_additional_params.get('worker_node_password') + else: + username = \ + additionalParams.get( + 'k8s_cluster_installation_param').get( + 'worker_node').get('username') + password = \ + additionalParams.get( + 'k8s_cluster_installation_param').get( + 'worker_node').get('password') + return username, password + + def _get_resources_list(self, heatclient, stack_id, resource_name): + # scale: get resources list + physical_resource_id = heatclient.resources.get( + stack_id=stack_id, + resource_name=resource_name).physical_resource_id + resources_list = heatclient.resources.list( + stack_id=physical_resource_id) + return resources_list + + def _get_host_resource_list(self, heatclient, stack_id, node): + # scale: get host resource list + host_ips_list = [] + node_resource_name = node.get('aspect_id') + node_group_resource_name = node.get('aspect_id') + '_group' + if node_resource_name: + resources_list = self._get_resources_list( + heatclient, stack_id, node_group_resource_name) + for resources in resources_list: + resource_info = heatclient.resource_get( + resources.physical_resource_id, + node.get('ssh_cp_name')) + if resource_info.attributes.get('floating_ip_address'): + self.FLOATING_IP_FLAG = True + ssh_master_ip = resource_info.attributes.get( + 'floating_ip_address') + else: + ssh_master_ip = resource_info.attributes.get( + 'fixed_ips')[0].get('ip_address') + host_ips_list.append(ssh_master_ip) + else: + master_ip = heatclient.resource_get( + stack_id, node.get('ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + host_ips_list.append(master_ip) + return host_ips_list + + def _connect_ssh_scale(self, master_ip_list, master_username, + master_password): + for master_ip in master_ip_list: + retry = 4 + while retry > 0: + try: + commander = cmd_executer.RemoteCommandExecutor( + user=master_username, password=master_password, + host=master_ip, + timeout=K8S_CMD_TIMEOUT) + return commander, master_ip + except (exceptions.NotAuthorized, paramiko.SSHException, + paramiko.ssh_exception.NoValidConnectionsError) as e: + LOG.debug(e) + retry -= 1 + time.sleep(30) + if master_ip == master_ip_list[-1]: + LOG.error('Failed to execute remote command.') + raise exceptions.MgmtDriverRemoteCommandError() + + def evacuate_wait(self, commander, daemonset_content): + # scale: evacuate wait + wait_flag = True + retry_count = 20 + while wait_flag and retry_count > 0: + if daemonset_content.get('items'): + ssh_command = "kubectl get pods --all-namespaces -o json" + result = self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + pods_list = 
json.loads(''.join(result)).get('items') + pods_names = [pod.get('metadata', {}).get('name') + for pod in pods_list] + for daemonset in daemonset_content.get('items'): + daemonset_name = daemonset.get('metadata', {}).get('name') + if daemonset_name in pods_names and \ + 'calico-node' not in daemonset_name and \ + 'kube-proxy' not in daemonset_name: + break + else: + wait_flag = False + else: + break + if not wait_flag: + break + time.sleep(15) + retry_count -= 1 + + def _delete_scale_in_worker( + self, worker_node, kwargs, heatclient, stack_id, + commander): + # scale: get host name + scale_worker_nic_ips = [] + normal_worker_ssh_ips = [] + worker_host_names = [] + scale_name_list = kwargs.get('scale_name_list') + physical_resource_id = heatclient.resource_get( + stack_id, + kwargs.get('scale_vnf_request', {}).aspect_id + '_group') \ + .physical_resource_id + worker_resource_list = heatclient.resource_get_list( + physical_resource_id) + for worker_resource in worker_resource_list: + worker_cp_resource = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('nic_cp_name')) + if worker_resource.resource_name in scale_name_list: + scale_worker_ip = worker_cp_resource.attributes.get( + 'fixed_ips')[0].get('ip_address') + scale_worker_nic_ips.append(scale_worker_ip) + worker_host_name = \ + 'worker' + scale_worker_ip.split('.')[-1] + worker_host_names.append(worker_host_name) + else: + normal_worker_ssh_cp_resource = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('ssh_cp_name')) + if normal_worker_ssh_cp_resource.attributes.get( + 'floating_ip_address'): + normal_worker_ssh_ips.append( + normal_worker_ssh_cp_resource.attributes.get( + 'floating_ip_address')) + else: + normal_worker_ssh_ips.append( + normal_worker_ssh_cp_resource.attributes.get( + 'fixed_ips')[0].get('ip_address')) + + for worker_host_name in worker_host_names: + ssh_command = "kubectl get pods --field-selector=spec." 
\ + "nodeName={} --all-namespaces " \ + "-o json".format(worker_host_name) + result = self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + daemonset_content_str = ''.join(result) + daemonset_content = json.loads( + daemonset_content_str) + ssh_command = \ + "kubectl drain {resource} --ignore-daemonsets " \ + "--timeout={k8s_cmd_timeout}s".format( + resource=worker_host_name, + k8s_cmd_timeout=K8S_CMD_TIMEOUT) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'drain', 3) + # evacuate_wait() + # input: resource, daemonset_content + self.evacuate_wait(commander, daemonset_content) + ssh_command = "kubectl delete node {}".format(worker_host_name) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + return scale_worker_nic_ips, normal_worker_ssh_ips + + def _set_node_ip_in_hosts(self, commander, + type, ips=None, hosts_str=None): + ssh_command = "> /tmp/tmp_hosts" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "cp /etc/hosts /tmp/tmp_hosts" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + if type == 'scale_in': + for ip in ips: + ssh_command = "sed -i '/{}/d' /tmp/tmp_hosts".format( + ip) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + elif type == 'scale_out' or type == 'heal_end': + ssh_command = "sed -i '$a{}' /tmp/tmp_hosts".format( + hosts_str) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "sudo mv /tmp/tmp_hosts /etc/hosts;" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + + def scale_start(self, context, vnf_instance, + scale_vnf_request, grant, + grant_request, **kwargs): + if scale_vnf_request.type == 'SCALE_IN': + vim_connection_info = \ + self._get_vim_connection_info(context, vnf_instance) + + kwargs['scale_vnf_request'] = scale_vnf_request + heatclient = hc.HeatClient(vim_connection_info.access_info) + additionalParams = \ + vnf_instance.instantiated_vnf_info.additional_params + master_username, master_password = self._get_username_pwd( + scale_vnf_request, vnf_instance, 'master') + worker_username, worker_password = self._get_username_pwd( + scale_vnf_request, vnf_instance, 'worker') + stack_id = vnf_instance.instantiated_vnf_info.instance_id + master_node = \ + additionalParams.get('k8s_cluster_installation_param').get( + 'master_node') + worker_node = \ + additionalParams.get('k8s_cluster_installation_param').get( + 'worker_node') + master_ip_list = self._get_host_resource_list( + heatclient, stack_id, master_node) + commander, master_ip = self._connect_ssh_scale( + master_ip_list, master_username, + master_password) + + scale_worker_nic_ips, normal_worker_ssh_ips = \ + self._delete_scale_in_worker( + worker_node, kwargs, heatclient, stack_id, commander) + commander.close_session() + + # modify /etc/hosts/ on each node + for master_ip in master_ip_list: + commander = self._init_commander_and_send_install_scripts( + master_username, master_password, master_ip) + self._set_node_ip_in_hosts( + commander, 'scale_in', scale_worker_nic_ips) + commander.close_session() + for worker_ip in normal_worker_ssh_ips: + commander = self._init_commander_and_send_install_scripts( + worker_username, worker_password, worker_ip) + self._set_node_ip_in_hosts( + commander, 'scale_in', scale_worker_nic_ips) + commander.close_session() + else: + pass + + def _get_worker_info(self, worker_node, worker_resource_list, + 
heatclient, scale_out_id_list): + normal_ssh_worker_ip_list = [] + normal_nic_worker_ip_list = [] + add_worker_ssh_ip_list = [] + add_worker_nic_ip_list = [] + for worker_resource in worker_resource_list: + if self.FLOATING_IP_FLAG: + ssh_ip = heatclient.resources.get( + stack_id=worker_resource.physical_resource_id, + resource_name=worker_node.get('ssh_cp_name')). \ + attributes.get('floating_ip_address') + else: + ssh_ip = heatclient.resources.get( + stack_id=worker_resource.physical_resource_id, + resource_name=worker_node.get('ssh_cp_name')). \ + attributes.get( + 'fixed_ips')[0].get('ip_address') + nic_ip = heatclient.resources.get( + stack_id=worker_resource.physical_resource_id, + resource_name=worker_node.get('nic_cp_name')). \ + attributes.get('fixed_ips')[0].get('ip_address') + + if worker_resource.physical_resource_id in scale_out_id_list: + add_worker_ssh_ip_list.append(ssh_ip) + add_worker_nic_ip_list.append(nic_ip) + elif worker_resource.physical_resource_id not in \ + scale_out_id_list: + normal_ssh_worker_ip_list.append(ssh_ip) + normal_nic_worker_ip_list.append(nic_ip) + return (add_worker_ssh_ip_list, add_worker_nic_ip_list, + normal_ssh_worker_ip_list, normal_nic_worker_ip_list) + + def _get_master_info( + self, master_resource_list, heatclient, master_node): + master_ssh_ip_list = [] + master_nic_ip_list = [] + for master_resource in master_resource_list: + master_host_reource_info = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('ssh_cp_name')) + if master_host_reource_info.attributes.get('floating_ip_address'): + self.FLOATING_IP_FLAG = True + master_ssh_ip = master_host_reource_info.attributes.get( + 'floating_ip_address') + else: + master_ssh_ip = master_host_reource_info.attributes. \ + get('fixed_ips')[0].get('ip_address') + master_nic_ip = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('nic_cp_name')).attributes. \ + get('fixed_ips')[0].get('ip_address') + master_ssh_ip_list.append(master_ssh_ip) + master_nic_ip_list.append(master_nic_ip) + return master_ssh_ip_list, master_nic_ip_list + + def scale_end(self, context, vnf_instance, + scale_vnf_request, grant, + grant_request, **kwargs): + if scale_vnf_request.type == 'SCALE_OUT': + k8s_cluster_installation_param = \ + vnf_instance.instantiated_vnf_info. \ + additional_params.get('k8s_cluster_installation_param') + vnf_package_path = vnflcm_utils._get_vnf_package_path( + context, vnf_instance.vnfd_id) + nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id + resource_name = scale_vnf_request.aspect_id + '_group' + vim_connection_info = \ + self._get_vim_connection_info(context, vnf_instance) + heatclient = hc.HeatClient(vim_connection_info.access_info) + scale_out_id_list = kwargs.get('scale_out_id_list') + + # get master_ip + master_ssh_ip_list = [] + master_nic_ip_list = [] + master_node = k8s_cluster_installation_param.get('master_node') + + # The VM is created with SOL001 TOSCA-based VNFD and + # not use policies. At present, scale operation dose + # not support this case. 
+ if not master_node.get('aspect_id'): + master_ssh_ip_list.append(heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=master_node.get( + 'ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address')) + master_nic_ip_list.append(heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=master_node.get( + 'nic_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address')) + cluster_ip = self._get_cluster_ip( + heatclient, 1, master_node, None, nest_stack_id) + + # The VM is created with UserData format + else: + master_resource_list = self._get_resources_list( + heatclient, nest_stack_id, master_node.get( + 'aspect_id') + '_group') + master_ssh_ip_list, master_nic_ip_list = \ + self._get_master_info(master_resource_list, + heatclient, master_node) + resource_num = len(master_resource_list) + cluster_ip = self._get_cluster_ip( + heatclient, resource_num, master_node, + master_resource_list[0].physical_resource_id, + nest_stack_id) + + # get scale out worker_ips + worker_resource_list = self._get_resources_list( + heatclient, nest_stack_id, resource_name) + worker_node = \ + k8s_cluster_installation_param['worker_node'] + (add_worker_ssh_ip_list, add_worker_nic_ip_list, + normal_ssh_worker_ip_list, normal_nic_worker_ip_list) = \ + self._get_worker_info( + worker_node, worker_resource_list, + heatclient, scale_out_id_list) + + # get kubeadm_token from one of master node + master_username, master_password = self._get_username_pwd( + scale_vnf_request, vnf_instance, 'master') + worker_username, worker_password = self._get_username_pwd( + scale_vnf_request, vnf_instance, 'worker') + commander, master_ip = self._connect_ssh_scale( + master_ssh_ip_list, master_username, + master_password) + ssh_command = "kubeadm token create;" + kubeadm_token = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'common', 3)[0].replace('\n', '') + + # get hash from one of master node + ssh_command = "openssl x509 -pubkey -in " \ + "/etc/kubernetes/pki/ca.crt | openssl rsa " \ + "-pubin -outform der 2>/dev/null | " \ + "openssl dgst -sha256 -hex | sed 's/^.* //';" + ssl_ca_cert_hash = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'common', 3)[0].replace('\n', '') + commander.close_session() + # set no_proxy + proxy = k8s_cluster_installation_param.get('proxy') + vm_cidr_list = self._get_vm_cidr_list( + master_nic_ip_list[0], proxy) + pod_cidr = master_node.get('pod_cidr', '192.168.0.0/16') + cluster_cidr = master_node.get("cluster_cidr", '10.96.0.0/12') + if proxy.get("http_proxy") and proxy.get("https_proxy"): + no_proxy = ','.join(list(filter(None, [ + proxy.get("no_proxy"), pod_cidr, cluster_cidr, + "127.0.0.1", "localhost", + cluster_ip] + vm_cidr_list))) + proxy['no_proxy'] = no_proxy + + # set /etc/hosts + master_hosts = [] + add_worker_hosts = [] + normal_worker_hosts = [] + for master_ip in master_nic_ip_list: + master_ip_str = \ + master_ip + ' master' + master_ip.split('.')[-1] + master_hosts.append(master_ip_str) + for worker_ip in add_worker_nic_ip_list: + worker_ip_str = \ + worker_ip + ' worker' + worker_ip.split('.')[-1] + add_worker_hosts.append(worker_ip_str) + for worker_ip in normal_nic_worker_ip_list: + worker_ip_str = \ + worker_ip + ' worker' + worker_ip.split('.')[-1] + normal_worker_hosts.append(worker_ip_str) + + ha_flag = True + if len(master_nic_ip_list) == 1: + ha_flag = False + for worker_ip in add_worker_ssh_ip_list: + script_path = \ + k8s_cluster_installation_param.get('script_path') + commander = 
self._init_commander_and_send_install_scripts( + worker_username, worker_password, + worker_ip, vnf_package_path, script_path) + hosts_str = '\\n'.join(master_hosts + add_worker_hosts + + normal_worker_hosts) + self._set_node_ip_in_hosts(commander, + 'scale_out', hosts_str=hosts_str) + worker_nic_ip = add_worker_nic_ip_list[ + add_worker_ssh_ip_list.index(worker_ip)] + self._install_worker_node( + commander, proxy, ha_flag, worker_nic_ip, + cluster_ip, kubeadm_token, ssl_ca_cert_hash) + commander.close_session() + + hosts_str = '\\n'.join(add_worker_hosts) + # set /etc/hosts on master node and normal worker node + for master_ip in master_ssh_ip_list: + commander = self._init_commander_and_send_install_scripts( + worker_username, worker_password, master_ip) + self._set_node_ip_in_hosts( + commander, 'scale_out', hosts_str=hosts_str) + commander.close_session() + for worker_ip in normal_ssh_worker_ip_list: + commander = self._init_commander_and_send_install_scripts( + worker_node.get('username'), worker_node.get('password'), + worker_ip) + self._set_node_ip_in_hosts( + commander, 'scale_out', hosts_str=hosts_str) + commander.close_session() + else: + pass + + def _get_vnfc_resource_id(self, vnfc_resource_info, vnfc_instance_id): + for vnfc_resource in vnfc_resource_info: + if vnfc_resource.id == vnfc_instance_id: + return vnfc_resource + else: + return None + + def _get_master_node_name( + self, heatclient, master_resource_list, + target_physical_resource_ids, master_node): + fixed_master_infos = {} + not_fixed_master_infos = {} + flag_master = False + for master_resource in master_resource_list: + master_resource_infos = heatclient.resources.list( + master_resource.physical_resource_id) + master_host_reource_info = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('ssh_cp_name')) + for master_resource_info in master_resource_infos: + if master_resource_info.resource_type == \ + 'OS::Nova::Server' and \ + master_resource_info.physical_resource_id in \ + target_physical_resource_ids: + flag_master = True + if master_host_reource_info.attributes.get( + 'floating_ip_address'): + self.FLOATING_IP_FLAG = True + master_ssh_ip = master_host_reource_info.attributes.\ + get('floating_ip_address') + else: + master_ssh_ip = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + master_nic_ip = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('nic_cp_name')).attributes. \ + get('fixed_ips')[0].get('ip_address') + master_name = 'master' + master_nic_ip.split('.')[-1] + fixed_master_infos[master_name] = {} + fixed_master_infos[master_name]['master_ssh_ip'] = \ + master_ssh_ip + fixed_master_infos[master_name]['master_nic_ip'] = \ + master_nic_ip + elif master_resource_info.resource_type == \ + 'OS::Nova::Server' and \ + master_resource_info.physical_resource_id not in \ + target_physical_resource_ids: + if master_host_reource_info.attributes.get( + 'floating_ip_address'): + self.FLOATING_IP_FLAG = True + master_ssh_ip = master_host_reource_info.attributes.\ + get('floating_ip_address') + else: + master_ssh_ip = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + master_nic_ip = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('nic_cp_name')).attributes. 
\ + get('fixed_ips')[0].get('ip_address') + master_name = 'master' + master_nic_ip.split('.')[-1] + not_fixed_master_infos[master_name] = {} + not_fixed_master_infos[master_name]['master_ssh_ip'] = \ + master_ssh_ip + not_fixed_master_infos[master_name]['master_nic_ip'] = \ + master_nic_ip + if flag_master and len(master_resource_list) == 1: + LOG.error("An error occurred in MgmtDriver:{" + "The number of Master-Nodes is 1 " + "or less. If you want to heal, " + "please respawn.}") + raise exceptions.MgmtDriverOtherError( + error_message="An error occurred in MgmtDriver:{" + "The number of Master-Nodes is 1 " + "or less. If you want to heal, " + "please respawn.}") + return flag_master, fixed_master_infos, not_fixed_master_infos + + def _get_worker_node_name( + self, heatclient, worker_resource_list, + target_physical_resource_ids, worker_node): + fixed_worker_infos = {} + not_fixed_worker_infos = {} + flag_worker = False + for worker_resource in worker_resource_list: + worker_resource_infos = heatclient.resources.list( + worker_resource.physical_resource_id) + worker_host_reource_info = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('ssh_cp_name')) + for worker_resource_info in worker_resource_infos: + if worker_resource_info.resource_type == \ + 'OS::Nova::Server' and \ + worker_resource_info.physical_resource_id in \ + target_physical_resource_ids: + flag_worker = True + if worker_host_reource_info.attributes.get( + 'floating_ip_address'): + self.FLOATING_IP_FLAG = True + worker_ssh_ip = worker_host_reource_info.attributes.\ + get('floating_ip_address') + else: + worker_ssh_ip = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + worker_nic_ip = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('nic_cp_name')).attributes. \ + get('fixed_ips')[0].get('ip_address') + worker_name = 'worker' + worker_nic_ip.split('.')[-1] + fixed_worker_infos[worker_name] = {} + fixed_worker_infos[worker_name]['worker_ssh_ip'] = \ + worker_ssh_ip + fixed_worker_infos[worker_name]['worker_nic_ip'] = \ + worker_nic_ip + elif worker_resource_info.resource_type == \ + 'OS::Nova::Server' and \ + worker_resource_info.physical_resource_id not in \ + target_physical_resource_ids: + if worker_host_reource_info.attributes.get( + 'floating_ip_address'): + self.FLOATING_IP_FLAG = True + worker_ssh_ip = worker_host_reource_info.attributes.\ + get('floating_ip_address') + else: + worker_ssh_ip = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + worker_nic_ip = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('nic_cp_name')).attributes. 
\ + get('fixed_ips')[0].get('ip_address') + worker_name = 'worker' + worker_nic_ip.split('.')[-1] + not_fixed_worker_infos[worker_name] = {} + not_fixed_worker_infos[worker_name]['worker_ssh_ip'] = \ + worker_ssh_ip + not_fixed_worker_infos[worker_name]['worker_nic_ip'] = \ + worker_nic_ip + return flag_worker, fixed_worker_infos, not_fixed_worker_infos + + def _get_worker_ssh_ip( + self, heatclient, stack_id, master_resource_name, + worker_resource_name, target_physical_resource_ids): + flag_worker = False + fixed_worker_infos = dict() + not_fixed_master_infos = dict() + stack_resource_list = heatclient.resources.list(stack_id) + worker_ip = heatclient.resource_get( + stack_id, worker_resource_name).attributes.get( + 'fixed_ips')[0].get('ip_address') + master_ip = heatclient.resource_get( + stack_id, master_resource_name).attributes.get( + 'fixed_ips')[0].get('ip_address') + master_name = 'master' + master_ip.split('.')[-1] + for stack_resource in stack_resource_list: + if stack_resource.resource_type == 'OS::Nova::Server': + current_ip_list = [] + current_address = heatclient.resource_get( + stack_id, stack_resource.resource_name).attributes.get( + 'addresses', {}) + for network, network_info in current_address.items(): + for network_ip_info in network_info: + current_ip_list.append(network_ip_info.get('addr')) + + if stack_resource.physical_resource_id in \ + target_physical_resource_ids and \ + master_ip in current_ip_list: + LOG.error("An error occurred in MgmtDriver:{" + "The number of Master-Nodes is 1 " + "or less. If you want to heal, " + "please respawn.}") + raise exceptions.MgmtDriverOtherError( + error_message="An error occurred in MgmtDriver:{" + "The number of Master-Nodes is 1 " + "or less. If you want to heal, " + "please respawn.}") + elif stack_resource.physical_resource_id not in \ + target_physical_resource_ids and \ + master_ip in current_ip_list: + not_fixed_master_infos.update( + {master_name: {'master_ssh_ip': master_ip}}) + not_fixed_master_infos[master_name].update( + {'master_nic_ip': master_ip}) + elif stack_resource.physical_resource_id in \ + target_physical_resource_ids and \ + worker_ip in current_ip_list: + worker_name = 'worker' + worker_ip.split('.')[-1] + fixed_worker_infos.update( + {worker_name: {'worker_ssh_ip': worker_ip}}) + fixed_worker_infos[worker_name].update( + {'worker_nic_ip': worker_ip}) + flag_worker = True + return flag_worker, fixed_worker_infos, not_fixed_master_infos, {} + + def _delete_master_node( + self, fixed_master_infos, not_fixed_master_infos, + master_username, master_password): + not_fixed_master_ssh_ips = [ + master_ips.get('master_ssh_ip') + for master_ips in not_fixed_master_infos.values()] + + for fixed_master_name in fixed_master_infos.keys(): + # delete heal master node info from haproxy.cfg + # on other master node + for not_fixed_master_ssh_ip in not_fixed_master_ssh_ips: + commander = cmd_executer.RemoteCommandExecutor( + user=master_username, password=master_password, + host=not_fixed_master_ssh_ip, + timeout=K8S_CMD_TIMEOUT) + master_ssh_ip = not_fixed_master_ssh_ip + ssh_command = "sudo sed -i '/server {}/d' " \ + "/etc/haproxy/haproxy.cfg;" \ + "sudo service haproxy restart;" \ + "".format(fixed_master_name) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + + # delete master node + ssh_command = "kubectl delete node " + \ + fixed_master_name + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + connect_master_name = '' + for not_master_name, 
not_master_ip_info in \ + not_fixed_master_infos.items(): + if not_master_ip_info['master_ssh_ip'] == master_ssh_ip: + connect_master_name = not_master_name + ssh_command = \ + "kubectl get pods -n kube-system | " \ + "grep %(connect_master_name)s | " \ + "awk '{print $1}'" \ + "" % {'connect_master_name': connect_master_name} + etcd_name = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'common', 3)[0].replace('\n', '') + ssh_command = \ + "kubectl exec -i %(etcd_name)s -n kube-system " \ + "-- sh<< EOF\n" \ + "etcdctl --endpoints 127.0.0.1:2379 " \ + "--cacert /etc/kubernetes/pki/etcd/ca.crt " \ + "--cert /etc/kubernetes/pki/etcd/server.crt " \ + "--key /etc/kubernetes/pki/etcd/server.key " \ + "member list\nEOF" \ + "" % {'etcd_name': etcd_name} + results = self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'etcd', 3) + etcd_id = [res for res in results + if fixed_master_name + in res][0].split(',')[0] + ssh_command = \ + "kubectl exec -i %(etcd_name)s -n kube-system " \ + "-- sh<< EOF\n" \ + "etcdctl --endpoints 127.0.0.1:2379 " \ + "--cacert /etc/kubernetes/pki/etcd/ca.crt " \ + "--cert /etc/kubernetes/pki/etcd/server.crt " \ + "--key /etc/kubernetes/pki/etcd/server.key " \ + "member remove %(etcd_id)s\nEOF" % \ + {'etcd_name': etcd_name, "etcd_id": etcd_id} + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'etcd', 3) + commander.close_session() + + def _delete_worker_node( + self, fixed_worker_infos, not_fixed_master_infos, + master_username, master_password): + not_fixed_master_ssh_ips = [ + master_ips.get('master_ssh_ip') + for master_ips in not_fixed_master_infos.values()] + for fixed_worker_name in fixed_worker_infos.keys(): + commander, master_ssh_ip = self._connect_ssh_scale( + not_fixed_master_ssh_ips, master_username, + master_password) + ssh_command = "kubectl get pods --field-selector=" \ + "spec.nodeName={} -o json" \ + "".format(fixed_worker_name) + result = self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + worker_node_pod_info_str = ''.join(result) + worker_node_pod_info = json.loads( + worker_node_pod_info_str) + ssh_command = "kubectl drain {} " \ + "--ignore-daemonsets " \ + "--timeout={}s" \ + "".format(fixed_worker_name, + K8S_CMD_TIMEOUT) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'drain', 3) + self.evacuate_wait( + commander, worker_node_pod_info) + ssh_command = "kubectl delete node {}".format( + fixed_worker_name) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + commander.close_session() + + def _delete_node_to_be_healed( + self, heatclient, stack_id, target_physical_resource_ids, + master_username, master_password, worker_resource_name, + master_resource_name, master_node, worker_node): + master_ssh_cp_name = master_node.get('nic_cp_name') + flag_master = False + flag_worker = False + if master_resource_name == master_ssh_cp_name: + (flag_worker, fixed_worker_infos, not_fixed_master_infos, + not_fixed_worker_infos) = \ + self._get_worker_ssh_ip( + heatclient, stack_id, master_resource_name, + worker_resource_name, target_physical_resource_ids) + else: + master_resource_list = self._get_resources_list( + heatclient, stack_id, master_resource_name) + flag_master, fixed_master_infos, not_fixed_master_infos = \ + self._get_master_node_name( + heatclient, master_resource_list, + target_physical_resource_ids, + master_node) + if len(master_resource_list) == 1 and flag_master: + LOG.error("An error occurred in MgmtDriver:{" + 
"The number of Master-Nodes is 1 " + "or less. If you want to heal, " + "please respawn.}") + raise exceptions.MgmtDriverOtherError( + error_message="An error occurred in MgmtDriver:{" + "The number of Master-Nodes is 1 " + "or less. If you want to heal, " + "please respawn.}") + worker_resource_list = self._get_resources_list( + heatclient, stack_id, worker_resource_name) + flag_worker, fixed_worker_infos, not_fixed_worker_infos = \ + self._get_worker_node_name( + heatclient, worker_resource_list, + target_physical_resource_ids, + worker_node) + if flag_master: + self._delete_master_node( + fixed_master_infos, not_fixed_master_infos, + master_username, master_password) + if flag_worker: + self._delete_worker_node( + fixed_worker_infos, not_fixed_master_infos, + master_username, master_password) + + def _get_node_resource_name(self, vnf_additional_params, node): + if node.get('aspect_id'): + # in case of Userdata format + if 'lcm-operation-user-data' in vnf_additional_params.keys() and \ + 'lcm-operation-user-data-class' in \ + vnf_additional_params.keys(): + resource_name = node.get('aspect_id') + '_group' + # in case of SOL001 TOSCA-based VNFD with HA master node + else: + resource_name = node.get('aspect_id') + else: + # in case of SOL001 TOSCA-based VNFD with single master node + resource_name = node.get('nic_cp_name') + return resource_name + + def _get_target_physical_resource_ids(self, vnf_instance, + heal_vnf_request): + target_physical_resource_ids = [] + for vnfc_instance_id in heal_vnf_request.vnfc_instance_id: + instantiated_vnf_info = vnf_instance.instantiated_vnf_info + vnfc_resource_info = instantiated_vnf_info.vnfc_resource_info + vnfc_resource = self._get_vnfc_resource_id( + vnfc_resource_info, vnfc_instance_id) + if vnfc_resource: + target_physical_resource_ids.append( + vnfc_resource.compute_resource.resource_id) + + return target_physical_resource_ids + + def heal_start(self, context, vnf_instance, + heal_vnf_request, grant, + grant_request, **kwargs): + stack_id = vnf_instance.instantiated_vnf_info.instance_id + vnf_additional_params = \ + vnf_instance.instantiated_vnf_info.additional_params + master_node = vnf_additional_params.get( + 'k8s_cluster_installation_param', {}).get( + 'master_node', {}) + worker_node = vnf_additional_params.get( + 'k8s_cluster_installation_param', {}).get( + 'worker_node', {}) + master_resource_name = self._get_node_resource_name( + vnf_additional_params, master_node) + worker_resource_name = self._get_node_resource_name( + vnf_additional_params, worker_node) + master_username, master_password = self._get_username_pwd( + heal_vnf_request, vnf_instance, 'master') + vim_connection_info = self._get_vim_connection_info( + context, vnf_instance) + heatclient = hc.HeatClient(vim_connection_info.access_info) + if not heal_vnf_request.vnfc_instance_id: + k8s_params = vnf_additional_params.get( + 'k8s_cluster_installation_param', {}) + k8s_vim_name = k8s_params.get('vim_name') + if not k8s_vim_name: + k8s_vim_name = 'kubernetes_vim_' + vnf_instance.id + k8s_vim_info = self._get_vim_by_name( + context, k8s_vim_name) + if k8s_vim_info: + nfvo_plugin = NfvoPlugin() + nfvo_plugin.delete_vim(context, k8s_vim_info.id) + for vim_info in vnf_instance.vim_connection_info: + if vim_info.vim_id == k8s_vim_info.id: + vnf_instance.vim_connection_info.remove(vim_info) + else: + target_physical_resource_ids = \ + self._get_target_physical_resource_ids( + vnf_instance, heal_vnf_request) + self._delete_node_to_be_healed( + heatclient, stack_id, 
target_physical_resource_ids, + master_username, master_password, worker_resource_name, + master_resource_name, master_node, worker_node) + + def _fix_master_node( + self, not_fixed_master_infos, hosts_str, + fixed_master_infos, proxy, + master_username, master_password, vnf_package_path, + script_path, cluster_ip, pod_cidr, cluster_cidr, + kubeadm_token, ssl_ca_cert_hash, ha_flag): + not_fixed_master_nic_ips = [ + master_ips.get('master_nic_ip') + for master_ips in not_fixed_master_infos.values()] + not_fixed_master_ssh_ips = [ + master_ips.get('master_ssh_ip') + for master_ips in not_fixed_master_infos.values()] + fixed_master_nic_ips = [ + master_ips.get('master_nic_ip') + for master_ips in fixed_master_infos.values()] + master_ssh_ips_str = ','.join( + not_fixed_master_nic_ips + fixed_master_nic_ips) + for fixed_master_name, fixed_master_info in \ + fixed_master_infos.items(): + commander, master_ip = self._connect_ssh_scale( + not_fixed_master_ssh_ips, + master_username, master_password) + ssh_command = "sudo kubeadm init phase upload-certs " \ + "--upload-certs" + result = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'certificate_key', 3) + certificate_key = result[-1].replace('\n', '') + commander.close_session() + commander = self._init_commander_and_send_install_scripts( + master_username, master_password, + fixed_master_info.get('master_ssh_ip'), + vnf_package_path, script_path) + self._set_node_ip_in_hosts( + commander, 'heal_end', hosts_str=hosts_str) + if proxy.get('http_proxy') and proxy.get('https_proxy'): + ssh_command = \ + "export http_proxy={http_proxy};" \ + "export https_proxy={https_proxy};" \ + "export no_proxy={no_proxy};" \ + "export ha_flag={ha_flag};" \ + "bash /tmp/install_k8s_cluster.sh " \ + "-m {master_ip} -i {cluster_ip} " \ + "-p {pod_cidr} -a {k8s_cluster_cidr} " \ + "-t {kubeadm_token} -s {ssl_ca_cert_hash} " \ + "-k {certificate_key}".format( + http_proxy=proxy.get('http_proxy'), + https_proxy=proxy.get('https_proxy'), + no_proxy=proxy.get('no_proxy'), + ha_flag=ha_flag, + master_ip=master_ssh_ips_str, + cluster_ip=cluster_ip, + pod_cidr=pod_cidr, + k8s_cluster_cidr=cluster_cidr, + kubeadm_token=kubeadm_token, + ssl_ca_cert_hash=ssl_ca_cert_hash, + certificate_key=certificate_key) + else: + ssh_command = \ + "export ha_flag={ha_flag};" \ + "bash /tmp/install_k8s_cluster.sh " \ + "-m {master_ip} -i {cluster_ip} " \ + "-p {pod_cidr} -a {k8s_cluster_cidr} " \ + "-t {kubeadm_token} -s {ssl_ca_cert_hash} " \ + "-k {certificate_key}".format( + ha_flag=ha_flag, + master_ip=master_ssh_ips_str, + cluster_ip=cluster_ip, + pod_cidr=pod_cidr, + k8s_cluster_cidr=cluster_cidr, + kubeadm_token=kubeadm_token, + ssl_ca_cert_hash=ssl_ca_cert_hash, + certificate_key=certificate_key) + self._execute_command( + commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0) + commander.close_session() + for not_fixed_master_name, not_fixed_master in \ + not_fixed_master_infos.items(): + commander = self._init_commander_and_send_install_scripts( + master_username, master_password, + not_fixed_master.get('master_ssh_ip')) + ssh_command = r"sudo sed -i '/server * check/a\ server " \ + "{} {}:6443 check' " \ + "/etc/haproxy/haproxy.cfg" \ + "".format(fixed_master_name, + fixed_master_info.get( + 'master_nic_ip')) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + commander.close_session() + + def _fix_worker_node( + self, fixed_worker_infos, + hosts_str, worker_username, worker_password, + vnf_package_path, script_path, proxy, 
cluster_ip, + kubeadm_token, ssl_ca_cert_hash, ha_flag): + for fixed_worker_name, fixed_worker in fixed_worker_infos.items(): + commander = self._init_commander_and_send_install_scripts( + worker_username, worker_password, + fixed_worker.get('worker_ssh_ip'), + vnf_package_path, script_path) + self._install_worker_node( + commander, proxy, ha_flag, + fixed_worker.get('worker_nic_ip'), + cluster_ip, kubeadm_token, ssl_ca_cert_hash) + self._set_node_ip_in_hosts( + commander, 'heal_end', hosts_str=hosts_str) + commander.close_session() + + def _heal_and_join_k8s_node( + self, heatclient, stack_id, target_physical_resource_ids, + vnf_additional_params, master_resource_name, master_username, + master_password, vnf_package_path, worker_resource_name, + worker_username, worker_password, cluster_resource_name, + master_node, worker_node): + master_ssh_cp_name = master_node.get('nic_cp_name') + flag_master = False + flag_worker = False + fixed_master_infos = {} + if master_resource_name == master_ssh_cp_name: + (flag_worker, fixed_worker_infos, not_fixed_master_infos, + not_fixed_worker_infos) = \ + self._get_worker_ssh_ip( + heatclient, stack_id, master_resource_name, + worker_resource_name, target_physical_resource_ids) + cluster_ip = heatclient.resource_get( + stack_id, master_node.get('cluster_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + else: + master_resource_list = self._get_resources_list( + heatclient, stack_id, master_resource_name) + flag_master, fixed_master_infos, not_fixed_master_infos = \ + self._get_master_node_name( + heatclient, master_resource_list, + target_physical_resource_ids, master_node) + worker_resource_list = self._get_resources_list( + heatclient, stack_id, worker_resource_name) + flag_worker, fixed_worker_infos, not_fixed_worker_infos = \ + self._get_worker_node_name( + heatclient, worker_resource_list, + target_physical_resource_ids, worker_node) + if len(master_resource_list) > 1: + cluster_resource = heatclient.resource_get( + stack_id, cluster_resource_name) + cluster_ip = cluster_resource.attributes.get( + 'fixed_ips')[0].get('ip_address') + else: + cluster_ip = list(not_fixed_master_infos.values())[0].get( + 'master_nic_ip') + vm_cidr_list = [] + k8s_cluster_installation_param = vnf_additional_params.get( + 'k8s_cluster_installation_param', {}) + proxy = k8s_cluster_installation_param.get('proxy', {}) + if proxy.get('k8s_node_cidr'): + cidr = proxy.get('k8s_node_cidr') + else: + cidr = list(not_fixed_master_infos.values())[0].get( + 'master_nic_ip') + '/24' + network_ips = ipaddress.ip_network(cidr, False) + for network_ip in network_ips: + vm_cidr_list.append(str(network_ip)) + master_node = k8s_cluster_installation_param.get('master_node') + script_path = k8s_cluster_installation_param.get('script_path') + pod_cidr = master_node.get('pod_cidr', '192.168.0.0/16') + cluster_cidr = master_node.get("cluster_cidr", '10.96.0.0/12') + if proxy.get("http_proxy") and proxy.get("https_proxy"): + no_proxy = ','.join(list(filter(None, [ + proxy.get("no_proxy"), pod_cidr, cluster_cidr, + "127.0.0.1", "localhost", + cluster_ip] + vm_cidr_list))) + proxy['no_proxy'] = no_proxy + not_fixed_master_ssh_ips = [ + master_ips.get('master_ssh_ip') + for master_ips in not_fixed_master_infos.values()] + commander, master_ip = self._connect_ssh_scale( + not_fixed_master_ssh_ips, + master_username, master_password) + ssh_command = "sudo kubeadm token create" + kubeadm_token = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'common', 
3)[0].replace('\n', '') + + # get hash from one of master node + ssh_command = "sudo openssl x509 -pubkey -in " \ + "/etc/kubernetes/pki/ca.crt | openssl rsa " \ + "-pubin -outform der 2>/dev/null | " \ + "openssl dgst -sha256 -hex | sed 's/^.* //'" + ssl_ca_cert_hash = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'common', 3)[0].replace('\n', '') + commander.close_session() + if len(fixed_master_infos) + len(not_fixed_master_ssh_ips) == 1: + ha_flag = False + else: + ha_flag = True + + hosts_str = self._get_all_hosts( + not_fixed_master_infos, fixed_master_infos, + not_fixed_worker_infos, fixed_worker_infos) + if flag_master: + self._fix_master_node( + not_fixed_master_infos, hosts_str, + fixed_master_infos, proxy, + master_username, master_password, vnf_package_path, + script_path, cluster_ip, pod_cidr, cluster_cidr, + kubeadm_token, ssl_ca_cert_hash, ha_flag) + if flag_worker: + self._fix_worker_node( + fixed_worker_infos, + hosts_str, worker_username, worker_password, + vnf_package_path, script_path, proxy, cluster_ip, + kubeadm_token, ssl_ca_cert_hash, ha_flag) + + def _get_all_hosts(self, not_fixed_master_infos, fixed_master_infos, + not_fixed_worker_infos, fixed_worker_infos): + master_hosts = [] + worker_hosts = [] + + not_fixed_master_nic_ips = [ + master_ips.get('master_nic_ip') + for master_ips in not_fixed_master_infos.values()] + fixed_master_nic_ips = [ + master_ips.get('master_nic_ip') + for master_ips in fixed_master_infos.values()] + not_fixed_worker_nic_ips = [ + worker_ips.get('worker_nic_ip') + for worker_ips in not_fixed_worker_infos.values()] + fixed_worker_nic_ips = [ + worker_ips.get('worker_nic_ip') + for worker_ips in fixed_worker_infos.values()] + + for not_fixed_master_ip in not_fixed_master_nic_ips: + master_ip_str = \ + not_fixed_master_ip + ' master' + \ + not_fixed_master_ip.split('.')[-1] + master_hosts.append(master_ip_str) + + for fixed_master_nic_ip in fixed_master_nic_ips: + master_ip_str = \ + fixed_master_nic_ip + ' master' + \ + fixed_master_nic_ip.split('.')[-1] + master_hosts.append(master_ip_str) + + for not_fixed_worker_ip in not_fixed_worker_nic_ips: + worker_ip_str = \ + not_fixed_worker_ip + ' worker' + \ + not_fixed_worker_ip.split('.')[-1] + worker_hosts.append(worker_ip_str) + + for fixed_worker_nic_ip in fixed_worker_nic_ips: + worker_ip_str = \ + fixed_worker_nic_ip + ' worker' + \ + fixed_worker_nic_ip.split('.')[-1] + worker_hosts.append(worker_ip_str) + + hosts_str = '\\n'.join(master_hosts + worker_hosts) + + return hosts_str + + def heal_end(self, context, vnf_instance, + heal_vnf_request, grant, + grant_request, **kwargs): + vnf_package_path = vnflcm_utils._get_vnf_package_path( + context, vnf_instance.vnfd_id) + vnf_additional_params = \ + vnf_instance.instantiated_vnf_info.additional_params + master_node = \ + vnf_additional_params.get( + 'k8s_cluster_installation_param', {}).get( + 'master_node', {}) + worker_node = \ + vnf_additional_params.get( + 'k8s_cluster_installation_param', {}).get( + 'worker_node', {}) + if not heal_vnf_request.vnfc_instance_id: + self.instantiate_end(context, vnf_instance, + vnf_instance.instantiated_vnf_info, + grant=grant, + grant_request=grant_request, **kwargs) + else: + stack_id = vnf_instance.instantiated_vnf_info.instance_id + master_resource_name = self._get_node_resource_name( + vnf_additional_params, master_node) + worker_resource_name = self._get_node_resource_name( + vnf_additional_params, worker_node) + cluster_resource_name = master_node.get('cluster_cp_name') + 
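+# Heal of specific VNFC instances: fetch the SSH credentials and Heat stack
+# resources, create a new kubeadm token on a healthy master node, then
+# re-install the healed master/worker nodes and join them to the existing cluster.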
master_username, master_password = self._get_username_pwd( + heal_vnf_request, vnf_instance, 'master') + worker_username, worker_password = self._get_username_pwd( + heal_vnf_request, vnf_instance, 'worker') + vim_connection_info = self._get_vim_connection_info( + context, vnf_instance) + heatclient = hc.HeatClient(vim_connection_info.access_info) + + # get all target physical resource id + target_physical_resource_ids = \ + self._get_target_physical_resource_ids( + vnf_instance, heal_vnf_request) + self._heal_and_join_k8s_node( + heatclient, stack_id, target_physical_resource_ids, + vnf_additional_params, master_resource_name, + master_username, master_password, vnf_package_path, + worker_resource_name, worker_username, worker_password, + cluster_resource_name, master_node, worker_node) diff --git a/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/complex/complex_hot_top.yaml b/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/complex/complex_hot_top.yaml new file mode 100644 index 000000000..7c26438a4 --- /dev/null +++ b/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/complex/complex_hot_top.yaml @@ -0,0 +1,73 @@ +heat_template_version: 2013-05-23 +description: 'Simple Base HOT for Sample VNF' + +parameters: + nfv: + type: json + +resources: + master_instance_group: + type: OS::Heat::AutoScalingGroup + properties: + min_size: 3 + max_size: 5 + desired_capacity: 3 + resource: + type: complex_nested_master.yaml + properties: + flavor: { get_param: [ nfv, VDU, masterNode, flavor ] } + image: { get_param: [ nfv, VDU, masterNode, image ] } + net1: { get_param: [ nfv, CP, masterNode_CP1, network ] } + vip_port_ip: { get_attr: [vip_CP, fixed_ips, 0, ip_address] } + + master_instance_scale_out: + type: OS::Heat::ScalingPolicy + properties: + scaling_adjustment: 1 + auto_scaling_group_id: + get_resource: master_instance_group + adjustment_type: change_in_capacity + + master_instance_scale_in: + type: OS::Heat::ScalingPolicy + properties: + scaling_adjustment: -1 + auto_scaling_group_id: + get_resource: master_instance_group + adjustment_type: change_in_capacity + + worker_instance_group: + type: OS::Heat::AutoScalingGroup + properties: + min_size: 2 + max_size: 4 + desired_capacity: 2 + resource: + type: complex_nested_worker.yaml + properties: + flavor: { get_param: [ nfv, VDU, workerNode, flavor ] } + image: { get_param: [ nfv, VDU, workerNode, image ] } + net1: { get_param: [ nfv, CP, workerNode_CP2, network ] } + + worker_instance_scale_out: + type: OS::Heat::ScalingPolicy + properties: + scaling_adjustment: 1 + auto_scaling_group_id: + get_resource: worker_instance_group + adjustment_type: change_in_capacity + + worker_instance_scale_in: + type: OS::Heat::ScalingPolicy + properties: + scaling_adjustment: -1 + auto_scaling_group_id: + get_resource: worker_instance_group + adjustment_type: change_in_capacity + + vip_CP: + type: OS::Neutron::Port + properties: + network: net0 + +outputs: {} \ No newline at end of file diff --git a/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/complex/nested/complex_nested_master.yaml b/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/complex/nested/complex_nested_master.yaml new file mode 100644 index 000000000..71de90a72 --- /dev/null +++ b/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/complex/nested/complex_nested_master.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2013-05-23 +description: 'masterNode HOT for Sample VNF' + +parameters: + flavor: + type: string + image: + type: string + net1: + type: string + vip_port_ip: + type: string + 
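+# The vip_port_ip parameter carries the fixed IP of the vip_CP port created in
+# complex_hot_top.yaml. It is registered below as an allowed address pair on
+# masterNode_CP1 so that the keepalived/HAProxy virtual IP can be served from
+# any master node's port without being blocked by port security.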
+resources: + masterNode: + type: OS::Nova::Server + properties: + flavor: { get_param: flavor } + name: masterNode + image: { get_param: image } + networks: + - port: + get_resource: masterNode_CP1 + + masterNode_CP1: + type: OS::Neutron::Port + properties: + network: { get_param: net1 } + allowed_address_pairs: + - ip_address: { get_param: vip_port_ip } \ No newline at end of file diff --git a/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/complex/nested/complex_nested_worker.yaml b/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/complex/nested/complex_nested_worker.yaml new file mode 100644 index 000000000..777d3ed8e --- /dev/null +++ b/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/complex/nested/complex_nested_worker.yaml @@ -0,0 +1,26 @@ +heat_template_version: 2013-05-23 +description: 'workerNode HOT for Sample VNF' + +parameters: + flavor: + type: string + image: + type: string + net1: + type: string + +resources: + workerNode: + type: OS::Nova::Server + properties: + flavor: { get_param: flavor } + name: workerNode + image: { get_param: image } + networks: + - port: + get_resource: workerNode_CP2 + + workerNode_CP2: + type: OS::Neutron::Port + properties: + network: { get_param: net1 } \ No newline at end of file diff --git a/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/simple/nested/simple_nested_master.yaml b/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/simple/nested/simple_nested_master.yaml new file mode 100644 index 000000000..c62152d34 --- /dev/null +++ b/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/simple/nested/simple_nested_master.yaml @@ -0,0 +1,26 @@ +heat_template_version: 2013-05-23 +description: 'masterNode HOT for Sample VNF' + +parameters: + flavor: + type: string + image: + type: string + net1: + type: string + +resources: + masterNode: + type: OS::Nova::Server + properties: + flavor: { get_param: flavor } + name: masterNode + image: { get_param: image } + networks: + - port: + get_resource: masterNode_CP1 + + masterNode_CP1: + type: OS::Neutron::Port + properties: + network: { get_param: net1 } \ No newline at end of file diff --git a/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/simple/nested/simple_nested_worker.yaml b/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/simple/nested/simple_nested_worker.yaml new file mode 100644 index 000000000..777d3ed8e --- /dev/null +++ b/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/simple/nested/simple_nested_worker.yaml @@ -0,0 +1,26 @@ +heat_template_version: 2013-05-23 +description: 'workerNode HOT for Sample VNF' + +parameters: + flavor: + type: string + image: + type: string + net1: + type: string + +resources: + workerNode: + type: OS::Nova::Server + properties: + flavor: { get_param: flavor } + name: workerNode + image: { get_param: image } + networks: + - port: + get_resource: workerNode_CP2 + + workerNode_CP2: + type: OS::Neutron::Port + properties: + network: { get_param: net1 } \ No newline at end of file diff --git a/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/simple/simple_hot_top.yaml b/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/simple/simple_hot_top.yaml new file mode 100644 index 000000000..08dca9652 --- /dev/null +++ b/samples/mgmt_driver/kubernetes_vnf_package/BaseHOT/simple/simple_hot_top.yaml @@ -0,0 +1,67 @@ +heat_template_version: 2013-05-23 +description: 'Simple Base HOT for Sample VNF' + +parameters: + nfv: + type: json + +resources: + master_instance_group: + type: OS::Heat::AutoScalingGroup + properties: + min_size: 1 + max_size: 3 + 
desired_capacity: 1 + resource: + type: simple_nested_master.yaml + properties: + flavor: { get_param: [ nfv, VDU, masterNode, flavor ] } + image: { get_param: [ nfv, VDU, masterNode, image ] } + net1: { get_param: [ nfv, CP, masterNode_CP1, network ] } + + master_instance_scale_out: + type: OS::Heat::ScalingPolicy + properties: + scaling_adjustment: 1 + auto_scaling_group_id: + get_resource: master_instance_group + adjustment_type: change_in_capacity + + master_instance_scale_in: + type: OS::Heat::ScalingPolicy + properties: + scaling_adjustment: -1 + auto_scaling_group_id: + get_resource: master_instance_group + adjustment_type: change_in_capacity + + worker_instance_group: + type: OS::Heat::AutoScalingGroup + properties: + min_size: 2 + max_size: 4 + desired_capacity: 2 + resource: + type: simple_nested_worker.yaml + properties: + flavor: { get_param: [ nfv, VDU, workerNode, flavor ] } + image: { get_param: [ nfv, VDU, workerNode, image ] } + net1: { get_param: [ nfv, CP, workerNode_CP2, network ] } + + worker_instance_scale_out: + type: OS::Heat::ScalingPolicy + properties: + scaling_adjustment: 1 + auto_scaling_group_id: + get_resource: worker_instance_group + adjustment_type: change_in_capacity + + worker_instance_scale_in: + type: OS::Heat::ScalingPolicy + properties: + scaling_adjustment: -1 + auto_scaling_group_id: + get_resource: worker_instance_group + adjustment_type: change_in_capacity + +outputs: {} \ No newline at end of file diff --git a/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_df_complex.yaml b/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_df_complex.yaml new file mode 100644 index 000000000..aafbc8685 --- /dev/null +++ b/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_df_complex.yaml @@ -0,0 +1,254 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: Simple deployment flavour for Sample VNF + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + - sample_kubernetes_types.yaml + +topology_template: + inputs: + id: + type: string + vendor: + type: string + version: + type: version + descriptor_id: + type: string + descriptor_version: + type: string + provider: + type: string + product_name: + type: string + software_version: + type: string + vnfm_info: + type: list + entry_schema: + type: string + flavour_id: + type: string + flavour_description: + type: string + + substitution_mappings: + node_type: company.provider.VNF + properties: + flavour_id: complex + requirements: + virtual_link_external1_1: [ masterNode_CP1, virtual_link ] + virtual_link_external1_2: [ workerNode_CP2, virtual_link ] + + node_templates: + VNF: + type: company.provider.VNF + properties: + flavour_description: A complex flavour + interfaces: + Vnflcm: + instantiate_end: + implementation: mgmt-drivers-kubernetes + terminate_end: + implementation: mgmt-drivers-kubernetes + heal_start: + implementation: mgmt-drivers-kubernetes + heal_end: + implementation: mgmt-drivers-kubernetes + scale_start: + implementation: mgmt-drivers-kubernetes + scale_end: + implementation: mgmt-drivers-kubernetes + artifacts: + mgmt-drivers-kubernetes: + description: Management driver for kubernetes cluster + type: tosca.artifacts.Implementation.Python + file: Scripts/kubernetes_mgmt.py + + masterNode: + type: tosca.nodes.nfv.Vdu.Compute + properties: + name: masterNode + description: masterNode compute node + vdu_profile: + min_number_of_instances: 3 + max_number_of_instances: 5 + sw_image_data: + 
name: Image for masterNode HA kubernetes + version: '20.04' + checksum: + algorithm: sha-512 + hash: fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452 + container_format: bare + disk_format: qcow2 + min_disk: 0 GB + size: 2 GB + + artifacts: + sw_image: + type: tosca.artifacts.nfv.SwImage + file: ../Files/images/ubuntu-20.04-server-cloudimg-amd64.img + + capabilities: + virtual_compute: + properties: + requested_additional_capabilities: + properties: + requested_additional_capability_name: m1.medium + support_mandatory: true + target_performance_parameters: + entry_schema: test + virtual_memory: + virtual_mem_size: 4 GB + virtual_cpu: + num_virtual_cpu: 2 + virtual_local_storage: + - size_of_storage: 45 GB + + workerNode: + type: tosca.nodes.nfv.Vdu.Compute + properties: + name: workerNode + description: workerNode compute node + vdu_profile: + min_number_of_instances: 2 + max_number_of_instances: 4 + sw_image_data: + name: Image for workerNode HA kubernetes + version: '20.04' + checksum: + algorithm: sha-512 + hash: fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452 + container_format: bare + disk_format: qcow2 + min_disk: 0 GB + size: 2 GB + + artifacts: + sw_image: + type: tosca.artifacts.nfv.SwImage + file: ../Files/images/ubuntu-20.04-server-cloudimg-amd64.img + + capabilities: + virtual_compute: + properties: + requested_additional_capabilities: + properties: + requested_additional_capability_name: m1.medium + support_mandatory: true + target_performance_parameters: + entry_schema: test + virtual_memory: + virtual_mem_size: 4 GB + virtual_cpu: + num_virtual_cpu: 2 + virtual_local_storage: + - size_of_storage: 45 GB + + masterNode_CP1: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 0 + requirements: + - virtual_binding: masterNode + + workerNode_CP2: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 0 + requirements: + - virtual_binding: workerNode + + policies: + - scaling_aspects: + type: tosca.policies.nfv.ScalingAspects + properties: + aspects: + master_instance: + name: master_instance + description: master_instance scaling aspect + max_scale_level: 2 + step_deltas: + - delta_1 + worker_instance: + name: worker_instance + description: worker_instance scaling aspect + max_scale_level: 2 + step_deltas: + - delta_1 + + - masterNode_initial_delta: + type: tosca.policies.nfv.VduInitialDelta + properties: + initial_delta: + number_of_instances: 3 + targets: [ masterNode ] + + - workerNode_initial_delta: + type: tosca.policies.nfv.VduInitialDelta + properties: + initial_delta: + number_of_instances: 2 + targets: [ workerNode ] + + - masterNode_scaling_deltas: + type: tosca.policies.nfv.VduScalingAspectDeltas + properties: + aspect: master_instance + deltas: + delta_1: + number_of_instances: 1 + targets: [ masterNode ] + + - workerNode_scaling_deltas: + type: tosca.policies.nfv.VduScalingAspectDeltas + properties: + aspect: worker_instance + deltas: + delta_1: + number_of_instances: 1 + targets: [ workerNode ] + + - instantiation_levels: + type: tosca.policies.nfv.InstantiationLevels + properties: + levels: + instantiation_level_1: + description: Smallest size + scale_info: + master_instance: + scale_level: 0 + worker_instance: + scale_level: 0 + instantiation_level_2: + description: Largest size + scale_info: + master_instance: + scale_level: 2 + worker_instance: + 
scale_level: 2 + default_level: instantiation_level_1 + + - masterNode_instantiation_levels: + type: tosca.policies.nfv.VduInstantiationLevels + properties: + levels: + instantiation_level_1: + number_of_instances: 3 + instantiation_level_2: + number_of_instances: 5 + targets: [ masterNode ] + + - workerNode_instantiation_levels: + type: tosca.policies.nfv.VduInstantiationLevels + properties: + levels: + instantiation_level_1: + number_of_instances: 2 + instantiation_level_2: + number_of_instances: 4 + targets: [ workerNode ] \ No newline at end of file diff --git a/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_df_simple.yaml b/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_df_simple.yaml new file mode 100644 index 000000000..a0ea57421 --- /dev/null +++ b/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_df_simple.yaml @@ -0,0 +1,254 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: Simple deployment flavour for Sample VNF + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + - sample_kubernetes_types.yaml + +topology_template: + inputs: + id: + type: string + vendor: + type: string + version: + type: version + descriptor_id: + type: string + descriptor_version: + type: string + provider: + type: string + product_name: + type: string + software_version: + type: string + vnfm_info: + type: list + entry_schema: + type: string + flavour_id: + type: string + flavour_description: + type: string + + substitution_mappings: + node_type: company.provider.VNF + properties: + flavour_id: simple + requirements: + virtual_link_external1_1: [ masterNode_CP1, virtual_link ] + virtual_link_external1_2: [ workerNode_CP2, virtual_link ] + + node_templates: + VNF: + type: company.provider.VNF + properties: + flavour_description: A simple flavour + interfaces: + Vnflcm: + instantiate_end: + implementation: mgmt-drivers-kubernetes + terminate_end: + implementation: mgmt-drivers-kubernetes + heal_start: + implementation: mgmt-drivers-kubernetes + heal_end: + implementation: mgmt-drivers-kubernetes + scale_start: + implementation: mgmt-drivers-kubernetes + scale_end: + implementation: mgmt-drivers-kubernetes + artifacts: + mgmt-drivers-kubernetes: + description: Management driver for kubernetes cluster + type: tosca.artifacts.Implementation.Python + file: Scripts/kubernetes_mgmt.py + + masterNode: + type: tosca.nodes.nfv.Vdu.Compute + properties: + name: masterNode + description: masterNode compute node + vdu_profile: + min_number_of_instances: 1 + max_number_of_instances: 3 + sw_image_data: + name: Image for masterNode kubernetes + version: '20.04' + checksum: + algorithm: sha-512 + hash: fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452 + container_format: bare + disk_format: qcow2 + min_disk: 0 GB + size: 2 GB + + artifacts: + sw_image: + type: tosca.artifacts.nfv.SwImage + file: ../Files/images/ubuntu-20.04-server-cloudimg-amd64.img + + capabilities: + virtual_compute: + properties: + requested_additional_capabilities: + properties: + requested_additional_capability_name: m1.medium + support_mandatory: true + target_performance_parameters: + entry_schema: test + virtual_memory: + virtual_mem_size: 4 GB + virtual_cpu: + num_virtual_cpu: 2 + virtual_local_storage: + - size_of_storage: 45 GB + + workerNode: + type: tosca.nodes.nfv.Vdu.Compute + properties: + name: workerNode + description: workerNode 
compute node + vdu_profile: + min_number_of_instances: 2 + max_number_of_instances: 4 + sw_image_data: + name: Image for workerNode kubernetes + version: '20.04' + checksum: + algorithm: sha-512 + hash: fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452 + container_format: bare + disk_format: qcow2 + min_disk: 0 GB + size: 2 GB + + artifacts: + sw_image: + type: tosca.artifacts.nfv.SwImage + file: ../Files/images/ubuntu-20.04-server-cloudimg-amd64.img + + capabilities: + virtual_compute: + properties: + requested_additional_capabilities: + properties: + requested_additional_capability_name: m1.medium + support_mandatory: true + target_performance_parameters: + entry_schema: test + virtual_memory: + virtual_mem_size: 4 GB + virtual_cpu: + num_virtual_cpu: 2 + virtual_local_storage: + - size_of_storage: 45 GB + + masterNode_CP1: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 0 + requirements: + - virtual_binding: masterNode + + workerNode_CP2: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 0 + requirements: + - virtual_binding: workerNode + + policies: + - scaling_aspects: + type: tosca.policies.nfv.ScalingAspects + properties: + aspects: + master_instance: + name: master_instance + description: master_instance scaling aspect + max_scale_level: 2 + step_deltas: + - delta_1 + worker_instance: + name: worker_instance + description: worker_instance scaling aspect + max_scale_level: 2 + step_deltas: + - delta_1 + + - masterNode_initial_delta: + type: tosca.policies.nfv.VduInitialDelta + properties: + initial_delta: + number_of_instances: 1 + targets: [ masterNode ] + + - workerNode_initial_delta: + type: tosca.policies.nfv.VduInitialDelta + properties: + initial_delta: + number_of_instances: 2 + targets: [ workerNode ] + + - masterNode_scaling_deltas: + type: tosca.policies.nfv.VduScalingAspectDeltas + properties: + aspect: master_instance + deltas: + delta_1: + number_of_instances: 1 + targets: [ masterNode ] + + - workerNode_scaling_deltas: + type: tosca.policies.nfv.VduScalingAspectDeltas + properties: + aspect: worker_instance + deltas: + delta_1: + number_of_instances: 1 + targets: [ workerNode ] + + - instantiation_levels: + type: tosca.policies.nfv.InstantiationLevels + properties: + levels: + instantiation_level_1: + description: Smallest size + scale_info: + master_instance: + scale_level: 0 + worker_instance: + scale_level: 0 + instantiation_level_2: + description: Largest size + scale_info: + master_instance: + scale_level: 2 + worker_instance: + scale_level: 2 + default_level: instantiation_level_1 + + - masterNode_instantiation_levels: + type: tosca.policies.nfv.VduInstantiationLevels + properties: + levels: + instantiation_level_1: + number_of_instances: 1 + instantiation_level_2: + number_of_instances: 3 + targets: [ masterNode ] + + - workerNode_instantiation_levels: + type: tosca.policies.nfv.VduInstantiationLevels + properties: + levels: + instantiation_level_1: + number_of_instances: 2 + instantiation_level_2: + number_of_instances: 4 + targets: [ workerNode ] \ No newline at end of file diff --git a/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_top.vnfd.yaml b/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_top.vnfd.yaml new file mode 100644 index 000000000..d936fb6cf --- /dev/null +++ b/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_top.vnfd.yaml @@ -0,0 
+1,32 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: Sample VNF. + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + - sample_kubernetes_types.yaml + - sample_kubernetes_df_simple.yaml + - sample_kubernetes_df_complex.yaml + +topology_template: + inputs: + selected_flavour: + type: string + description: VNF deployment flavour selected by the consumer. It is provided in the API + + node_templates: + VNF: + type: company.provider.VNF + properties: + flavour_id: { get_input: selected_flavour } + descriptor_id: b1db0ce7-ebca-1fb7-95ed-4840d70a1163 + provider: Company + product_name: Sample VNF + software_version: '1.0' + descriptor_version: '1.0' + vnfm_info: + - Tacker + requirements: + #- virtual_link_external # mapped in lower-level templates + #- virtual_link_internal # mapped in lower-level templates \ No newline at end of file diff --git a/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_types.yaml b/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_types.yaml new file mode 100644 index 000000000..a43f8500b --- /dev/null +++ b/samples/mgmt_driver/kubernetes_vnf_package/Definitions/sample_kubernetes_types.yaml @@ -0,0 +1,63 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: VNF type definition + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + +node_types: + company.provider.VNF: + derived_from: tosca.nodes.nfv.VNF + properties: + id: + type: string + description: ID of this VNF + default: vnf_id + vendor: + type: string + description: name of the vendor who generate this VNF + default: vendor + version: + type: version + description: version of the software for this VNF + default: 1.0 + descriptor_id: + type: string + constraints: [ valid_values: [ b1db0ce7-ebca-1fb7-95ed-4840d70a1163 ] ] + default: b1db0ce7-ebca-1fb7-95ed-4840d70a1163 + descriptor_version: + type: string + constraints: [ valid_values: [ '1.0' ] ] + default: '1.0' + provider: + type: string + constraints: [ valid_values: [ 'Company' ] ] + default: 'Company' + product_name: + type: string + constraints: [ valid_values: [ 'Sample VNF' ] ] + default: 'Sample VNF' + software_version: + type: string + constraints: [ valid_values: [ '1.0' ] ] + default: '1.0' + vnfm_info: + type: list + entry_schema: + type: string + constraints: [ valid_values: [ Tacker ] ] + default: [ Tacker ] + flavour_id: + type: string + constraints: [ valid_values: [ simple,complex ] ] + default: simple + flavour_description: + type: string + default: "This is the default flavour description" + requirements: + - virtual_link_internal: + capability: tosca.capabilities.nfv.VirtualLinkable + interfaces: + Vnflcm: + type: tosca.interfaces.nfv.Vnflcm \ No newline at end of file diff --git a/samples/mgmt_driver/kubernetes_vnf_package/TOSCA-Metadata/TOSCA.meta b/samples/mgmt_driver/kubernetes_vnf_package/TOSCA-Metadata/TOSCA.meta new file mode 100644 index 000000000..49898635d --- /dev/null +++ b/samples/mgmt_driver/kubernetes_vnf_package/TOSCA-Metadata/TOSCA.meta @@ -0,0 +1,17 @@ +TOSCA-Meta-File-Version: 1.0 +Created-by: Dummy User +CSAR-Version: 1.1 +Entry-Definitions: Definitions/sample_kubernetes_top.vnfd.yaml + +Name: Files/images/ubuntu-20.04-server-cloudimg-amd64.img +Content-Type: application/x-iso9066-image + +Name: Scripts/install_k8s_cluster.sh +Content-Type: application/sh +Algorithm: SHA-256 +Hash: 2489f6162817cce794a6f19a88c3c76ce83fa19cfcb75ad1204d76aaba4a9d1c + +Name: 
Scripts/kubernetes_mgmt.py +Content-Type: text/x-python +Algorithm: SHA-256 +Hash: b292bc47d4c28a62b1261e6481498118e5dd93aa988c498568560f67c510003b \ No newline at end of file diff --git a/samples/mgmt_driver/kubernetes_vnf_package/UserData/__init__.py b/samples/mgmt_driver/kubernetes_vnf_package/UserData/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/mgmt_driver/kubernetes_vnf_package/UserData/k8s_cluster_user_data.py b/samples/mgmt_driver/kubernetes_vnf_package/UserData/k8s_cluster_user_data.py new file mode 100644 index 000000000..0bca8c228 --- /dev/null +++ b/samples/mgmt_driver/kubernetes_vnf_package/UserData/k8s_cluster_user_data.py @@ -0,0 +1,35 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from tacker.vnfm.lcm_user_data.abstract_user_data import AbstractUserData +import tacker.vnfm.lcm_user_data.utils as UserDataUtil + + +class KubernetesClusterUserData(AbstractUserData): + @staticmethod + def instantiate(base_hot_dict=None, + vnfd_dict=None, + inst_req_info=None, + grant_info=None): + api_param = UserDataUtil.get_diff_base_hot_param_from_api( + base_hot_dict, inst_req_info) + initial_param_dict = \ + UserDataUtil.create_initial_param_server_port_dict( + base_hot_dict) + vdu_flavor_dict = \ + UserDataUtil.create_vdu_flavor_capability_name_dict(vnfd_dict) + vdu_image_dict = UserDataUtil.create_sw_image_dict(vnfd_dict) + cpd_vl_dict = UserDataUtil.create_network_dict( + inst_req_info, initial_param_dict) + final_param_dict = UserDataUtil.create_final_param_dict( + initial_param_dict, vdu_flavor_dict, vdu_image_dict, cpd_vl_dict) + return {**final_param_dict, **api_param}
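For reference, the user data module above and the Mgmt Driver are tied together
through the ``additionalParams`` of the instantiate request:
``lcm-operation-user-data`` and ``lcm-operation-user-data-class`` select
``KubernetesClusterUserData``, while ``k8s_cluster_installation_param`` carries
the values the Mgmt Driver reads back during heal and scale (``master_node``,
``worker_node``, ``proxy``, ``script_path``, ``vim_name``). The snippet below is
only a sketch assembled from the keys referenced in the driver code; the concrete
values are placeholders, and credential-related keys are omitted because they are
resolved separately by the driver.

.. code-block:: python

    # Sketch of additionalParams for the complex (HA) deployment flavour.
    # Key names mirror those read by the Mgmt Driver above; values are examples.
    additional_params = {
        "lcm-operation-user-data": "./UserData/k8s_cluster_user_data.py",
        "lcm-operation-user-data-class": "KubernetesClusterUserData",
        "k8s_cluster_installation_param": {
            "script_path": "Scripts/install_k8s_cluster.sh",
            "vim_name": "kubernetes_vim_complex",   # placeholder name
            "master_node": {
                "aspect_id": "master_instance",
                "nic_cp_name": "masterNode_CP1",
                "cluster_cp_name": "vip_CP",
                "pod_cidr": "192.168.0.0/16",       # driver default if omitted
                "cluster_cidr": "10.96.0.0/12",     # driver default if omitted
            },
            "worker_node": {
                "aspect_id": "worker_instance",
                "nic_cp_name": "workerNode_CP2",
            },
            "proxy": {
                "http_proxy": "http://proxy.example.com:8080",    # placeholder
                "https_proxy": "http://proxy.example.com:8080",   # placeholder
                "no_proxy": "192.168.246.0/24,10.0.0.1",          # placeholder
                "k8s_node_cidr": "10.10.0.0/24",                  # placeholder
            },
        },
    }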