diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/BaseHOT/simple/base_hot_top.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/BaseHOT/simple/base_hot_top.yaml new file mode 100644 index 000000000..c14cb1b2d --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/BaseHOT/simple/base_hot_top.yaml @@ -0,0 +1,54 @@ +heat_template_version: 2013-05-23 +description: 'Simple Base HOT for Sample VNF' + +parameters: + nfv: + type: json + +resources: + master_instance_group: + type: OS::Heat::AutoScalingGroup + properties: + min_size: 1 + max_size: 1 + desired_capacity: 1 + resource: + type: base_hot_nested_master.yaml + properties: + flavor: { get_param: [ nfv, VDU, masterNode, flavor ] } + image: { get_param: [ nfv, VDU, masterNode, image ] } + net1: { get_param: [ nfv, CP, masterNode_CP1, network ] } + + worker_instance_group: + type: OS::Heat::AutoScalingGroup + properties: + min_size: 1 + max_size: 2 + desired_capacity: 1 + resource: + type: base_hot_nested_worker.yaml + properties: + flavor: { get_param: [ nfv, VDU, workerNode, flavor ] } + image: { get_param: [ nfv, VDU, workerNode, image ] } + net1: { get_param: [ nfv, CP, workerNode_CP1, network ] } + net2: { get_param: [ nfv, CP, workerNode_CP2, network ] } + net3: { get_param: [ nfv, CP, workerNode_CP3, network ] } + net4: { get_param: [ nfv, CP, workerNode_CP4, network ] } + + worker_instance_scale_out: + type: OS::Heat::ScalingPolicy + properties: + scaling_adjustment: 1 + auto_scaling_group_id: + get_resource: worker_instance_group + adjustment_type: change_in_capacity + + worker_instance_scale_in: + type: OS::Heat::ScalingPolicy + properties: + scaling_adjustment: -1 + auto_scaling_group_id: + get_resource: worker_instance_group + adjustment_type: change_in_capacity + +outputs: {} diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/BaseHOT/simple/nested/base_hot_nested_master.yaml 
b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/BaseHOT/simple/nested/base_hot_nested_master.yaml new file mode 100644 index 000000000..afcf24e20 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/BaseHOT/simple/nested/base_hot_nested_master.yaml @@ -0,0 +1,26 @@ +heat_template_version: 2013-05-23 +description: 'masterNode HOT for Sample VNF' + +parameters: + flavor: + type: string + image: + type: string + net1: + type: string + +resources: + masterNode: + type: OS::Nova::Server + properties: + flavor: { get_param: flavor } + name: masterNode + image: { get_param: image } + networks: + - port: + get_resource: masterNode_CP1 + + masterNode_CP1: + type: OS::Neutron::Port + properties: + network: { get_param: net1 } diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/BaseHOT/simple/nested/base_hot_nested_worker.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/BaseHOT/simple/nested/base_hot_nested_worker.yaml new file mode 100644 index 000000000..e20824bb8 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/BaseHOT/simple/nested/base_hot_nested_worker.yaml @@ -0,0 +1,53 @@ +heat_template_version: 2013-05-23 +description: 'workerNode HOT for Sample VNF' + +parameters: + flavor: + type: string + image: + type: string + net1: + type: string + net2: + type: string + net3: + type: string + net4: + type: string + +resources: + workerNode: + type: OS::Nova::Server + properties: + flavor: { get_param: flavor } + name: workerNode + image: { get_param: image } + networks: + - port: + get_resource: workerNode_CP1 + - port: + get_resource: workerNode_CP2 + - port: + get_resource: workerNode_CP3 + - port: + get_resource: workerNode_CP4 + + workerNode_CP1: + type: OS::Neutron::Port + properties: + network: { get_param: net1 } + + workerNode_CP2: + type: OS::Neutron::Port + properties: + network: { get_param: net2 } + + 
workerNode_CP3: + type: OS::Neutron::Port + properties: + network: { get_param: net3 } + + workerNode_CP4: + type: OS::Neutron::Port + properties: + network: { get_param: net4 } diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/etsi_nfv_sol001_common_types.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/etsi_nfv_sol001_common_types.yaml new file mode 100644 index 000000000..15ab39b13 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/etsi_nfv_sol001_common_types.yaml @@ -0,0 +1,202 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 +description: ETSI NFV SOL 001 common types definitions version 2.6.1 +metadata: + template_name: etsi_nfv_sol001_common_types + template_author: ETSI_NFV + template_version: 2.6.1 + +data_types: + tosca.datatypes.nfv.L2AddressData: + derived_from: tosca.datatypes.Root + description: Describes the information on the MAC addresses to be assigned to a connection point. + properties: + mac_address_assignment: + type: boolean + description: Specifies if the address assignment is the responsibility of management and orchestration function or not. If it is set to True, it is the management and orchestration function responsibility + required: true + + tosca.datatypes.nfv.L3AddressData: + derived_from: tosca.datatypes.Root + description: Provides information about Layer 3 level addressing scheme and parameters applicable to a CP + properties: + ip_address_assignment: + type: boolean + description: Specifies if the address assignment is the responsibility of management and orchestration function or not. 
If it is set to True, it is the management and orchestration function responsibility + required: true + floating_ip_activated: + type: boolean + description: Specifies if the floating IP scheme is activated on the Connection Point or not + required: true + ip_address_type: + type: string + description: Defines address type. The address type should be aligned with the address type supported by the layer_protocols properties of the parent VnfExtCp + required: false + constraints: + - valid_values: [ ipv4, ipv6 ] + number_of_ip_address: + type: integer + description: Minimum number of IP addresses to be assigned + required: false + constraints: + - greater_than: 0 + + tosca.datatypes.nfv.AddressData: + derived_from: tosca.datatypes.Root + description: Describes information about the addressing scheme and parameters applicable to a CP + properties: + address_type: + type: string + description: Describes the type of the address to be assigned to a connection point. The content type shall be aligned with the address type supported by the layerProtocol property of the connection point + required: true + constraints: + - valid_values: [ mac_address, ip_address ] + l2_address_data: + type: tosca.datatypes.nfv.L2AddressData + description: Provides the information on the MAC addresses to be assigned to a connection point. + required: false + l3_address_data: + type: tosca.datatypes.nfv.L3AddressData + description: Provides the information on the IP addresses to be assigned to a connection point + required: false + + tosca.datatypes.nfv.ConnectivityType: + derived_from: tosca.datatypes.Root + description: describes additional connectivity information of a virtualLink + properties: + layer_protocols: + type: list + description: Identifies the protocol a virtualLink gives access to (ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire).The top layer protocol of the virtualLink protocol stack shall always be provided. 
The lower layer protocols may be included when there are specific requirements on these layers. + required: true + entry_schema: + type: string + constraints: + - valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ] + flow_pattern: + type: string + description: Identifies the flow pattern of the connectivity + required: false + constraints: + - valid_values: [ line, tree, mesh ] + + tosca.datatypes.nfv.LinkBitrateRequirements: + derived_from: tosca.datatypes.Root + description: describes the requirements in terms of bitrate for a virtual link + properties: + root: + type: integer # in bits per second + description: Specifies the throughput requirement in bits per second of the link (e.g. bitrate of E-Line, root bitrate of E-Tree, aggregate capacity of E-LAN). + required: true + constraints: + - greater_or_equal: 0 + leaf: + type: integer # in bits per second + description: Specifies the throughput requirement in bits per second of leaf connections to the link when applicable to the connectivity type (e.g. for E-Tree and E LAN branches). + required: false + constraints: + - greater_or_equal: 0 + + tosca.datatypes.nfv.CpProtocolData: + derived_from: tosca.datatypes.Root + description: Describes and associates the protocol layer that a CP uses together with other protocol and connection point information + properties: + associated_layer_protocol: + type: string + required: true + description: One of the values of the property layer_protocols of the CP + constraints: + - valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ] + address_data: + type: list + description: Provides information on the addresses to be assigned to the CP + entry_schema: + type: tosca.datatypes.nfv.AddressData + required: false + + tosca.datatypes.nfv.VnfProfile: + derived_from: tosca.datatypes.Root + description: describes a profile for instantiating VNFs of a particular NS DF according to a specific VNFD and VNF DF. 
+ properties: + instantiation_level: + type: string + description: Identifier of the instantiation level of the VNF DF to be used for instantiation. If not present, the default instantiation level as declared in the VNFD shall be used. + required: false + min_number_of_instances: + type: integer + description: Minimum number of instances of the VNF based on this VNFD that is permitted to exist for this VnfProfile. + required: true + constraints: + - greater_or_equal: 0 + max_number_of_instances: + type: integer + description: Maximum number of instances of the VNF based on this VNFD that is permitted to exist for this VnfProfile. + required: true + constraints: + - greater_or_equal: 0 + + tosca.datatypes.nfv.Qos: + derived_from: tosca.datatypes.Root + description: describes QoS data for a given VL used in a VNF deployment flavour + properties: + latency: + type: scalar-unit.time #Number + description: Specifies the maximum latency + required: true + constraints: + - greater_than: 0 s + packet_delay_variation: + type: scalar-unit.time #Number + description: Specifies the maximum jitter + required: true + constraints: + - greater_or_equal: 0 s + packet_loss_ratio: + type: float + description: Specifies the maximum packet loss ratio + required: false + constraints: + - in_range: [ 0.0, 1.0 ] + +capability_types: + tosca.capabilities.nfv.VirtualLinkable: + derived_from: tosca.capabilities.Node + description: A node type that includes the VirtualLinkable capability indicates that it can be pointed by tosca.relationships.nfv.VirtualLinksTo relationship type + +relationship_types: + tosca.relationships.nfv.VirtualLinksTo: + derived_from: tosca.relationships.DependsOn + description: Represents an association relationship between the VduCp and VnfVirtualLink node types + valid_target_types: [ tosca.capabilities.nfv.VirtualLinkable ] + +node_types: + tosca.nodes.nfv.Cp: + derived_from: tosca.nodes.Root + description: Provides information regarding the purpose of the 
connection point + properties: + layer_protocols: + type: list + description: Identifies which protocol the connection point uses for connectivity purposes + required: true + entry_schema: + type: string + constraints: + - valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ] + role: #Name in ETSI NFV IFA011 v0.7.3: cpRole + type: string + description: Identifies the role of the port in the context of the traffic flow patterns in the VNF or parent NS + required: false + constraints: + - valid_values: [ root, leaf ] + description: + type: string + description: Provides human-readable information on the purpose of the connection point + required: false + protocol: + type: list + description: Provides information on the addresses to be assigned to the connection point(s) instantiated from this Connection Point Descriptor + required: false + entry_schema: + type: tosca.datatypes.nfv.CpProtocolData + trunk_mode: + type: boolean + description: Provides information about whether the CP instantiated from this Cp is in Trunk mode (802.1Q or other), When operating in "trunk mode", the Cp is capable of carrying traffic for several VLANs. Absence of this property implies that trunkMode is not configured for the Cp i.e. It is equivalent to boolean value "false". 
+ required: false diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/etsi_nfv_sol001_vnfd_types.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/etsi_nfv_sol001_vnfd_types.yaml new file mode 100644 index 000000000..23cdcc7ff --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/etsi_nfv_sol001_vnfd_types.yaml @@ -0,0 +1,1465 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 +description: ETSI NFV SOL 001 vnfd types definitions version 2.6.1 +metadata: + template_name: etsi_nfv_sol001_vnfd_types + template_author: ETSI_NFV + template_version: 2.6.1 + +imports: + - ./etsi_nfv_sol001_common_types.yaml + +data_types: + tosca.datatypes.nfv.VirtualNetworkInterfaceRequirements: + derived_from: tosca.datatypes.Root + description: Describes requirements on a virtual network interface + properties: + name: + type: string + description: Provides a human readable name for the requirement. + required: false + description: + type: string + description: Provides a human readable description of the requirement. + required: false + support_mandatory: + type: boolean + description: Indicates whether fulfilling the constraint is mandatory (TRUE) for successful operation or desirable (FALSE). + required: true + network_interface_requirements: + type: map + description: The network interface requirements. A map of strings that contain a set of key-value pairs that describes the hardware platform specific network interface deployment requirements. + required: true + entry_schema: + type: string + nic_io_requirements: + type: tosca.datatypes.nfv.LogicalNodeData + description: references (couples) the CP with any logical node I/O requirements (for network devices) that may have been created. 
Linking these attributes is necessary so that so that I/O requirements that need to be articulated at the logical node level can be associated with the network interface requirements associated with the CP. + required: false + + tosca.datatypes.nfv.RequestedAdditionalCapability: + derived_from: tosca.datatypes.Root + description: describes requested additional capability for a particular VDU + properties: + requested_additional_capability_name: + type: string + description: Identifies a requested additional capability for the VDU. + required: true + support_mandatory: + type: boolean + description: Indicates whether the requested additional capability is mandatory for successful operation. + required: true + min_requested_additional_capability_version: + type: string + description: Identifies the minimum version of the requested additional capability. + required: false + preferred_requested_additional_capability_version: + type: string + description: Identifies the preferred version of the requested additional capability. + required: false + target_performance_parameters: + type: map + description: Identifies specific attributes, dependent on the requested additional capability type. + required: true + entry_schema: + type: string + + tosca.datatypes.nfv.VirtualMemory: + derived_from: tosca.datatypes.Root + description: supports the specification of requirements related to virtual memory of a virtual compute resource + properties: + virtual_mem_size: + type: scalar-unit.size + description: Amount of virtual memory. + required: true + virtual_mem_oversubscription_policy: + type: string + description: The memory core oversubscription policy in terms of virtual memory to physical memory on the platform. + required: false + vdu_mem_requirements: + type: map + description: The hardware platform specific VDU memory requirements. A map of strings that contains a set of key-value pairs that describes hardware platform specific VDU memory requirements. 
+ required: false + entry_schema: + type: string + numa_enabled: + type: boolean + description: It specifies the memory allocation to be cognisant of the relevant process/core allocation. + required: false + default: false + + tosca.datatypes.nfv.VirtualCpu: + derived_from: tosca.datatypes.Root + description: Supports the specification of requirements related to virtual CPU(s) of a virtual compute resource + properties: + cpu_architecture: + type: string + description: CPU architecture type. Examples are x86, ARM + required: false + num_virtual_cpu: + type: integer + description: Number of virtual CPUs + required: true + constraints: + - greater_than: 0 + virtual_cpu_clock: + type: scalar-unit.frequency + description: Minimum virtual CPU clock rate + required: false + virtual_cpu_oversubscription_policy: + type: string + description: CPU core oversubscription policy e.g. the relation of virtual CPU cores to physical CPU cores/threads. + required: false + vdu_cpu_requirements: + type: map + description: The hardware platform specific VDU CPU requirements. A map of strings that contains a set of key-value pairs describing VDU CPU specific hardware platform requirements. + required: false + entry_schema: + type: string + virtual_cpu_pinning: + type: tosca.datatypes.nfv.VirtualCpuPinning + description: The virtual CPU pinning configuration for the virtualised compute resource. + required: false + + tosca.datatypes.nfv.VirtualCpuPinning: + derived_from: tosca.datatypes.Root + description: Supports the specification of requirements related to the virtual CPU pinning configuration of a virtual compute resource + properties: + virtual_cpu_pinning_policy: + type: string + description: 'Indicates the policy for CPU pinning. The policy can take values of "static" or "dynamic". In case of "dynamic" the allocation of virtual CPU cores to logical CPU cores is decided by the VIM. (e.g.: SMT (Simultaneous Multi-Threading) requirements). 
In case of "static" the allocation is requested to be according to the virtual_cpu_pinning_rule.' + required: false + constraints: + - valid_values: [ static, dynamic ] + virtual_cpu_pinning_rule: + type: list + description: Provides the list of rules for allocating virtual CPU cores to logical CPU cores/threads + required: false + entry_schema: + type: string + + tosca.datatypes.nfv.VnfcConfigurableProperties: + derived_from: tosca.datatypes.Root + description: Defines the configurable properties of a VNFC + # properties: + # additional_vnfc_configurable_properties: + # type: tosca.datatypes.nfv.VnfcAdditionalConfigurableProperties + # description: Describes additional configuration for VNFC that + # can be modified using the ModifyVnfInfo operation + # required: false + # derived types are expected to introduce + # additional_vnfc_configurable_properties with its type derived from + # tosca.datatypes.nfv.VnfcAdditionalConfigurableProperties + + tosca.datatypes.nfv.VnfcAdditionalConfigurableProperties: + derived_from: tosca.datatypes.Root + description: VnfcAdditionalConfigurableProperties type is an empty base type for deriving data types for describing additional configurable properties for a given VNFC. + + tosca.datatypes.nfv.VduProfile: + derived_from: tosca.datatypes.Root + description: describes additional instantiation data for a given Vdu.Compute used in a specific deployment flavour. + properties: + min_number_of_instances: + type: integer + description: Minimum number of instances of the VNFC based on this Vdu.Compute that is permitted to exist for a particular VNF deployment flavour. + required: true + constraints: + - greater_or_equal: 0 + max_number_of_instances: + type: integer + description: Maximum number of instances of the VNFC based on this Vdu.Compute that is permitted to exist for a particular VNF deployment flavour. 
+ required: true + constraints: + - greater_or_equal: 0 + + tosca.datatypes.nfv.VlProfile: + derived_from: tosca.datatypes.Root + description: Describes additional instantiation data for a given VL used in a specific VNF deployment flavour. + properties: + max_bitrate_requirements: + type: tosca.datatypes.nfv.LinkBitrateRequirements + description: Specifies the maximum bitrate requirements for a VL instantiated according to this profile. + required: true + min_bitrate_requirements: + type: tosca.datatypes.nfv.LinkBitrateRequirements + description: Specifies the minimum bitrate requirements for a VL instantiated according to this profile. + required: true + qos: + type: tosca.datatypes.nfv.Qos + description: Specifies the QoS requirements of a VL instantiated according to this profile. + required: false + virtual_link_protocol_data: + type: list + description: Specifies the protocol data for a virtual link. + required: false + entry_schema: + type: tosca.datatypes.nfv.VirtualLinkProtocolData + + tosca.datatypes.nfv.VirtualLinkProtocolData: + derived_from: tosca.datatypes.Root + description: describes one protocol layer and associated protocol data for a given virtual link used in a specific VNF deployment flavour + properties: + associated_layer_protocol: + type: string + description: Identifies one of the protocols a virtualLink gives access to (ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire) as specified by the connectivity_type property. + required: true + constraints: + - valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ] + l2_protocol_data: + type: tosca.datatypes.nfv.L2ProtocolData + description: Specifies the L2 protocol data for a virtual link. Shall be present when the associatedLayerProtocol attribute indicates a L2 protocol and shall be absent otherwise. + required: false + l3_protocol_data: + type: tosca.datatypes.nfv.L3ProtocolData + description: Specifies the L3 protocol data for this virtual link. 
Shall be present when the associatedLayerProtocol attribute indicates a L3 protocol and shall be absent otherwise. + required: false + + tosca.datatypes.nfv.L2ProtocolData: + derived_from: tosca.datatypes.Root + description: describes L2 protocol data for a given virtual link used in a specific VNF deployment flavour. + properties: + name: + type: string + description: Identifies the network name associated with this L2 protocol. + required: false + network_type: + type: string + description: Specifies the network type for this L2 protocol.The value may be overridden at run-time. + required: false + constraints: + - valid_values: [ flat, vlan, vxlan, gre ] + vlan_transparent: + type: boolean + description: Specifies whether to support VLAN transparency for this L2 protocol or not. + required: false + default: false + mtu: + type: integer + description: Specifies the maximum transmission unit (MTU) value for this L2 protocol. + required: false + constraints: + - greater_than: 0 + + tosca.datatypes.nfv.L3ProtocolData: + derived_from: tosca.datatypes.Root + description: describes L3 protocol data for a given virtual link used in a specific VNF deployment flavour. + properties: + name: + type: string + description: Identifies the network name associated with this L3 protocol. + required: false + ip_version: + type: string + description: Specifies IP version of this L3 protocol.The value of the ip_version property shall be consistent with the value of the layer_protocol in the connectivity_type property of the virtual link node. + required: true + constraints: + - valid_values: [ ipv4, ipv6 ] + cidr: + type: string + description: Specifies the CIDR (Classless Inter-Domain Routing) of this L3 protocol. The value may be overridden at run-time. + required: true + ip_allocation_pools: + type: list + description: Specifies the allocation pools with start and end IP addresses for this L3 protocol. The value may be overridden at run-time. 
+ required: false + entry_schema: + type: tosca.datatypes.nfv.IpAllocationPool + gateway_ip: + type: string + description: Specifies the gateway IP address for this L3 protocol. The value may be overridden at run-time. + required: false + dhcp_enabled: + type: boolean + description: Indicates whether DHCP (Dynamic Host Configuration Protocol) is enabled or disabled for this L3 protocol. The value may be overridden at run-time. + required: false + ipv6_address_mode: + type: string + description: Specifies IPv6 address mode. May be present when the value of the ipVersion attribute is "ipv6" and shall be absent otherwise. The value may be overridden at run-time. + required: false + constraints: + - valid_values: [ slaac, dhcpv6-stateful, dhcpv6-stateless ] + + tosca.datatypes.nfv.IpAllocationPool: + derived_from: tosca.datatypes.Root + description: Specifies a range of IP addresses + properties: + start_ip_address: + type: string + description: The IP address to be used as the first one in a pool of addresses derived from the cidr block full IP range + required: true + end_ip_address: + type: string + description: The IP address to be used as the last one in a pool of addresses derived from the cidr block full IP range + required: true + + tosca.datatypes.nfv.InstantiationLevel: + derived_from: tosca.datatypes.Root + description: Describes the scale level for each aspect that corresponds to a given level of resources to be instantiated within a deployment flavour in term of the number VNFC instances + properties: + description: + type: string + description: Human readable description of the level + required: true + scale_info: + type: map # key: aspectId + description: Represents for each aspect the scale level that corresponds to this instantiation level. scale_info shall be present if the VNF supports scaling. 
+ required: false + entry_schema: + type: tosca.datatypes.nfv.ScaleInfo + + tosca.datatypes.nfv.VduLevel: + derived_from: tosca.datatypes.Root + description: Indicates for a given Vdu.Compute in a given level the number of instances to deploy + properties: + number_of_instances: + type: integer + description: Number of instances of VNFC based on this VDU to deploy for this level. + required: true + constraints: + - greater_or_equal: 0 + + tosca.datatypes.nfv.VnfLcmOperationsConfiguration: + derived_from: tosca.datatypes.Root + description: Represents information to configure lifecycle management operations + properties: + instantiate: + type: tosca.datatypes.nfv.VnfInstantiateOperationConfiguration + description: Configuration parameters for the InstantiateVnf operation + required: false + scale: + type: tosca.datatypes.nfv.VnfScaleOperationConfiguration + description: Configuration parameters for the ScaleVnf operation + required: false + scale_to_level: + type: tosca.datatypes.nfv.VnfScaleToLevelOperationConfiguration + description: Configuration parameters for the ScaleVnfToLevel operation + required: false + change_flavour: + type: tosca.datatypes.nfv.VnfChangeFlavourOperationConfiguration + description: Configuration parameters for the changeVnfFlavourOpConfig operation + required: false + heal: + type: tosca.datatypes.nfv.VnfHealOperationConfiguration + description: Configuration parameters for the HealVnf operation + required: false + terminate: + type: tosca.datatypes.nfv.VnfTerminateOperationConfiguration + description: Configuration parameters for the TerminateVnf operation + required: false + operate: + type: tosca.datatypes.nfv.VnfOperateOperationConfiguration + description: Configuration parameters for the OperateVnf operation + required: false + change_ext_connectivity: + type: tosca.datatypes.nfv.VnfChangeExtConnectivityOperationConfiguration + description: Configuration parameters for the changeExtVnfConnectivityOpConfig operation + required: false 
+ + tosca.datatypes.nfv.VnfInstantiateOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the InstantiateVnf operation. + + tosca.datatypes.nfv.VnfScaleOperationConfiguration: + derived_from: tosca.datatypes.Root + description: Represents information that affect the invocation of the ScaleVnf operation + properties: + scaling_by_more_than_one_step_supported: + type: boolean + description: Signals whether passing a value larger than one in the numScalingSteps parameter of the ScaleVnf operation is supported by this VNF. + required: false + default: false + + tosca.datatypes.nfv.VnfScaleToLevelOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the ScaleVnfToLevel operation + properties: + arbitrary_target_levels_supported: + type: boolean + description: Signals whether scaling according to the parameter "scaleInfo" is supported by this VNF + required: true + + tosca.datatypes.nfv.VnfHealOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the HealVnf operation + properties: + causes: + type: list + description: Supported "cause" parameter values + required: false + entry_schema: + type: string + + tosca.datatypes.nfv.VnfTerminateOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the TerminateVnf + properties: + min_graceful_termination_timeout: + type: scalar-unit.time + description: Minimum timeout value for graceful termination of a VNF instance + required: true + max_recommended_graceful_termination_timeout: + type: scalar-unit.time + description: Maximum recommended timeout value that can be needed to gracefully terminate a VNF instance of a particular type under certain conditions, such as maximum load condition. 
This is provided by VNF provider as information for the operator facilitating the selection of optimal timeout value. This value is not used as constraint + required: false + + tosca.datatypes.nfv.VnfOperateOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the OperateVnf operation + properties: + min_graceful_stop_timeout: + type: scalar-unit.time + description: Minimum timeout value for graceful stop of a VNF instance + required: true + max_recommended_graceful_stop_timeout: + type: scalar-unit.time + description: Maximum recommended timeout value that can be needed to gracefully stop a VNF instance of a particular type under certain conditions, such as maximum load condition. This is provided by VNF provider as information for the operator facilitating the selection of optimal timeout value. This value is not used as constraint + required: false + + tosca.datatypes.nfv.ScaleInfo: + derived_from: tosca.datatypes.Root + description: Indicates for a given scaleAspect the corresponding scaleLevel + properties: + scale_level: + type: integer + description: The scale level for a particular aspect + required: true + constraints: + - greater_or_equal: 0 + + tosca.datatypes.nfv.ScalingAspect: + derived_from: tosca.datatypes.Root + properties: + name: + type: string + required: true + description: + type: string + required: true + max_scale_level: + type: integer # positiveInteger + required: true + constraints: + - greater_or_equal: 0 + step_deltas: + type: list + required: false + entry_schema: + type: string # Identifier + + tosca.datatypes.nfv.VnfConfigurableProperties: + derived_from: tosca.datatypes.Root + description: indicates configuration properties for a given VNF (e.g. related to auto scaling and auto healing). + properties: + is_autoscale_enabled: + type: boolean + description: It permits to enable (TRUE)/disable (FALSE) the auto-scaling functionality. 
If the properties is not present for configuring, then VNF property is not supported + required: false + is_autoheal_enabled: + type: boolean + description: It permits to enable (TRUE)/disable (FALSE) the auto-healing functionality. If the properties is not present for configuring, then VNF property is not supported + required: false + # additional_configurable_properties: + # description: It provides VNF specific configurable properties that + # can be modified using the ModifyVnfInfo operation + # required: false + # type: tosca.datatypes.nfv.VnfAdditionalConfigurableProperties + # derived types are expected to introduce + # additional_configurable_properties with its type derived from + # tosca.datatypes.nfv.VnfAdditionalConfigurableProperties + + tosca.datatypes.nfv.VnfAdditionalConfigurableProperties: + derived_from: tosca.datatypes.Root + description: is an empty base type for deriving data types for describing additional configurable properties for a given VNF + + tosca.datatypes.nfv.VnfInfoModifiableAttributes: + derived_from: tosca.datatypes.Root + description: Describes VNF-specific extension and metadata for a given VNF + #properties: + #extensions: + #type: tosca.datatypes.nfv.VnfInfoModifiableAttributesExtensions + #description: "Extension" properties of VnfInfo that are writeable + #required: false + # derived types are expected to introduce + # extensions with its type derived from + # tosca.datatypes.nfv.VnfInfoModifiableAttributesExtensions + #metadata: + #type: tosca.datatypes.nfv.VnfInfoModifiableAttributesMetadata + #description: "Metadata" properties of VnfInfo that are writeable + #required: false + # derived types are expected to introduce + # metadata with its type derived from + # tosca.datatypes.nfv.VnfInfoModifiableAttributesMetadata + + tosca.datatypes.nfv.VnfInfoModifiableAttributesExtensions: + derived_from: tosca.datatypes.Root + description: is an empty base type for deriving data types for describing VNF-specific extension + + 
tosca.datatypes.nfv.VnfInfoModifiableAttributesMetadata: + derived_from: tosca.datatypes.Root + description: is an empty base type for deriving data types for describing VNF-specific metadata + + tosca.datatypes.nfv.LogicalNodeData: + derived_from: tosca.datatypes.Root + description: Describes compute, memory and I/O requirements associated with a particular VDU. + properties: + logical_node_requirements: + type: map + description: The logical node-level compute, memory and I/O requirements. A map of strings that contains a set of key-value pairs that describes hardware platform specific deployment requirements, including the number of CPU cores on this logical node, a memory configuration specific to a logical node or a requirement related to the association of an I/O device with the logical node. + required: false + entry_schema: + type: string + + tosca.datatypes.nfv.SwImageData: + derived_from: tosca.datatypes.Root + description: describes information related to a software image artifact + properties: # in SOL001 v0.8.0: "properties or metadata:" + name: + type: string + description: Name of this software image + required: true + version: + type: string + description: Version of this software image + required: true + checksum: + type: tosca.datatypes.nfv.ChecksumData + description: Checksum of the software image file + required: true + container_format: + type: string + description: The container format describes the container file format in which software image is provided + required: true + constraints: + - valid_values: [ aki, ami, ari, bare, docker, ova, ovf ] + disk_format: + type: string + description: The disk format of a software image is the format of the underlying disk image + required: true + constraints: + - valid_values: [ aki, ami, ari, iso, qcow2, raw, vdi, vhd, vhdx, vmdk ] + min_disk: + type: scalar-unit.size # Number + description: The minimal disk size requirement for this software image + required: true + constraints: + - greater_or_equal: 
0 B + min_ram: + type: scalar-unit.size # Number + description: The minimal RAM requirement for this software image + required: false + constraints: + - greater_or_equal: 0 B + size: + type: scalar-unit.size # Number + description: The size of this software image + required: true + operating_system: + type: string + description: Identifies the operating system used in the software image + required: false + supported_virtualisation_environments: + type: list + description: Identifies the virtualisation environments (e.g. hypervisor) compatible with this software image + required: false + entry_schema: + type: string + + tosca.datatypes.nfv.VirtualBlockStorageData: + derived_from: tosca.datatypes.Root + description: VirtualBlockStorageData describes block storage requirements associated with compute resources in a particular VDU, either as a local disk or as virtual attached storage + properties: + size_of_storage: + type: scalar-unit.size + description: Size of virtualised storage resource + required: true + constraints: + - greater_or_equal: 0 B + vdu_storage_requirements: + type: map + description: The hardware platform specific storage requirements. A map of strings that contains a set of key-value pairs that represents the hardware platform specific storage deployment requirements. 
+ required: false + entry_schema: + type: string + rdma_enabled: + type: boolean + description: Indicates if the storage support RDMA + required: false + default: false + + tosca.datatypes.nfv.VirtualObjectStorageData: + derived_from: tosca.datatypes.Root + description: VirtualObjectStorageData describes object storage requirements associated with compute resources in a particular VDU + properties: + max_size_of_storage: + type: scalar-unit.size + description: Maximum size of virtualized storage resource + required: false + constraints: + - greater_or_equal: 0 B + + tosca.datatypes.nfv.VirtualFileStorageData: + derived_from: tosca.datatypes.Root + description: VirtualFileStorageData describes file storage requirements associated with compute resources in a particular VDU + properties: + size_of_storage: + type: scalar-unit.size + description: Size of virtualized storage resource + required: true + constraints: + - greater_or_equal: 0 B + file_system_protocol: + type: string + description: The shared file system protocol (e.g. 
NFS, CIFS) + required: true + + tosca.datatypes.nfv.VirtualLinkBitrateLevel: + derived_from: tosca.datatypes.Root + description: Describes bitrate requirements applicable to the virtual link instantiated from a particicular VnfVirtualLink + properties: + bitrate_requirements: + type: tosca.datatypes.nfv.LinkBitrateRequirements + description: Virtual link bitrate requirements for an instantiation level or bitrate delta for a scaling step + required: true + + tosca.datatypes.nfv.VnfOperationAdditionalParameters: + derived_from: tosca.datatypes.Root + description: Is an empty base type for deriving data type for describing VNF-specific parameters to be passed when invoking lifecycle management operations + #properties: + + tosca.datatypes.nfv.VnfChangeFlavourOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the ChangeVnfFlavour operation + #properties: + + tosca.datatypes.nfv.VnfChangeExtConnectivityOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the ChangeExtVnfConnectivity operation + #properties: + + tosca.datatypes.nfv.VnfMonitoringParameter: + derived_from: tosca.datatypes.Root + description: Represents information on virtualised resource related performance metrics applicable to the VNF. + properties: + name: + type: string + description: Human readable name of the monitoring parameter + required: true + performance_metric: + type: string + description: Identifies the performance metric, according to ETSI GS NFV-IFA 027. 
+ required: true + constraints: + - valid_values: [ v_cpu_usage_mean_vnf, v_cpu_usage_peak_vnf, v_memory_usage_mean_vnf, v_memory_usage_peak_vnf, v_disk_usage_mean_vnf, v_disk_usage_peak_vnf, byte_incoming_vnf_ext_cp, byte_outgoing_vnf_ext_cp, +packet_incoming_vnf_ext_cp, packet_outgoing_vnf_ext_cp ] + collection_period: + type: scalar-unit.time + description: Describes the periodicity at which to collect the performance information. + required: false + constraints: + - greater_than: 0 s + + tosca.datatypes.nfv.VnfcMonitoringParameter: + derived_from: tosca.datatypes.Root + description: Represents information on virtualised resource related performance metrics applicable to the VNF. + properties: + name: + type: string + description: Human readable name of the monitoring parameter + required: true + performance_metric: + type: string + description: Identifies the performance metric, according to ETSI GS NFV-IFA 027. + required: true + constraints: + - valid_values: [ v_cpu_usage_mean_vnf, v_cpu_usage_peak_vnf, v_memory_usage_mean_vnf, v_memory_usage_peak_vnf, v_disk_usage_mean_vnf, v_disk_usage_peak_vnf, byte_incoming_vnf_int_cp, byte_outgoing_vnf_int_cp, packet_incoming_vnf_int_cp, packet_outgoing_vnf_int_cp ] + collection_period: + type: scalar-unit.time + description: Describes the periodicity at which to collect the performance information. + required: false + constraints: + - greater_than: 0 s + + tosca.datatypes.nfv.VirtualLinkMonitoringParameter: + derived_from: tosca.datatypes.Root + description: Represents information on virtualised resource related performance metrics applicable to the VNF. 
+ properties: + name: + type: string + description: Human readable name of the monitoring parameter + required: true + performance_metric: + type: string + description: Identifies a performance metric derived from those defined in ETSI GS NFV-IFA 027.The packetOutgoingVirtualLink and packetIncomingVirtualLink metrics shall be obtained by aggregation the PacketOutgoing and PacketIncoming measurements defined in clause 7.1 of GS NFV-IFA 027 of all virtual link ports attached to the virtual link to which the metrics apply. + required: true + constraints: + - valid_values: [ packet_outgoing_virtual_link, packet_incoming_virtual_link ] + collection_period: + type: scalar-unit.time + description: Describes the periodicity at which to collect the performance information. + required: false + constraints: + - greater_than: 0 s + + tosca.datatypes.nfv.InterfaceDetails: + derived_from: tosca.datatypes.Root + description: information used to access an interface exposed by a VNF + properties: + uri_components: + type: tosca.datatypes.nfv.UriComponents + description: Provides components to build a Uniform Ressource Identifier (URI) where to access the interface end point. + required: false + interface_specific_data: + type: map + description: Provides additional details that are specific to the type of interface considered. + required: false + entry_schema: + type: string + + tosca.datatypes.nfv.UriComponents: + derived_from: tosca.datatypes.Root + description: information used to build a URI that complies with IETF RFC 3986 [8]. + properties: + scheme: + type: string # shall comply with IETF RFC3986 + description: scheme component of a URI. + required: true + authority: + type: tosca.datatypes.nfv.UriAuthority + description: Authority component of a URI + required: false + path: + type: string # shall comply with IETF RFC 3986 + description: path component of a URI. 
+ required: false + query: + type: string # shall comply with IETF RFC 3986 + description: query component of a URI. + required: false + fragment: + type: string # shall comply with IETF RFC 3986 + description: fragment component of a URI. + required: false + + tosca.datatypes.nfv.UriAuthority: + derived_from: tosca.datatypes.Root + description: information that corresponds to the authority component of a URI as specified in IETF RFC 3986 [8] + properties: + user_info: + type: string # shall comply with IETF RFC 3986 + description: user_info field of the authority component of a URI + required: false + host: + type: string # shall comply with IETF RFC 3986 + description: host field of the authority component of a URI + required: false + port: + type: string # shall comply with IETF RFC 3986 + description: port field of the authority component of a URI + required: false + + tosca.datatypes.nfv.ChecksumData: + derived_from: tosca.datatypes.Root + description: Describes information about the result of performing a checksum operation over some arbitrary data + properties: + algorithm: + type: string + description: Describes the algorithm used to obtain the checksum value + required: true + constraints: + - valid_values: [sha-224, sha-256, sha-384, sha-512 ] + hash: + type: string + description: Contains the result of applying the algorithm indicated by the algorithm property to the data to which this ChecksumData refers + required: true + +artifact_types: + tosca.artifacts.nfv.SwImage: + derived_from: tosca.artifacts.Deployment.Image + description: describes the software image which is directly loaded on the virtualisation container realizing of the VDU or is to be loaded on a virtual storage resource. 
+ + tosca.artifacts.Implementation.nfv.Mistral: + derived_from: tosca.artifacts.Implementation + description: artifacts for Mistral workflows + mime_type: application/x-yaml + file_ext: [ yaml ] + +capability_types: + tosca.capabilities.nfv.VirtualBindable: + derived_from: tosca.capabilities.Node + description: Indicates that the node that includes it can be pointed by a tosca.relationships.nfv.VirtualBindsTo relationship type which is used to model the VduHasCpd association + + tosca.capabilities.nfv.VirtualCompute: + derived_from: tosca.capabilities.Node + description: Describes the capabilities related to virtual compute resources + properties: + logical_node: + type: map + description: Describes the Logical Node requirements + required: false + entry_schema: + type: tosca.datatypes.nfv.LogicalNodeData + requested_additional_capabilities: + type: map + description: Describes additional capability for a particular VDU + required: false + entry_schema: + type: tosca.datatypes.nfv.RequestedAdditionalCapability + compute_requirements: + type: map + required: false + entry_schema: + type: string + virtual_memory: + type: tosca.datatypes.nfv.VirtualMemory + description: Describes virtual memory of the virtualized compute + required: true + virtual_cpu: + type: tosca.datatypes.nfv.VirtualCpu + description: Describes virtual CPU(s) of the virtualized compute + required: true + virtual_local_storage: + type: list + description: A list of virtual system disks created and destroyed as part of the VM lifecycle + required: false + entry_schema: + type: tosca.datatypes.nfv.VirtualBlockStorageData + description: virtual system disk definition + + tosca.capabilities.nfv.VirtualStorage: + derived_from: tosca.capabilities.Root + description: Describes the attachment capabilities related to Vdu.Storage + +relationship_types: + tosca.relationships.nfv.VirtualBindsTo: + derived_from: tosca.relationships.DependsOn + description: Represents an association relationship between 
Vdu.Compute and VduCp node types + valid_target_types: [ tosca.capabilities.nfv.VirtualBindable ] + + tosca.relationships.nfv.AttachesTo: + derived_from: tosca.relationships.Root + description: Represents an association relationship between the Vdu.Compute and one of the node types, Vdu.VirtualBlockStorage, Vdu.VirtualObjectStorage or Vdu.VirtualFileStorage + valid_target_types: [ tosca.capabilities.nfv.VirtualStorage ] + +interface_types: + tosca.interfaces.nfv.Vnflcm: + derived_from: tosca.interfaces.Root + description: This interface encompasses a set of TOSCA operations corresponding to the VNF LCM operations defined in ETSI GS NFV-IFA 007 as well as to preamble and postamble procedures to the execution of the VNF LCM operations. + instantiate: + description: Invoked upon receipt of an Instantiate VNF request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + instantiate_start: + description: Invoked before instantiate + instantiate_end: + description: Invoked after instantiate + terminate: + description: Invoked upon receipt Terminate VNF request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + terminate_start: + description: Invoked before terminate + terminate_end: + description: Invoked after terminate + modify_information: + description: Invoked upon receipt of a Modify VNF Information request + modify_information_start: + description: Invoked before modify_information + modify_information_end: + description: Invoked after modify_information + change_flavour: + description: Invoked upon receipt of a Change 
VNF Flavour request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + change_flavour_start: + description: Invoked before change_flavour + change_flavour_end: + description: Invoked after change_flavour + change_external_connectivity: + description: Invoked upon receipt of a Change External VNF Connectivity request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + change_external_connectivity_start: + description: Invoked before change_external_connectivity + change_external_connectivity_end: + description: Invoked after change_external_connectivity + operate: + description: Invoked upon receipt of an Operate VNF request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + operate_start: + description: Invoked before operate + operate_end: + description: Invoked after operate + heal: + description: Invoked upon receipt of a Heal VNF request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + heal_start: + description: Invoked before heal + heal_end: + description: Invoked after heal + scale: + description: Invoked upon receipt of a Scale VNF request + # inputs: + # 
additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + scale_start: + description: Invoked before scale + scale_end: + description: Invoked after scale + scale_to_level: + description: Invoked upon receipt of a Scale VNF to Level request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + scale_to_level_start: + description: Invoked before scale_to_level + scale_to_level_end: + description: Invoked after scale_to_level + +node_types: + tosca.nodes.nfv.VNF: + derived_from: tosca.nodes.Root + description: The generic abstract type from which all VNF specific abstract node types shall be derived to form, together with other node types, the TOSCA service template(s) representing the VNFD + properties: + descriptor_id: # instead of vnfd_id + type: string # GUID + description: Globally unique identifier of the VNFD + required: true + descriptor_version: # instead of vnfd_version + type: string + description: Identifies the version of the VNFD + required: true + provider: # instead of vnf_provider + type: string + description: Provider of the VNF and of the VNFD + required: true + product_name: # instead of vnf_product_name + type: string + description: Human readable name for the VNF Product + required: true + software_version: # instead of vnf_software_version + type: string + description: Software version of the VNF + required: true + product_info_name: # instead of vnf_product_info_name + type: string + description: Human readable name for the VNF Product + required: false + product_info_description: # instead of vnf_product_info_description + 
type: string + description: Human readable description of the VNF Product + required: false + vnfm_info: + type: list + required: true + description: Identifies VNFM(s) compatible with the VNF + entry_schema: + type: string + constraints: + - pattern: (^etsivnfm:v[0-9]?[0-9]\.[0-9]?[0-9]\.[0-9]?[0-9]$)|(^[0-9]+:[a-zA-Z0-9.-]+$) + localization_languages: + type: list + description: Information about localization languages of the VNF + required: false + entry_schema: + type: string #IETF RFC 5646 string + default_localization_language: + type: string #IETF RFC 5646 string + description: Default localization language that is instantiated if no information about selected localization language is available + required: false + #configurable_properties: + #type: tosca.datatypes.nfv.VnfConfigurableProperties + #description: Describes the configurable properties of the VNF + #required: false + # derived types are expected to introduce configurable_properties + # with its type derived from + # tosca.datatypes.nfv.VnfConfigurableProperties + #modifiable_attributes: + #type: tosca.datatypes.nfv.VnfInfoModifiableAttributes + #description: Describes the modifiable attributes of the VNF + #required: false + # derived types are expected to introduce modifiable_attributes + # with its type derived from + # tosca.datatypes.nfv.VnfInfoModifiableAttributes + lcm_operations_configuration: + type: tosca.datatypes.nfv.VnfLcmOperationsConfiguration + description: Describes the configuration parameters for the VNF LCM operations + required: false + monitoring_parameters: + type: list + entry_schema: + type: tosca.datatypes.nfv.VnfMonitoringParameter + description: Describes monitoring parameters applicable to the VNF. 
+ required: false + flavour_id: + type: string + description: Identifier of the Deployment Flavour within the VNFD + required: true + flavour_description: + type: string + description: Human readable description of the DF + required: true + vnf_profile: + type: tosca.datatypes.nfv.VnfProfile + description: Describes a profile for instantiating VNFs of a particular NS DF according to a specific VNFD and VNF DF + required: false + requirements: + - virtual_link: + capability: tosca.capabilities.nfv.VirtualLinkable + relationship: tosca.relationships.nfv.VirtualLinksTo + occurrences: [ 0, 1 ] + # Additional requirements shall be defined in the VNF specific node type (deriving from tosca.nodes.nfv.VNF) corresponding to NS virtual links that need to connect to VnfExtCps + interfaces: + Vnflcm: + type: tosca.interfaces.nfv.Vnflcm + + tosca.nodes.nfv.VnfExtCp: + derived_from: tosca.nodes.nfv.Cp + description: Describes a logical external connection point, exposed by the VNF enabling connection with an external Virtual Link + properties: + virtual_network_interface_requirements: + type: list + description: The actual virtual NIC requirements that is been assigned when instantiating the connection point + required: false + entry_schema: + type: tosca.datatypes.nfv.VirtualNetworkInterfaceRequirements + requirements: + - external_virtual_link: + capability: tosca.capabilities.nfv.VirtualLinkable + relationship: tosca.relationships.nfv.VirtualLinksTo + - internal_virtual_link: #name in ETSI NFV IFA011 v0.7.3: intVirtualLinkDesc + capability: tosca.capabilities.nfv.VirtualLinkable + relationship: tosca.relationships.nfv.VirtualLinksTo + + tosca.nodes.nfv.Vdu.Compute: + derived_from: tosca.nodes.Root + description: Describes the virtual compute part of a VDU which is a construct supporting the description of the deployment and operational behavior of a VNFC + properties: + name: + type: string + description: Human readable name of the VDU + required: true + description: + type: 
string + description: Human readable description of the VDU + required: true + boot_order: + type: list # explicit index (boot index) not necessary, contrary to IFA011 + description: References a node template name from which a valid boot device is created + required: false + entry_schema: + type: string + nfvi_constraints: + type: list + description: Describes constraints on the NFVI for the VNFC instance(s) created from this VDU + required: false + entry_schema: + type: string + monitoring_parameters: + type: list + description: Describes monitoring parameters applicable to a VNFC instantiated from this VDU + required: false + entry_schema: + type: tosca.datatypes.nfv.VnfcMonitoringParameter + #configurable_properties: + #type: tosca.datatypes.nfv.VnfcConfigurableProperties + #required: false + # derived types are expected to introduce + # configurable_properties with its type derived from + # tosca.datatypes.nfv.VnfcConfigurableProperties + vdu_profile: + type: tosca.datatypes.nfv.VduProfile + description: Defines additional instantiation data for the VDU.Compute node + required: true + sw_image_data: + type: tosca.datatypes.nfv.SwImageData + description: Defines information related to a SwImage artifact used by this Vdu.Compute node + required: false # property is required when the node template has an associated artifact of type tosca.artifacts.nfv.SwImage and not required otherwise + boot_data: + type: string + description: Contains a string or a URL to a file contained in the VNF package used to customize a virtualised compute resource at boot time. The bootData may contain variable parts that are replaced by deployment specific values before being sent to the VIM. 
+ required: false + capabilities: + virtual_compute: + type: tosca.capabilities.nfv.VirtualCompute + occurrences: [ 1, 1 ] + virtual_binding: + type: tosca.capabilities.nfv.VirtualBindable + occurrences: [ 1, UNBOUNDED ] + requirements: + - virtual_storage: + capability: tosca.capabilities.nfv.VirtualStorage + relationship: tosca.relationships.nfv.AttachesTo + occurrences: [ 0, UNBOUNDED ] + + tosca.nodes.nfv.Vdu.VirtualBlockStorage: + derived_from: tosca.nodes.Root + description: This node type describes the specifications of requirements related to virtual block storage resources + properties: + virtual_block_storage_data: + type: tosca.datatypes.nfv.VirtualBlockStorageData + description: Describes the block storage characteristics. + required: true + sw_image_data: + type: tosca.datatypes.nfv.SwImageData + description: Defines information related to a SwImage artifact used by this Vdu.Compute node. + required: false # property is required when the node template has an associated artifact of type tosca.artifacts.nfv.SwImage and not required otherwise + capabilities: + virtual_storage: + type: tosca.capabilities.nfv.VirtualStorage + description: Defines the capabilities of virtual_storage. + + tosca.nodes.nfv.Vdu.VirtualObjectStorage: + derived_from: tosca.nodes.Root + description: This node type describes the specifications of requirements related to virtual object storage resources + properties: + virtual_object_storage_data: + type: tosca.datatypes.nfv.VirtualObjectStorageData + description: Describes the object storage characteristics. + required: true + capabilities: + virtual_storage: + type: tosca.capabilities.nfv.VirtualStorage + description: Defines the capabilities of virtual_storage. 
+ + tosca.nodes.nfv.Vdu.VirtualFileStorage: + derived_from: tosca.nodes.Root + description: This node type describes the specifications of requirements related to virtual file storage resources + properties: + virtual_file_storage_data: + type: tosca.datatypes.nfv.VirtualFileStorageData + description: Describes the file storage characteristics. + required: true + capabilities: + virtual_storage: + type: tosca.capabilities.nfv.VirtualStorage + description: Defines the capabilities of virtual_storage. + requirements: + - virtual_link: + capability: tosca.capabilities.nfv.VirtualLinkable + relationship: tosca.relationships.nfv.VirtualLinksTo + #description: Describes the requirements for linking to virtual link + + tosca.nodes.nfv.VduCp: + derived_from: tosca.nodes.nfv.Cp + description: describes network connectivity between a VNFC instance based on this VDU and an internal VL + properties: + bitrate_requirement: + type: integer # in bits per second + description: Bitrate requirement in bit per second on this connection point + required: false + constraints: + - greater_or_equal: 0 + virtual_network_interface_requirements: + type: list + description: Specifies requirements on a virtual network interface realising the CPs instantiated from this CPD + required: false + entry_schema: + type: tosca.datatypes.nfv.VirtualNetworkInterfaceRequirements + order: + type: integer + description: The order of the NIC on the compute instance (e.g.eth2) + required: false + constraints: + - greater_or_equal: 0 + vnic_type: + type: string + description: Describes the type of the virtual network interface realizing the CPs instantiated from this CPD + required: false + constraints: + - valid_values: [ normal, virtio, direct-physical ] + requirements: + - virtual_link: + capability: tosca.capabilities.nfv.VirtualLinkable + relationship: tosca.relationships.nfv.VirtualLinksTo + - virtual_binding: + capability: tosca.capabilities.nfv.VirtualBindable + relationship: 
tosca.relationships.nfv.VirtualBindsTo + node: tosca.nodes.nfv.Vdu.Compute + + tosca.nodes.nfv.VnfVirtualLink: + derived_from: tosca.nodes.Root + description: Describes the information about an internal VNF VL + properties: + connectivity_type: + type: tosca.datatypes.nfv.ConnectivityType + description: Specifies the protocol exposed by the VL and the flow pattern supported by the VL + required: true + description: + type: string + description: Provides human-readable information on the purpose of the VL + required: false + test_access: + type: list + description: Test access facilities available on the VL + required: false + entry_schema: + type: string + constraints: + - valid_values: [ passive_monitoring, active_loopback ] + vl_profile: + type: tosca.datatypes.nfv.VlProfile + description: Defines additional data for the VL + required: true + monitoring_parameters: + type: list + description: Describes monitoring parameters applicable to the VL + required: false + entry_schema: + type: tosca.datatypes.nfv.VirtualLinkMonitoringParameter + capabilities: + virtual_linkable: + type: tosca.capabilities.nfv.VirtualLinkable + +group_types: + tosca.groups.nfv.PlacementGroup: + derived_from: tosca.groups.Root + description: PlacementGroup is used for describing the affinity or anti-affinity relationship applicable between the virtualization containers to be created based on different VDUs, or between internal VLs to be created based on different VnfVirtualLinkDesc(s) + properties: + description: + type: string + description: Human readable description of the group + required: true + members: [ tosca.nodes.nfv.Vdu.Compute, tosca.nodes.nfv.VnfVirtualLink ] + +policy_types: + tosca.policies.nfv.InstantiationLevels: + derived_from: tosca.policies.Root + description: The InstantiationLevels type is a policy type representing all the instantiation levels of resources to be instantiated within a deployment flavour and including default instantiation level in term of the number 
of VNFC instances to be created as defined in ETSI GS NFV-IFA 011 [1]. + properties: + levels: + type: map # key: levelId + description: Describes the various levels of resources that can be used to instantiate the VNF using this flavour. + required: true + entry_schema: + type: tosca.datatypes.nfv.InstantiationLevel + constraints: + - min_length: 1 + default_level: + type: string # levelId + description: The default instantiation level for this flavour. + required: false # required if multiple entries in levels + + tosca.policies.nfv.VduInstantiationLevels: + derived_from: tosca.policies.Root + description: The VduInstantiationLevels type is a policy type representing all the instantiation levels of resources to be instantiated within a deployment flavour in term of the number of VNFC instances to be created from each vdu.Compute. as defined in ETSI GS NFV-IFA 011 [1] + properties: + levels: + type: map # key: levelId + description: Describes the Vdu.Compute levels of resources that can be used to instantiate the VNF using this flavour + required: true + entry_schema: + type: tosca.datatypes.nfv.VduLevel + constraints: + - min_length: 1 + targets: [ tosca.nodes.nfv.Vdu.Compute ] + + tosca.policies.nfv.VirtualLinkInstantiationLevels: + derived_from: tosca.policies.Root + description: The VirtualLinkInstantiationLevels type is a policy type representing all the instantiation levels of virtual link resources to be instantiated within a deployment flavour as defined in ETSI GS NFV-IFA 011 [1]. + properties: + levels: + type: map # key: levelId + description: Describes the virtual link levels of resources that can be used to instantiate the VNF using this flavour. 
+ required: true + entry_schema: + type: tosca.datatypes.nfv.VirtualLinkBitrateLevel + constraints: + - min_length: 1 + targets: [ tosca.nodes.nfv.VnfVirtualLink ] + + tosca.policies.nfv.ScalingAspects: + derived_from: tosca.policies.Root + description: The ScalingAspects type is a policy type representing the scaling aspects used for horizontal scaling as defined in ETSI GS NFV-IFA 011 [1]. + properties: + aspects: + type: map # key: aspectId + description: Describe maximum scale level for total number of scaling steps that can be applied to a particular aspect + required: true + entry_schema: + type: tosca.datatypes.nfv.ScalingAspect + constraints: + - min_length: 1 + + tosca.policies.nfv.VduScalingAspectDeltas: + derived_from: tosca.policies.Root + description: The VduScalingAspectDeltas type is a policy type representing the Vdu.Compute detail of an aspect deltas used for horizontal scaling, as defined in ETSI GS NFV-IFA 011 [1]. + properties: + aspect: + type: string + description: Represents the scaling aspect to which this policy applies + required: true + deltas: + type: map # key: scalingDeltaId + description: Describes the Vdu.Compute scaling deltas to be applied for every scaling steps of a particular aspect. + required: true + entry_schema: + type: tosca.datatypes.nfv.VduLevel + constraints: + - min_length: 1 + targets: [ tosca.nodes.nfv.Vdu.Compute ] + + tosca.policies.nfv.VirtualLinkBitrateScalingAspectDeltas: + derived_from: tosca.policies.Root + description: The VirtualLinkBitrateScalingAspectDeltas type is a policy type representing the VnfVirtualLink detail of an aspect deltas used for horizontal scaling, as defined in ETSI GS NFV-IFA 011 [1]. + properties: + aspect: + type: string + description: Represents the scaling aspect to which this policy applies. + required: true + deltas: + type: map # key: scalingDeltaId + description: Describes the VnfVirtualLink scaling deltas to be applied for every scaling steps of a particular aspect. 
+ required: true + entry_schema: + type: tosca.datatypes.nfv.VirtualLinkBitrateLevel + constraints: + - min_length: 1 + targets: [ tosca.nodes.nfv.VnfVirtualLink ] + + tosca.policies.nfv.VduInitialDelta: + derived_from: tosca.policies.Root + description: The VduInitialDelta type is a policy type representing the Vdu.Compute detail of an initial delta used for horizontal scaling, as defined in ETSI GS NFV-IFA 011 [1]. + properties: + initial_delta: + type: tosca.datatypes.nfv.VduLevel + description: Represents the initial minimum size of the VNF. + required: true + targets: [ tosca.nodes.nfv.Vdu.Compute ] + + tosca.policies.nfv.VirtualLinkBitrateInitialDelta: + derived_from: tosca.policies.Root + description: The VirtualLinkBitrateInitialDelta type is a policy type representing the VnfVirtualLink detail of an initial deltas used for horizontal scaling, as defined in ETSI GS NFV-IFA 011 [1]. + properties: + initial_delta: + type: tosca.datatypes.nfv.VirtualLinkBitrateLevel + description: Represents the initial minimum size of the VNF. + required: true + targets: [ tosca.nodes.nfv.VnfVirtualLink ] + + tosca.policies.nfv.AffinityRule: + derived_from: tosca.policies.Placement + description: The AffinityRule describes the affinity rules applicable for the defined targets + properties: + scope: + type: string + description: scope of the rule is an NFVI_node, an NFVI_PoP, etc. + required: true + constraints: + - valid_values: [ nfvi_node, zone, zone_group, nfvi_pop ] + targets: [ tosca.nodes.nfv.Vdu.Compute, tosca.nodes.nfv.VnfVirtualLink, tosca.groups.nfv.PlacementGroup ] + + tosca.policies.nfv.AntiAffinityRule: + derived_from: tosca.policies.Placement + description: The AntiAffinityRule describes the anti-affinity rules applicable for the defined targets + properties: + scope: + type: string + description: scope of the rule is an NFVI_node, an NFVI_PoP, etc. 
+ required: true + constraints: + - valid_values: [ nfvi_node, zone, zone_group, nfvi_pop ] + targets: [ tosca.nodes.nfv.Vdu.Compute, tosca.nodes.nfv.VnfVirtualLink, tosca.groups.nfv.PlacementGroup ] + + tosca.policies.nfv.SecurityGroupRule: + derived_from: tosca.policies.Root + description: The SecurityGroupRule type is a policy type specified the matching criteria for the ingress and/or egress traffic to/from visited connection points as defined in ETSI GS NFV-IFA 011 [1]. + properties: + description: + type: string + description: Human readable description of the security group rule. + required: false + direction: + type: string + description: The direction in which the security group rule is applied. The direction of 'ingress' or 'egress' is specified against the associated CP. I.e., 'ingress' means the packets entering a CP, while 'egress' means the packets sent out of a CP. + required: false + constraints: + - valid_values: [ ingress, egress ] + default: ingress + ether_type: + type: string + description: Indicates the protocol carried over the Ethernet layer. + required: false + constraints: + - valid_values: [ ipv4, ipv6 ] + default: ipv4 + protocol: + type: string + description: Indicates the protocol carried over the IP layer. Permitted values include any protocol defined in the IANA protocol registry, e.g. TCP, UDP, ICMP, etc. 
+ required: false + constraints: + - valid_values: [ hopopt, icmp, igmp, ggp, ipv4, st, tcp, cbt, egp, igp, bbn_rcc_mon, nvp_ii, pup, argus, emcon, xnet, chaos, udp, mux, dcn_meas, hmp, prm, xns_idp, trunk_1, trunk_2, leaf_1, leaf_2, rdp, irtp, iso_tp4, netblt, mfe_nsp, merit_inp, dccp, 3pc, idpr, xtp, ddp, idpr_cmtp, tp++, il, ipv6, sdrp, ipv6_route, ipv6_frag, idrp, rsvp, gre, dsr, bna, esp, ah, i_nlsp, swipe, narp, mobile, tlsp, skip, ipv6_icmp, ipv6_no_nxt, ipv6_opts, cftp, sat_expak, kryptolan, rvd, ippc, sat_mon, visa, ipcv, cpnx, cphb, wsn, pvp, br_sat_mon, sun_nd, wb_mon, wb_expak, iso_ip, vmtp, secure_vmtp, vines, ttp, iptm, nsfnet_igp, dgp, tcf, eigrp, ospfigp, sprite_rpc, larp, mtp, ax.25, ipip, micp, scc_sp, etherip, encap, gmtp, ifmp, pnni, pim, aris, scps, qnx, a/n, ip_comp, snp, compaq_peer, ipx_in_ip, vrrp, pgm, l2tp, ddx, iatp, stp, srp, uti, smp, sm, ptp, isis, fire, crtp, crudp, sscopmce, iplt, sps, pipe, sctp, fc, rsvp_e2e_ignore, mobility, udp_lite, mpls_in_ip, manet, hip, shim6, wesp, rohc ] + default: tcp + port_range_min: + type: integer + description: Indicates minimum port number in the range that is matched by the security group rule. If a value is provided at design-time, this value may be overridden at run-time based on other deployment requirements or constraints. + required: false + constraints: + - greater_or_equal: 0 + - less_or_equal: 65535 + default: 0 + port_range_max: + type: integer + description: Indicates maximum port number in the range that is matched by the security group rule. If a value is provided at design-time, this value may be overridden at run-time based on other deployment requirements or constraints. 
+ required: false + constraints: + - greater_or_equal: 0 + - less_or_equal: 65535 + default: 65535 + targets: [ tosca.nodes.nfv.VduCp, tosca.nodes.nfv.VnfExtCp ] + + tosca.policies.nfv.SupportedVnfInterface: + derived_from: tosca.policies.Root + description: this policy type represents interfaces produced by a VNF, the details to access them and the applicable connection points to use to access these interfaces + properties: + interface_name: + type: string + description: Identifies an interface produced by the VNF. + required: true + constraints: + - valid_values: [ vnf_indicator, vnf_configuration ] + details: + type: tosca.datatypes.nfv.InterfaceDetails + description: Provide additional data to access the interface endpoint + required: false + targets: [ tosca.nodes.nfv.VnfExtCp, tosca.nodes.nfv.VduCp ] + + diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/sample_free5gc_cnf_on_vm_df_simple.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/sample_free5gc_cnf_on_vm_df_simple.yaml new file mode 100644 index 000000000..2978e1e22 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/sample_free5gc_cnf_on_vm_df_simple.yaml @@ -0,0 +1,236 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: Simple deployment flavour for Sample VNF + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + - sample_free5gc_cnf_on_vm_types.yaml + +topology_template: + inputs: + id: + type: string + vendor: + type: string + version: + type: version + descriptor_id: + type: string + descriptor_version: + type: string + provider: + type: string + product_name: + type: string + software_version: + type: string + vnfm_info: + type: list + entry_schema: + type: string + flavour_id: + type: string + flavour_description: + type: string + + substitution_mappings: + node_type: company.provider.VNF + properties: + 
flavour_id: simple + requirements: + virtual_link_external1_1: [ masterNode_CP1, virtual_link ] + virtual_link_external1_2: [ workerNode_CP1, virtual_link ] + virtual_link_external1_3: [ workerNode_CP2, virtual_link ] + virtual_link_external1_4: [ workerNode_CP3, virtual_link ] + virtual_link_external1_5: [ workerNode_CP4, virtual_link ] + + node_templates: + VNF: + type: company.provider.VNF + properties: + flavour_description: A simple flavour + interfaces: + Vnflcm: + instantiate_end: + implementation: mgmt-drivers-kubernetes-free5gc + terminate_end: + implementation: mgmt-drivers-kubernetes-free5gc + scale_start: + implementation: mgmt-drivers-kubernetes-free5gc + scale_end: + implementation: mgmt-drivers-kubernetes-free5gc + heal_start: + implementation: mgmt-drivers-kubernetes-free5gc + heal_end: + implementation: mgmt-drivers-kubernetes-free5gc + artifacts: + mgmt-drivers-kubernetes-free5gc: + description: Management driver for kubernetes cluster + type: tosca.artifacts.Implementation.Python + file: Scripts/kubernetes_mgmt_free5gc.py + + masterNode: + type: tosca.nodes.nfv.Vdu.Compute + properties: + name: masterNode + description: masterNode compute node + vdu_profile: + min_number_of_instances: 1 + max_number_of_instances: 1 + sw_image_data: + name: free5gc-master_img + version: '20.04' + checksum: + algorithm: sha-512 + hash: d0741bf24b8c9bac3c5bbd13a016ddcd291467b573b43f236fa82affa4bf11538ddde02481ba7767196eb2d8571b747c52b20a27cea03fe2496b0faaf10d6491 + container_format: bare + disk_format: qcow2 + min_disk: 0 GB + size: 2 GB + + + capabilities: + virtual_compute: + properties: + requested_additional_capabilities: + properties: + requested_additional_capability_name: m1.medium + support_mandatory: true + target_performance_parameters: + entry_schema: test + virtual_memory: + virtual_mem_size: 4 GB + virtual_cpu: + num_virtual_cpu: 2 + virtual_local_storage: + - size_of_storage: 45 GB + + workerNode: + type: tosca.nodes.nfv.Vdu.Compute + properties: + 
name: workerNode + description: workerNode compute node + vdu_profile: + min_number_of_instances: 1 + max_number_of_instances: 2 + sw_image_data: + name: free5gc-worker-img + version: '20.04' + checksum: + algorithm: sha-512 + hash: f489d48ea08fc10bbf96a33bbc3dbe620fda0138825aa41b3770ab67461993f162e39af5f0d3ceba398fc9cddeb76795f823673bec2affb63a24ba31f4d824ac + container_format: bare + disk_format: qcow2 + min_disk: 0 GB + size: 2 GB + + capabilities: + virtual_compute: + properties: + requested_additional_capabilities: + properties: + requested_additional_capability_name: m1.xlarge + support_mandatory: true + target_performance_parameters: + entry_schema: test + virtual_memory: + virtual_mem_size: 8 GB + virtual_cpu: + num_virtual_cpu: 8 + virtual_local_storage: + - size_of_storage: 160 GB + + masterNode_CP1: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 0 + requirements: + - virtual_binding: masterNode + + workerNode_CP1: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 0 + requirements: + - virtual_binding: workerNode + + workerNode_CP2: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 0 + requirements: + - virtual_binding: workerNode + + workerNode_CP3: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 0 + requirements: + - virtual_binding: workerNode + + workerNode_CP4: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 0 + requirements: + - virtual_binding: workerNode + + policies: + - scaling_aspects: + type: tosca.policies.nfv.ScalingAspects + properties: + aspects: + worker_instance: + name: worker_instance_aspect + description: worker_instance scaling aspect + max_scale_level: 1 + step_deltas: + - delta_1 + + - workerNode_initial_delta: + type: tosca.policies.nfv.VduInitialDelta + properties: + initial_delta: + number_of_instances: 1 + targets: [ workerNode ] + + - workerNode_scaling_aspect_deltas: 
+ type: tosca.policies.nfv.VduScalingAspectDeltas + properties: + aspect: worker_instance + deltas: + delta_1: + number_of_instances: 1 + targets: [ workerNode ] + + - instantiation_levels: + type: tosca.policies.nfv.InstantiationLevels + properties: + levels: + instantiation_level_1: + description: Smallest size + scale_info: + worker_instance: + scale_level: 0 + instantiation_level_2: + description: Largest size + scale_info: + worker_instance: + scale_level: 1 + default_level: instantiation_level_1 + + - workerNode_instantiation_levels: + type: tosca.policies.nfv.VduInstantiationLevels + properties: + levels: + instantiation_level_1: + number_of_instances: 1 + instantiation_level_2: + number_of_instances: 2 + targets: [ workerNode ] diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/sample_free5gc_cnf_on_vm_top.vnfd.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/sample_free5gc_cnf_on_vm_top.vnfd.yaml new file mode 100644 index 000000000..243667f93 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/sample_free5gc_cnf_on_vm_top.vnfd.yaml @@ -0,0 +1,31 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: Sample VNF. + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + - sample_free5gc_cnf_on_vm_types.yaml + - sample_free5gc_cnf_on_vm_df_simple.yaml + +topology_template: + inputs: + selected_flavour: + type: string + description: VNF deployment flavour selected by the consumer. 
It is provided in the API + + node_templates: + VNF: + type: company.provider.VNF + properties: + flavour_id: { get_input: selected_flavour } + descriptor_id: 75ebb928-87ea-2759-9242-b13f2602a6d4 + provider: Company + product_name: Sample VNF + software_version: '1.0' + descriptor_version: '1.0' + vnfm_info: + - Tacker + requirements: + #- virtual_link_external # mapped in lower-level templates + #- virtual_link_internal # mapped in lower-level templates diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/sample_free5gc_cnf_on_vm_types.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/sample_free5gc_cnf_on_vm_types.yaml new file mode 100644 index 000000000..a4c1d2b80 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Definitions/sample_free5gc_cnf_on_vm_types.yaml @@ -0,0 +1,63 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: VNF type definition + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + +node_types: + company.provider.VNF: + derived_from: tosca.nodes.nfv.VNF + properties: + id: + type: string + description: ID of this VNF + default: vnf_id + vendor: + type: string + description: name of the vendor who generate this VNF + default: vendor + version: + type: version + description: version of the software for this VNF + default: 1.0 + descriptor_id: + type: string + constraints: [ valid_values: [ 75ebb928-87ea-2759-9242-b13f2602a6d4 ] ] + default: 75ebb928-87ea-2759-9242-b13f2602a6d4 + descriptor_version: + type: string + constraints: [ valid_values: [ '1.0' ] ] + default: '1.0' + provider: + type: string + constraints: [ valid_values: [ 'Company' ] ] + default: 'Company' + product_name: + type: string + constraints: [ valid_values: [ 'Sample VNF' ] ] + default: 'Sample VNF' + software_version: + type: string + constraints: [ valid_values: [ '1.0' ] ] + default: '1.0' + vnfm_info: + 
type: list + entry_schema: + type: string + constraints: [ valid_values: [ Tacker ] ] + default: [ Tacker ] + flavour_id: + type: string + constraints: [ valid_values: [ simple ] ] + default: simple + flavour_description: + type: string + default: "This is the default flavour description" + requirements: + - virtual_link_internal: + capability: tosca.capabilities.nfv.VirtualLinkable + interfaces: + Vnflcm: + type: tosca.interfaces.nfv.Vnflcm diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Scripts/install_k8s_cluster.sh b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Scripts/install_k8s_cluster.sh new file mode 100644 index 000000000..4622489c8 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Scripts/install_k8s_cluster.sh @@ -0,0 +1,827 @@ +#!/bin/bash +set -o xtrace +############################################################################### +# +# This script will install and setting for the Kubernetes Cluster on Ubuntu. +# It's confirmed operation on Ubuntu of below. +# +# * OS type : Ubuntu(64 bit) +# * OS version : 20.04 LTS +# * OS architecture : amd64 (x86_64) +# * Disk/Ram size : 15GB/2GB +# * Pre setup user : ubuntu +# +############################################################################### + +#============================================================================== +# Usage Definition +#============================================================================== +function usage { + sudo cat <<_EOT_ +$(basename ${0}) is script to construct the kubernetes cluster. + +Usage: + $(basename ${0}) [-d] [-o] [-m ] + [-w ] [-i ] + [-a ] + [-t ] [-s ] [-k ] + +Description: + This script is to construct the kubernetes cluster on a virtual machine. + It can install and configure a Master node or each Worker Node + as specify arguments. 
+ +Options: + -m Install and setup all master nodes(use "," to separate, the first master ip is main master ip) + -w Install and setup worker node + -i master cluster IP address (e.g. 192.168.120.100) + -a Kubernetes api cluster CIDR (e.g. 10.96.0.0/12) + -p Kubernetes pod network CIDR (e.g. 192.168.0.0/16) + -d Display the execution result in debug mode + -o Output the execution result to the log file + -t The first master's token name + -s The first master's token hash + -k The first master‘s certificate key + --help, -h Print this + +_EOT_ + exit 1 +} + +declare -g INSTALL_MODE="" +declare -g DEBUG_MODE="False" +declare -g OUTPUT_LOGFILE="False" +# master/worker ip +declare -g MASTER_IPADDRS=${MASTER_IPADDRS:-} +declare -a -g MASTER_IPS=${MASTER_IPS:-} +declare -g MASTER_IP=${MASTER_IP:-} +declare -g WORKER_IPADDR=${WORKER_IPADDR:-} +declare -g TOKEN_NAME=${TOKEN_NAME:-} +declare -g TOKEN_HASH=${TOKEN_HASH:-} +declare -g CERT_KEY=${CERT_KEY:-} +declare -g K8S_API_CLUSTER_CIDR=${K8S_API_CLUSTER_CIDR:-10.96.0.0/12} +declare -g K8S_POD_CIDR=${K8S_POD_CIDR:-192.168.0.0/16} + +if [ "$OPTIND" = 1 ]; then + while getopts dom:w:i:a:p:t:s:k:h OPT; do + case $OPT in + m) + MASTER_IPADDRS=$OPTARG # 192.168.120.17,192.168.120.18,192.168.120.19 + INSTALL_MODE="master" # master + MASTER_IPS=(${MASTER_IPADDRS//,/ }) + MASTER_IP=${MASTER_IPS[0]} + ;; + w) + WORKER_IPADDR=$OPTARG # 192.168.120.2 + INSTALL_MODE="worker" # worker + ;; + i) + MASTER_CLUSTER_IP=$OPTARG # master cluster ip: 192.168.120.100 + ;; + a) + K8S_API_CLUSTER_CIDR=$OPTARG # cluster cidr: 10.96.0.0/12 + ;; + p) + K8S_POD_CIDR=$OPTARG # pod network cidr: 192.168.0.0/16 + ;; + d) + DEBUG_MODE="True" # start debug + ;; + o) + OUTPUT_LOGFILE="True" # output log file + ;; + t) + TOKEN_NAME=$OPTARG # token name + ;; + s) + TOKEN_HASH=$OPTARG # token hash + ;; + k) + CERT_KEY=$OPTARG # certificate key + ;; + h) + echo "h option. display help" + usage + ;; + \?) + echo "Try to enter the h option." 
1>&2 + ;; + esac + done +else + echo "No installed getopts-command." 1>&2 + exit 1 +fi + +# check parameter entered by user +if [ "$DEBUG_MODE" == "True" ]; then + echo "*** DEBUG MODE ***" + set -x +fi + +if [ "$OUTPUT_LOGFILE" == "True" ]; then + echo "*** OUTPUT LOGFILE MODE ***" + exec > /tmp/k8s_install_`date +%Y%m%d%H%M%S`.log 2>&1 +fi + +# Application Variables +#---------------------- +# haproxy +declare -g CURRENT_HOST_IP=${CURRENT_HOST_IP:-} +declare -g MASTER_CLUSTER_PORT=16443 +# kubeadm join +declare -g KUBEADM_JOIN_WORKER_RESULT=${KUBEADM_JOIN_WORKER_RESULT:-} + + +# Functions +#========== + +# Set OS common functions +#------------------------ + +# Set public DNS +function set_public_dns { + sudo sed -i -e 's/^#DNS=/DNS=8.8.8.8 8.8.4.4/g' /etc/systemd/resolved.conf + sudo systemctl restart systemd-resolved.service +} + +function set_hostname { + tmp_master_ipaddr3=`echo ${MASTER_IP} | sudo sed -e "s/.[0-9]\{1,3\}$//"` + local tmp_result="" + if [[ "$INSTALL_MODE" =~ "master" ]]; then + for _ip in `ip -4 addr | grep -oP '(?<=inet\s)\d+(\.\d+){3}'`; do + _tmp_ip=`echo ${_ip} |sudo sed -e "s/.[0-9]\{1,3\}$//"` + if [[ $_tmp_ip == $tmp_master_ipaddr3 ]]; then + CURRENT_HOST_IP=$_ip + tmp_result=`echo $_ip|cut -d"." -f4` + break + fi + done + sudo /usr/bin/hostnamectl set-hostname master$tmp_result + elif [[ "$INSTALL_MODE" == "worker" ]]; then + CURRENT_HOST_IP=$WORKER_IPADDR + tmp_result=`echo $CURRENT_HOST_IP|cut -d"." -f4` + sudo /usr/bin/hostnamectl set-hostname worker$tmp_result + else + echo "error. please execute sh install_k8s_cluster.sh -h." 
+ exit 0 + fi +} + +function set_sudoers { + echo "ubuntu ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/ubuntu +} + +function set_hosts { + hostname=`hostname` + sudo sed -i -e 's/127.0.0.1localhost/127.0.0.1 localhost master/g' \ + /etc/hosts + sudo sed -i -e "s/127.0.1.1 $hostname/127.0.1.1 $hostname master/g" \ + /etc/hosts +} + +function invalidate_swap { + sudo sed -i -e '/swap/s/^/#/' /etc/fstab + swapoff -a +} + + +# Install Haproxy +#---------------- +function install_haproxy { + REPOS_UPDATED=False apt_get_update + apt_get install haproxy +} + +function modify_haproxy_conf { + cat </dev/null +global + log /dev/log local0 + log /dev/log local1 notice + chroot /var/lib/haproxy + stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners + stats timeout 30s + user haproxy + group haproxy + daemon + + # Default SSL material locations + ca-base /etc/ssl/certs + crt-base /etc/ssl/private + + # Default ciphers to use on SSL-enabled listening sockets. + # For more information, see ciphers(1SSL). 
This list is from: + # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ + # An alternative list with additional directives can be obtained from + # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy + ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS + ssl-default-bind-options no-sslv3 + +defaults + log global + mode http + option httplog + option dontlognull + timeout connect 5000 + timeout client 50000 + timeout server 50000 + errorfile 400 /etc/haproxy/errors/400.http + errorfile 403 /etc/haproxy/errors/403.http + errorfile 408 /etc/haproxy/errors/408.http + errorfile 500 /etc/haproxy/errors/500.http + errorfile 502 /etc/haproxy/errors/502.http + errorfile 503 /etc/haproxy/errors/503.http + errorfile 504 /etc/haproxy/errors/504.http + +frontend kubernetes-apiserver + mode tcp + bind *:$MASTER_CLUSTER_PORT + option tcplog + default_backend kubernetes-apiserver + +backend kubernetes-apiserver + mode tcp + balance roundrobin +EOF + for master_ip in ${MASTER_IPS[@]}; do + split_ips=(${master_ip//./ }) + cat </dev/null + server master${split_ips[3]} $master_ip:6443 check +EOF + done + cat </dev/null +listen stats + bind *:1080 + stats auth admin:awesomePassword + stats refresh 5s + stats realm HAProxy\ Statistics + stats uri /admin?stats +EOF + +} + +function start_haproxy { + sudo systemctl enable haproxy + sudo systemctl start haproxy + sudo systemctl status haproxy | grep Active + result=$(ss -lnt |grep -E "16443|1080") + if [[ -z $result ]]; then + sudo systemctl restart haproxy + fi +} + + +# Install Keepalived +#------------------- +function install_keepalived { + REPOS_UPDATED=False apt_get_update + apt_get install keepalived +} +function modify_keepalived_conf { + local priority + local ip_name + local index=0 + for master_ip in ${MASTER_IPS[@]}; do + if [[ "$CURRENT_HOST_IP" == "$master_ip" ]]; then + priority=$(expr 103 - $index) + fi + 
index=$(expr $index + 1) + done + + ip_name=$(ip a s | grep $CURRENT_HOST_IP | awk '{print $NF}') + + cat </dev/null +vrrp_script chk_haproxy { + script "killall -0 haproxy" + interval 3 fall 3 +} +vrrp_instance VRRP1 { + state MASTER + interface $ip_name + virtual_router_id 51 + priority $priority + advert_int 1 + virtual_ipaddress { + $MASTER_CLUSTER_IP/24 + } + track_script { + chk_haproxy + } +} +EOF +} + +function start_keepalived { + sudo systemctl enable keepalived.service + sudo systemctl start keepalived.service + sudo systemctl status keepalived.service | grep Active + result=$(sudo systemctl status keepalived.service | \ + grep Active | grep "running") + if [[ "$result" == "" ]]; then + exit 0 + fi +} + +# Install Docker +#--------------- +function install_docker { + arch=$(sudo dpkg --print-architecture) + REPOS_UPDATED=False apt_get_update + DEBIAN_FRONTEND=noninteractive sudo apt-get install -y \ + apt-transport-https ca-certificates curl gnupg-agent \ + software-properties-common + result=`curl -fsSL https://download.docker.com/linux/ubuntu/gpg | \ + sudo apt-key add -` + if [[ $result != "OK" ]]; then + exit 0 + fi + sudo add-apt-repository \ + "deb [arch=${arch}] \ +https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" + apt_get update + DEBIAN_FRONTEND=noninteractive sudo apt-get install \ + docker-ce \ + docker-ce-cli containerd.io << EOF +y +EOF +} + +function set_docker_proxy { + sudo mkdir -p /etc/systemd/system/docker.service.d + sudo touch /etc/systemd/system/docker.service.d/https-proxy.conf + + cat </dev/null +[Service] +Environment="HTTP_PROXY=${http_proxy//%40/@}" "HTTPS_PROXY=${https_proxy//%40/@}" "NO_PROXY=$no_proxy" +EOF + cat </dev/null +{ + "exec-opts": ["native.cgroupdriver=systemd"] +} +EOF + sudo systemctl daemon-reload + sudo systemctl restart docker + sleep 3 + result=$(sudo systemctl status docker | grep Active | grep "running") + if [[ -z "$result" ]]; then + exit 0 + fi + sleep 7 + sudo docker run hello-world 
+} + + +# Install Kubernetes +#------------------- +function set_k8s_components { + REPOS_UPDATED=False apt_get_update + sleep 60 + sudo apt-get install -y apt-transport-https curl + result=`curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | \ + sudo apt-key add -` + if [[ $result != "OK" ]]; then + exit 0 + fi + echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | \ + sudo tee -a /etc/apt/sources.list.d/kubernetes.list + apt_get update + sleep 60 + apt_get install -y kubelet kubeadm kubectl + sudo apt-mark hold kubelet kubeadm kubectl + echo "starting kubelet, wait 30s ..." + sleep 30 + sudo systemctl status kubelet | grep Active +} + +function init_master { + if [[ "$MASTER_IPADDRS" =~ "," ]]; then + bindPort=16443 + else + bindPort=6443 + fi + cat </dev/null +apiVersion: kubeadm.k8s.io/v1beta2 +bootstrapTokens: +- groups: + - system:bootstrappers:kubeadm:default-node-token + token: abcdef.0123456789abcdef + ttl: 24h0m0s + usages: + - signing + - authentication +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: $MASTER_CLUSTER_IP + bindPort: $bindPort +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: $hostname + taints: + - effect: NoSchedule + key: node-role.kubernetes.io/master +--- +apiServer: + certSANs: + - $MASTER_CLUSTER_IP + timeoutForControlPlane: 8m0s +apiVersion: kubeadm.k8s.io/v1beta2 +certificatesDir: /etc/kubernetes/pki +clusterName: kubernetes +controlPlaneEndpoint: $MASTER_CLUSTER_IP:$bindPort +controllerManager: {} +dns: + type: CoreDNS +etcd: + local: + dataDir: /var/lib/etcd +imageRepository: k8s.gcr.io +kind: ClusterConfiguration +networking: + dnsDomain: cluster.local + serviceSubnet: $K8S_API_CLUSTER_CIDR + podSubnet: $K8S_POD_CIDR +scheduler: {} +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +mode: ipvs +EOF + sudo kubeadm init --config=kubeadm.yaml --upload-certs + sleep 3 + sudo mkdir -p $HOME/.kube + sudo /bin/cp -f /etc/kubernetes/admin.conf 
$HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + sleep 20 +} + +function install_pod_network { + wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml + echo "waiting install pod network..." + count=3 + while ((count > 0)); do + result=$(kubectl apply -f kube-flannel.yml) + if [[ "$result" =~ "created" ]] || \ + [[ "$result" =~ "unchanged" ]]; then + echo "$result" + break + fi + sudo rm -rf $HOME/.kube + sudo mkdir -p $HOME/.kube + sudo /bin/cp -f /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + sleep 10 + ((count--)) + done + if count == 0; then + echo 'install flannel failed!' + exit 255 + fi +} + +function install_multus_cni { + git clone https://github.com/intel/multus-cni.git + count=3 + while ((count > 0)); do + result=$(cat multus-cni/images/multus-daemonset.yml | kubectl apply -f -) + if [[ "$result" =~ "created" ]] || \ + [[ "$result" =~ "unchanged" ]]; then + echo "$result" + break + fi + sleep 10 + ((count--)) + done + if count == 0; then + echo 'install multus failed!' + exit 255 + fi + kubectl api-versions | grep -i cncf +} + +function prepare_ovs_cni { + cd free5gc-eno/ + wget https://raw.githubusercontent.com/k8snetworkplumbingwg/ovs-cni/main/examples/ovs-cni.yml + cd ~ +} + +function add_master_node { + sudo kubeadm join $MASTER_CLUSTER_IP:16443 \ + --token $TOKEN_NAME \ + --discovery-token-ca-cert-hash sha256:$TOKEN_HASH \ + --control-plane --certificate-key $CERT_KEY + sudo mkdir -p $HOME/.kube + sudo /bin/cp -f /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + echo "add node ..." 
    sleep 10
    kubectl get nodes -o wide
    echo "add node successfully"
}

# Run "kubeadm init" with only pod/service CIDRs.
# NOTE(review): this initialises a control plane, which is unusual for a
# worker, and nothing in the visible script calls init_worker (main_worker
# uses add_worker_node) — confirm whether this function is dead code.
function init_worker {
    sudo kubeadm init --pod-network-cidr=$K8S_POD_CIDR \
        --service-cidr=$K8S_API_CLUSTER_CIDR
    sleep 5
    sudo mkdir -p $HOME/.kube
    sudo /bin/cp -f /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    sleep 10
}

# Join this VM to the cluster as a worker.  In HA mode the join goes
# through the haproxy endpoint (16443), otherwise straight to the API
# server (6443).  The kubeadm output is kept in
# KUBEADM_JOIN_WORKER_RESULT and checked at the end of the script.
function add_worker_node {
    if [[ "$ha_flag" != "False" ]]; then
        KUBEADM_JOIN_WORKER_RESULT=$(sudo kubeadm join \
            $MASTER_CLUSTER_IP:16443 --token $TOKEN_NAME \
            --discovery-token-ca-cert-hash sha256:$TOKEN_HASH)
    else
        KUBEADM_JOIN_WORKER_RESULT=$(sudo kubeadm join \
            $MASTER_CLUSTER_IP:6443 --token $TOKEN_NAME \
            --discovery-token-ca-cert-hash sha256:$TOKEN_HASH)
    fi
}

# Move three NIC addresses onto OVS bridges br1..br3 (for ovs-cni) and
# set up NAT.  Interfaces are located by grepping "ip addr" output:
# br1 by a /23 prefix, br2/br3 by the 192.168.20.x / 192.168.52.x nets.
# NOTE(review): the awk '{print $8}' field position and the hard-coded
# "ens3" uplink are environment-specific — confirm against the deployed
# image's interface naming before reuse.
function set_br_ex {
    ip_br_1=`ip addr | grep '/23' | awk '{print $2}'`
    interface_name_1=`ip addr | grep '/23' | awk '{print $8}'`
    sudo ip addr del $ip_br_1 dev $interface_name_1
    sudo ovs-vsctl add-br br1
    sudo ovs-vsctl add-port br1 $interface_name_1
    sudo ip addr add $ip_br_1 dev br1
    sudo ip link set br1 up

    ip_br_2=`ip addr | grep '192.168.20' | awk '{print $2}'`
    interface_name_2=`ip addr | grep '192.168.20' | awk '{print $8}'`
    sudo ip addr del $ip_br_2 dev $interface_name_2
    sudo ovs-vsctl add-br br2
    sudo ovs-vsctl add-port br2 $interface_name_2
    sudo ip addr add $ip_br_2 dev br2
    sudo ip link set br2 up

    ip_br_3=`ip addr | grep '192.168.52' | awk '{print $2}'`
    interface_name_3=`ip addr | grep '192.168.52' | awk '{print $8}'`
    sudo ip addr del $ip_br_3 dev $interface_name_3
    sudo ovs-vsctl add-br br3
    sudo ovs-vsctl add-port br3 $interface_name_3
    sudo ip addr add $ip_br_3 dev br3
    sudo ip link set br3 up

    sudo iptables -t nat -A POSTROUTING -o $interface_name_2 -j MASQUERADE
    sudo iptables -t nat -A POSTROUTING -o ens3 -j MASQUERADE

}

# Set common functions
#
# Refer: devstack project functions-common
#-----------------------------------------
function apt_get_update {
    if [[
"$REPOS_UPDATED" == "True" ]]; then + return + fi + + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + + # time all the apt operations + time_start "apt-get-update" + + local update_cmd="sudo apt-get update" + if ! timeout 300 sh -c "while ! $update_cmd; do sleep 30; done"; then + die $LINENO "Failed to update apt repos, we're dead now" + fi + + REPOS_UPDATED=True + # stop the clock + time_stop "apt-get-update" +} + +function time_start { + local name=$1 + local start_time=${_TIME_START[$name]} + if [[ -n "$start_time" ]]; then + die $LINENO \ + "Trying to start the clock on $name, but it's already been started" + fi + + _TIME_START[$name]=$(date +%s%3N) +} + +function time_stop { + local name + local end_time + local elapsed_time + local total + local start_time + + name=$1 + start_time=${_TIME_START[$name]} + + if [[ -z "$start_time" ]]; then + die $LINENO \ + "Trying to stop the clock on $name, but it was never started" + fi + end_time=$(date +%s%3N) + elapsed_time=$(($end_time - $start_time)) + total=${_TIME_TOTAL[$name]:-0} + # reset the clock so we can start it in the future + _TIME_START[$name]="" + _TIME_TOTAL[$name]=$(($total + $elapsed_time)) +} + +function apt_get { + local xtrace result + xtrace=$(set +o | grep xtrace) # set +o xtrace + set +o xtrace + + [[ "$OFFLINE" = "True" || -z "$@" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + + # time all the apt operations + time_start "apt-get" + + $xtrace + + $sudo DEBIAN_FRONTEND=noninteractive \ + http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \ + no_proxy=${no_proxy:-} \ + apt-get --option "Dpkg::Options::=--force-confold" \ + --assume-yes "$@" < /dev/null + result=$? 
+ + # stop the clock + time_stop "apt-get" + return $result +} + +# Choose install function based on install mode +#---------------------------------------------- +function main_master { + # prepare + set_public_dns + set_hostname + set_sudoers + set_hosts + invalidate_swap + if [[ "$MASTER_IPADDRS" =~ "," ]]; then + # haproxy + install_haproxy + modify_haproxy_conf + start_haproxy + + # keepalived + install_keepalived + modify_keepalived_conf + start_keepalived + fi + + # Docker + install_docker + set_docker_proxy + + # kubernetes + set_k8s_components + init_master + install_pod_network + install_multus_cni + prepare_ovs_cni + # build_free5gc_image + + clear + token=$(sudo kubeadm token create) + echo "token:$token" + server=$(kubectl cluster-info | \ + sed 's,\x1B\[[0-9;]*[a-zA-Z],,g' | \ + grep 'Kubernetes' |awk '{print $7}') + echo "server:$server" + cat /etc/kubernetes/pki/ca.crt + ssl_ca_cert_hash=$(openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | \ + openssl rsa -pubin -outform der 2>/dev/null | \ + openssl dgst -sha256 -hex | sudo sed 's/^.* //') + echo "ssl_ca_cert_hash:$ssl_ca_cert_hash" + cert_key=$(sudo kubeadm init phase upload-certs --upload-certs) + echo "certificate_key:$cert_key" +} + +function normal_master { + # prepare + set_public_dns + set_hostname + set_sudoers + set_hosts + invalidate_swap + + # haproxy + install_haproxy + modify_haproxy_conf + start_haproxy + + # keepalived + install_keepalived + modify_keepalived_conf + start_keepalived + + # Docker + install_docker + set_docker_proxy + + # kubernetes + set_k8s_components + add_master_node + +} + +function main_worker { + # prepare + set_public_dns + set_hostname + set_sudoers + set_hosts + invalidate_swap + + # Docker + set_docker_proxy + + # kubernetes + set_k8s_components + add_worker_node + + # set br-ex + set_br_ex + +} + +# Pre preparations +# ________________ + +function check_OS { + . 
/etc/os-release + if [[ $PRETTY_NAME =~ "Ubuntu 20.04" ]]; then + os_architecture=`uname -a | grep 'x86_64'` + if [[ $os_architecture == "" ]]; then + echo "Your OS does not support at present." + echo "It only supports x86_64." + fi + else + echo "Your OS does not support at present." + echo "It only supports Ubuntu 20.04.1 LTS." + fi +} + +function set_apt-conf_proxy { + sudo touch /etc/apt/apt.conf.d/proxy.conf + + cat </dev/null +Acquire::http::Proxy "${http_proxy}"; +Acquire::https::Proxy "${https_proxy}"; +EOF +} + +# Main +# ____ + +flag="False" +set_apt-conf_proxy +check_OS +if [[ "$INSTALL_MODE" =~ "master" ]]; then + echo "Start install to main master node" + for _ip in `ip -4 addr | grep -oP '(?<=inet\s)\d+(\.\d+){3}'`; do + if [[ $_ip == $MASTER_IP ]]; then + flag="True" + break + fi + done + if [[ "$flag" == "True" ]]; then + INSTALL_MODE="main_master" + main_master + else + INSTALL_MODE="normal_master" + normal_master + fi +elif [ "$INSTALL_MODE" == "worker" ]; then + echo "Start install to worker node" + main_worker +else + echo "The install mode does not support at present!" + exit 255 +fi + +if [[ "$INSTALL_MODE" =~ "master" ]]; then + result=$(kubectl get nodes -o wide | grep $CURRENT_HOST_IP) + if [[ -z "$result" ]];then + echo "Install Failed! The node does not exist in Kubernetes cluster." + exit 255 + else + echo "Install Success!" + fi +else + if [[ "$KUBEADM_JOIN_WORKER_RESULT" =~ \ + "This node has joined the cluster" ]]; then + echo "Install Success!" + else + echo "Install Failed! The node does not exist in Kubernetes cluster." 
+ exit 255 + fi +fi +exit 0 diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Scripts/kubernetes_mgmt_free5gc.py b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Scripts/kubernetes_mgmt_free5gc.py new file mode 100644 index 000000000..4606a5dbf --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/Scripts/kubernetes_mgmt_free5gc.py @@ -0,0 +1,2184 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 

import eventlet
import ipaddress
import json
import os
import re
import time

from oslo_log import log as logging
from oslo_utils import uuidutils
import paramiko

from tacker.common import cmd_executer
from tacker.common import exceptions
from tacker.db.db_base import CommonDbMixin
from tacker.db.nfvo import nfvo_db
from tacker.nfvo.nfvo_plugin import NfvoPlugin
from tacker import objects
from tacker.vnflcm import utils as vnflcm_utils
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver

LOG = logging.getLogger(__name__)
# Timeout (seconds) for short remote kubectl/shell commands.
K8S_CMD_TIMEOUT = 30
# Timeout (seconds) for the full install_k8s_cluster.sh run on a VM.
K8S_INSTALL_TIMEOUT = 3600
# Delay between SSH connection retries while a server finishes booting.
SERVER_WAIT_COMPLETE_TIME = 120


class KubernetesFree5gcMgmtDriver(
        vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
    """Mgmt driver that builds a Kubernetes cluster on VMs for free5gc.

    Drives the install_k8s_cluster.sh script over SSH on the deployed
    master/worker VMs and registers the resulting cluster as a
    Kubernetes VIM in Tacker.
    """

    def __init__(self):
        self._init_flag()

    def get_type(self):
        # Identifier used by Tacker to look up this mgmt driver.
        return 'mgmt-drivers-kubernetes-free5gc'

    def get_name(self):
        return 'mgmt-drivers-kubernetes-free5gc'

    def get_description(self):
        return 'Tacker Kubernetes VNFMgmt Driver for free5gc'

    def instantiate_start(self, context, vnf_instance,
                          instantiate_vnf_request, grant,
                          grant_request, **kwargs):
        # Nothing to do before the VMs exist; all work happens in
        # instantiate_end.
        pass

    def _init_flag(self):
        # Per-operation state flags, reset at the start of each LCM call.
        self.FLOATING_IP_FLAG = False
        self.SET_NODE_LABEL_FLAG = False
        self.SET_ZONE_ID_FLAG = False

    def _check_is_cidr(self, cidr_str):
        """Return True if cidr_str parses as an IP network in CIDR form."""
        # instantiate: check cidr
        try:
            ipaddress.ip_network(cidr_str)
            return True
        except ValueError:
            return False

    def _execute_command(self, commander, ssh_command, timeout, type, retry):
        """Run ssh_command via commander with timeout and retries.

        ``type`` selects how the command result is validated (e.g.
        'common', 'drain', 'certificate_key', 'install' — see the
        branches below this loop).  Each timeout consumes one retry;
        raises MgmtDriverOtherError once retries are exhausted.
        """
        eventlet.monkey_patch()
        while retry >= 0:
            try:
                with eventlet.Timeout(timeout, True):
                    result = commander.execute_command(
                        ssh_command, input_data=None)
                    break
            except eventlet.timeout.Timeout:
                LOG.debug('It is time out, When execute command: '
                          '{}.'.format(ssh_command))
                retry -= 1
                if retry < 0:
                    LOG.error('It is time out, When execute command: '
                              '{}.'.format(ssh_command))
                    raise exceptions.MgmtDriverOtherError(
error_message='It is time out, When execute command: ' + '{}.'.format(ssh_command)) + time.sleep(30) + if type == 'common' or type == 'etcd': + if result.get_return_code() != 0: + err = result.get_stderr() + if err: + LOG.error(err) + raise exceptions.MgmtDriverRemoteCommandError(err_info=err) + elif type == 'drain': + for res in result.get_stdout(): + if 'drained' in res: + break + else: + err = result.get_stderr() + stdout = result.get_stdout() + LOG.debug(stdout) + LOG.debug(err) + elif type == 'certificate_key' or type == 'install': + if result.get_return_code() != 0: + err = result.get_stderr() + LOG.error(err) + raise exceptions.MgmtDriverRemoteCommandError(err_info=err) + return result.get_stdout() + + def _create_vim(self, context, vnf_instance, server, bearer_token, + ssl_ca_cert, vim_name, project_name, master_vm_dict_list): + # ha: create vim + vim_info = { + 'vim': { + 'name': vim_name, + 'auth_url': server, + 'vim_project': { + 'name': project_name + }, + 'auth_cred': { + 'bearer_token': bearer_token, + 'ssl_ca_cert': ssl_ca_cert + }, + 'type': 'kubernetes', + 'tenant_id': context.project_id + } + } + if self.FLOATING_IP_FLAG: + if not master_vm_dict_list[0].get( + 'k8s_cluster', {}).get('cluster_fip'): + register_ip = master_vm_dict_list[0].get('ssh').get('ipaddr') + else: + register_ip = master_vm_dict_list[0].get( + 'k8s_cluster', {}).get('cluster_fip') + server = re.sub(r'(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})', + register_ip, server) + vim_info['vim']['auth_url'] = server + del vim_info['vim']['auth_cred']['ssl_ca_cert'] + try: + nfvo_plugin = NfvoPlugin() + created_vim_info = nfvo_plugin.create_vim(context, vim_info) + except Exception as e: + LOG.error("Failed to register kubernetes vim: {}".format(e)) + raise exceptions.MgmtDriverOtherError( + error_message="Failed to register kubernetes vim: {}".format( + e)) + id = uuidutils.generate_uuid() + vim_id = created_vim_info.get('id') + vim_type = 'kubernetes' + access_info = { + 'auth_url': server + } + 
vim_connection_info = objects.VimConnectionInfo( + id=id, vim_id=vim_id, vim_type=vim_type, + access_info=access_info, interface_info=None + ) + vim_connection_infos = vnf_instance.vim_connection_info + vim_connection_infos.append(vim_connection_info) + vnf_instance.vim_connection_info = vim_connection_infos + vnf_instance.save() + + def _get_ha_group_resources_list( + self, heatclient, stack_id, node, additional_params): + # ha: get group resources list + nest_resources_list = heatclient.resources.list(stack_id=stack_id) + group_stack_name = node.get("aspect_id") + if 'lcm-operation-user-data' in additional_params.keys() and \ + 'lcm-operation-user-data-class' in additional_params.keys(): + group_stack_name = group_stack_name + '_group' + group_stack_id = "" + for nest_resources in nest_resources_list: + if nest_resources.resource_name == group_stack_name: + group_stack_id = nest_resources.physical_resource_id + if not group_stack_id: + LOG.error('No stack id matching the group was found.') + raise exceptions.MgmtDriverOtherError( + error_message="No stack id matching the group was found") + group_resources_list = heatclient.resources.list( + stack_id=group_stack_id) + return group_resources_list + + def _get_cluster_ip(self, heatclient, resource_num, + node, stack_id, nest_stack_id): + cluster_cp_name = node.get('cluster_cp_name') + if not node.get('aspect_id'): + # num_master_node = 1, type=OS::Nova::Server + cluster_ip = heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=cluster_cp_name).attributes.get( + 'fixed_ips')[0].get('ip_address') + else: + # num_master_node > 1, type=OS::Heat::AutoScalingGroup + if resource_num > 1: + cluster_ip = heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=cluster_cp_name).attributes.get( + 'fixed_ips')[0].get('ip_address') + # num_master_node = 1, type=OS::Heat::AutoScalingGroup + else: + cluster_ip = heatclient.resources.get( + stack_id=stack_id, + 
resource_name=cluster_cp_name).attributes.get( + 'fixed_ips')[0].get('ip_address') + if not cluster_ip: + LOG.error('Failed to get the cluster ip.') + raise exceptions.MgmtDriverOtherError( + error_message="Failed to get the cluster ip") + return cluster_ip + + def _get_zone_id_from_grant(self, vnf_instance, grant, operation_type, + physical_resource_id): + # TODO(LiangLu): heal and scale-out operation will fail here + # At present, heal_grant and scale_grant still have some bugs, + # the information in grant cannot match it in vnf_instance, so + # we cannot get the zone_id in heal and scale-out operation. + # This part will be updated in next release. + for vnfc_resource in \ + vnf_instance.instantiated_vnf_info.vnfc_resource_info: + if physical_resource_id == \ + vnfc_resource.compute_resource.resource_id: + vnfc_id = vnfc_resource.id + break + + if operation_type == 'HEAL': + resources = grant.update_resources + else: + resources = grant.add_resources + + for resource in resources: + if vnfc_id == resource.resource_definition_id: + add_resource_zone_id = resource.zone_id + break + + for zone in grant.zones: + if add_resource_zone_id == zone.id: + zone_id = zone.zone_id + break + + return zone_id + + def _get_install_info_for_k8s_node(self, nest_stack_id, node, + additional_params, role, + access_info, vnf_instance, grant): + # instantiate: get k8s ssh ips + vm_dict_list = [] + stack_id = '' + zone_id = '' + host_compute = '' + heatclient = hc.HeatClient(access_info) + + # get ssh_ip and nic_ip and set ssh's values + if not node.get('aspect_id'): + ssh_ip = heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=node.get('ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + nic_ip = heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=node.get('nic_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + vm_dict = { + "ssh": { + "username": node.get("username"), + "password": node.get("password"), + "ipaddr": 
ssh_ip, + "nic_ip": nic_ip + } + } + vm_dict_list.append(vm_dict) + else: + group_resources_list = self._get_ha_group_resources_list( + heatclient, nest_stack_id, node, additional_params) + for group_resource in group_resources_list: + stack_id = group_resource.physical_resource_id + resource_name = node.get('ssh_cp_name') + resource_info = heatclient.resources.get( + stack_id=stack_id, + resource_name=resource_name) + if resource_info.attributes.get('floating_ip_address'): + self.FLOATING_IP_FLAG = True + ssh_ip = resource_info.attributes.get( + 'floating_ip_address') + nic_ip = heatclient.resources.get( + stack_id=stack_id, + resource_name=node.get('nic_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + else: + ssh_ip = heatclient.resources.get( + stack_id=stack_id, + resource_name=resource_name).attributes.get( + 'fixed_ips')[0].get('ip_address') + nic_ip = heatclient.resources.get( + stack_id=stack_id, + resource_name=node.get('nic_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + if role == 'worker': + # get pod_affinity info + nest_resources_list = \ + heatclient.resources.list(stack_id=nest_stack_id) + for nest_resource in nest_resources_list: + if nest_resource.resource_type == \ + 'OS::Nova::ServerGroup': + pod_affinity_resource_info = \ + heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=nest_resource.resource_name) + srv_group_policies = \ + pod_affinity_resource_info.attributes.get( + 'policy') + if srv_group_policies and \ + srv_group_policies == 'anti-affinity': + srv_group_physical_resource_id = \ + pod_affinity_resource_info.\ + physical_resource_id + lowest_resources_list = heatclient.resources.list( + stack_id=stack_id) + for lowest_resource in lowest_resources_list: + if lowest_resource.resource_type == \ + 'OS::Nova::Server': + lowest_resource_name = \ + lowest_resource.resource_name + worker_node_resource_info = \ + heatclient.resources.get( + stack_id=stack_id, + 
resource_name=lowest_resource_name) + srv_groups = worker_node_resource_info.\ + attributes.get('server_groups') + if srv_groups and \ + srv_group_physical_resource_id \ + in srv_groups: + host_compute = worker_node_resource_info.\ + attributes.get('OS-EXT-SRV-ATTR:host') + if self.SET_ZONE_ID_FLAG: + physical_resource_id = \ + worker_node_resource_info.\ + physical_resource_id + zone_id = self._get_zone_id_from_grant( + vnf_instance, grant, 'INSTANTIATE', + physical_resource_id) + + vm_dict_list.append({ + "host_compute": host_compute, + "zone_id": zone_id, + "ssh": { + "username": node.get("username"), + "password": node.get("password"), + "ipaddr": ssh_ip, + "nic_ip": nic_ip + } + }) + + # get cluster_ip from master node + if role == 'master': + cluster_fip = '' + resource_num = len(vm_dict_list) + cluster_ip = self._get_cluster_ip(heatclient, + resource_num, node, stack_id, nest_stack_id) + if self.FLOATING_IP_FLAG and len(vm_dict_list) > 1: + cluster_fip = heatclient.resource_get( + nest_stack_id, + node.get('cluster_fip_name')).attributes.get( + 'floating_ip_address') + + # set k8s_cluster's values + for vm_dict in vm_dict_list: + vm_dict["k8s_cluster"] = { + "pod_cidr": node.get('pod_cidr'), + "cluster_cidr": node.get('cluster_cidr'), + "ipaddr": cluster_ip, + "cluster_fip": cluster_fip + } + return vm_dict_list + + def _get_hosts(self, master_vm_dict_list, worker_vm_dict_list): + # merge /etc/hosts + hosts = [] + for master_vm_dict in master_vm_dict_list: + hosts_master_ip = master_vm_dict.get('ssh', ()).get('nic_ip') + hosts.append(hosts_master_ip + ' ' + 'master' + + hosts_master_ip.split('.')[-1]) + for worker_vm_dict in worker_vm_dict_list: + hosts_worker_ip = worker_vm_dict.get('ssh', ()).get('nic_ip') + hosts.append(hosts_worker_ip + ' ' + 'worker' + + hosts_worker_ip.split('.')[-1]) + hosts_str = '\\n'.join(hosts) + return hosts_str + + def _init_commander_and_send_install_scripts(self, user, password, host, + vnf_package_path=None, 
script_path=None): + retry = 4 + while retry > 0: + try: + if vnf_package_path and script_path: + connect = paramiko.Transport(host, 22) + connect.connect(username=user, password=password) + sftp = paramiko.SFTPClient.from_transport(connect) + # put script file content to '/tmp/install_k8s_cluster.sh' + sftp.put(os.path.join(vnf_package_path, script_path), + "/tmp/install_k8s_cluster.sh") + sftp.put(os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "../../../samples/mgmt_driver/" + "create_admin_token.yaml"), + "/tmp/create_admin_token.yaml") + connect.close() + commander = cmd_executer.RemoteCommandExecutor( + user=user, password=password, host=host, + timeout=K8S_INSTALL_TIMEOUT) + return commander + except paramiko.SSHException as e: + LOG.debug(e) + retry -= 1 + if retry == 0: + LOG.error(e) + raise paramiko.SSHException() + time.sleep(SERVER_WAIT_COMPLETE_TIME) + + def _get_vm_cidr_list(self, master_ip, proxy): + # ha and scale: get vm cidr list + vm_cidr_list = [] + if proxy.get('k8s_node_cidr'): + cidr = proxy.get('k8s_node_cidr') + else: + cidr = master_ip + '/24' + network_ips = ipaddress.ip_network(cidr, False) + for network_ip in network_ips: + vm_cidr_list.append(str(network_ip)) + return vm_cidr_list + + def _install_worker_node(self, commander, proxy, + ha_flag, nic_ip, cluster_ip, kubeadm_token, + ssl_ca_cert_hash): + if proxy.get('http_proxy') and proxy.get('https_proxy'): + ssh_command = \ + "export http_proxy={http_proxy};" \ + "export https_proxy={https_proxy};" \ + "export no_proxy={no_proxy};" \ + "export ha_flag={ha_flag};" \ + "bash /tmp/install_k8s_cluster.sh " \ + "-w {worker_ip} -i {cluster_ip} " \ + "-t {kubeadm_token} -s {ssl_ca_cert_hash}".format( + http_proxy=proxy.get('http_proxy'), + https_proxy=proxy.get('https_proxy'), + no_proxy=proxy.get('no_proxy'), + ha_flag=ha_flag, + worker_ip=nic_ip, cluster_ip=cluster_ip, + kubeadm_token=kubeadm_token, + ssl_ca_cert_hash=ssl_ca_cert_hash) + else: + ssh_command = \ + "export 
ha_flag={ha_flag};" \ + "bash /tmp/install_k8s_cluster.sh " \ + "-w {worker_ip} -i {cluster_ip} " \ + "-t {kubeadm_token} -s {ssl_ca_cert_hash}".format( + ha_flag=ha_flag, + worker_ip=nic_ip, cluster_ip=cluster_ip, + kubeadm_token=kubeadm_token, + ssl_ca_cert_hash=ssl_ca_cert_hash) + self._execute_command( + commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0) + + def _set_node_label(self, commander, nic_ip, host_compute, zone_id): + worker_host_name = 'worker' + nic_ip.split('.')[3] + if host_compute: + ssh_command = "kubectl label nodes {worker_host_name}" \ + " CIS-node={host_compute}".format( + worker_host_name=worker_host_name, + host_compute=host_compute) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + if zone_id: + ssh_command = "kubectl label nodes {worker_host_name}" \ + " kubernetes.io/zone={zone_id}".format( + worker_host_name=worker_host_name, + zone_id=zone_id) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + commander.close_session() + + def _install_k8s_cluster(self, context, vnf_instance, + proxy, script_path, + master_vm_dict_list, worker_vm_dict_list): + # instantiate: pre /etc/hosts + hosts_str = self._get_hosts( + master_vm_dict_list, worker_vm_dict_list) + master_ssh_ips_str = ','.join([ + vm_dict.get('ssh', {}).get('nic_ip') + for vm_dict in master_vm_dict_list]) + worker_ssh_ips_str = ','.join([ + vm_dict.get('ssh', {}).get('nic_ip') + for vm_dict in worker_vm_dict_list]) + ha_flag = "True" + if ',' not in master_ssh_ips_str: + ha_flag = "False" + + # get vnf package path and check script_path + vnf_package_path = vnflcm_utils._get_vnf_package_path( + context, vnf_instance.vnfd_id) + abs_script_path = os.path.join(vnf_package_path, script_path) + if not os.path.exists(abs_script_path): + LOG.error('The path of install script is invalid.') + raise exceptions.MgmtDriverOtherError( + error_message="The path of install script is invalid") + + # set no proxy + project_name 
= '' + if proxy.get("http_proxy") and proxy.get("https_proxy"): + master_cluster_ip = master_vm_dict_list[0].get( + "k8s_cluster", {}).get('ipaddr') + pod_cidr = master_vm_dict_list[0].get( + "k8s_cluster", {}).get("pod_cidr") + cluster_cidr = master_vm_dict_list[0].get( + "k8s_cluster", {}).get("cluster_cidr") + proxy["no_proxy"] = ",".join(list(filter(None, [ + proxy.get("no_proxy"), pod_cidr, cluster_cidr, + "127.0.0.1", "localhost", master_ssh_ips_str, + worker_ssh_ips_str, + master_cluster_ip]))) + + # install k8s + active_username = "" + active_password = "" + active_host = "" + ssl_ca_cert_hash = "" + kubeadm_token = "" + # install master node + for vm_dict in master_vm_dict_list: + if vm_dict.get('ssh', {}).get('nic_ip') == \ + master_ssh_ips_str.split(',')[0]: + active_username = vm_dict.get('ssh', {}).get('username') + active_password = vm_dict.get('ssh', {}).get('password') + active_host = vm_dict.get('ssh', {}).get('ipaddr') + else: + # get certificate key from active master node + commander = cmd_executer.RemoteCommandExecutor( + user=active_username, password=active_password, + host=active_host, timeout=K8S_CMD_TIMEOUT) + ssh_command = "sudo kubeadm init phase upload-certs " \ + "--upload-certs" + result = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'certificate_key', 3) + certificate_key = result[-1].replace('\n', '') + + user = vm_dict.get('ssh', {}).get('username') + password = vm_dict.get('ssh', {}).get('password') + host = vm_dict.get('ssh', {}).get('ipaddr') + k8s_cluster = vm_dict.get('k8s_cluster', {}) + commander = self._init_commander_and_send_install_scripts( + user, password, host, + vnf_package_path, script_path) + + # set /etc/hosts for each node + ssh_command = "> /tmp/tmp_hosts" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "cp /etc/hosts /tmp/tmp_hosts" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "sed -i 
'$a{}' /tmp/tmp_hosts".format( + hosts_str) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "sudo mv /tmp/tmp_hosts /etc/hosts;" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + + # execute install k8s command on VM + if proxy.get('http_proxy') and proxy.get('https_proxy'): + if vm_dict.get('ssh', {}).get('nic_ip') == \ + master_ssh_ips_str.split(',')[0]: + ssh_command = \ + "export http_proxy={http_proxy};" \ + "export https_proxy={https_proxy};" \ + "export no_proxy={no_proxy};" \ + "bash /tmp/install_k8s_cluster.sh " \ + "-m {master_ip} -i {cluster_ip} " \ + "-p {pod_cidr} -a {k8s_cluster_cidr}".format( + http_proxy=proxy.get('http_proxy'), + https_proxy=proxy.get('https_proxy'), + no_proxy=proxy.get('no_proxy'), + master_ip=master_ssh_ips_str, + cluster_ip=k8s_cluster.get("ipaddr"), + pod_cidr=k8s_cluster.get('pod_cidr'), + k8s_cluster_cidr=k8s_cluster.get('cluster_cidr')) + else: + ssh_command = \ + "export http_proxy={http_proxy};" \ + "export https_proxy={https_proxy};" \ + "export no_proxy={no_proxy};" \ + "bash /tmp/install_k8s_cluster.sh " \ + "-m {master_ip} -i {cluster_ip} " \ + "-p {pod_cidr} -a {k8s_cluster_cidr} " \ + "-t {kubeadm_token} -s {ssl_ca_cert_hash} " \ + "-k {certificate_key}".format( + http_proxy=proxy.get('http_proxy'), + https_proxy=proxy.get('https_proxy'), + no_proxy=proxy.get('no_proxy'), + master_ip=master_ssh_ips_str, + cluster_ip=k8s_cluster.get("ipaddr"), + pod_cidr=k8s_cluster.get('pod_cidr'), + k8s_cluster_cidr=k8s_cluster.get('cluster_cidr'), + kubeadm_token=kubeadm_token, + ssl_ca_cert_hash=ssl_ca_cert_hash, + certificate_key=certificate_key) + else: + if vm_dict.get('ssh', {}).get('nic_ip') == \ + master_ssh_ips_str.split(',')[0]: + ssh_command = \ + "bash /tmp/install_k8s_cluster.sh " \ + "-m {master_ip} -i {cluster_ip} " \ + "-p {pod_cidr} -a {k8s_cluster_cidr}".format( + master_ip=master_ssh_ips_str, + cluster_ip=k8s_cluster.get("ipaddr"), 
+ pod_cidr=k8s_cluster.get('pod_cidr'), + k8s_cluster_cidr=k8s_cluster.get('cluster_cidr')) + + else: + ssh_command = \ + "bash /tmp/install_k8s_cluster.sh " \ + "-m {master_ip} -i {cluster_ip} " \ + "-p {pod_cidr} -a {k8s_cluster_cidr} " \ + "-t {kubeadm_token} -s {ssl_ca_cert_hash} " \ + "-k {certificate_key}".format( + master_ip=master_ssh_ips_str, + cluster_ip=k8s_cluster.get("ipaddr"), + pod_cidr=k8s_cluster.get('pod_cidr'), + k8s_cluster_cidr=k8s_cluster.get('cluster_cidr'), + kubeadm_token=kubeadm_token, + ssl_ca_cert_hash=ssl_ca_cert_hash, + certificate_key=certificate_key) + results = self._execute_command( + commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0) + + # get install-information from active master node + if vm_dict.get('ssh', {}).get('nic_ip') == \ + master_ssh_ips_str.split(',')[0]: + for result in results: + if 'token:' in result: + kubeadm_token = result.replace( + 'token:', '').replace('\n', '') + if 'server:' in result: + server = result.replace( + 'server:', '').replace('\n', '') + if 'ssl_ca_cert_hash:' in result: + ssl_ca_cert_hash = result.replace( + 'ssl_ca_cert_hash:', '').replace('\n', '') + begin_index = results.index('-----BEGIN CERTIFICATE-----\n') + end_index = results.index('-----END CERTIFICATE-----\n') + ssl_ca_cert = ''.join(results[begin_index: end_index + 1]) + commander = cmd_executer.RemoteCommandExecutor( + user=user, password=password, host=host, + timeout=K8S_CMD_TIMEOUT) + ssh_command = "kubectl create -f /tmp/create_admin_token.yaml" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + time.sleep(30) + ssh_command = "kubectl get secret -n kube-system " \ + "| grep '^admin-token' " \ + "| awk '{print $1}' " \ + "| xargs -i kubectl describe secret {} " \ + "-n kube-system" \ + "| grep 'token:' | awk '{print $2}'" + bearer_token = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'common', 0)[0].replace('\n', '') + commander.close_session() + + # install worker node 
+ for vm_dict in worker_vm_dict_list: + user = vm_dict.get('ssh', {}).get('username') + password = vm_dict.get('ssh', {}).get('password') + host = vm_dict.get('ssh', {}).get('ipaddr') + nic_ip = vm_dict.get('ssh', {}).get('nic_ip') + cluster_ip = master_vm_dict_list[0].get( + 'k8s_cluster', {}).get('ipaddr') + commander = self._init_commander_and_send_install_scripts( + user, password, host, + vnf_package_path, script_path) + + # set /etc/hosts for each node + ssh_command = "> /tmp/tmp_hosts" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "cp /etc/hosts /tmp/tmp_hosts" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "sed -i '$a{}' /tmp/tmp_hosts".format( + hosts_str) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "sudo mv /tmp/tmp_hosts /etc/hosts;" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + + # execute install k8s command on VM + self._install_worker_node( + commander, proxy, ha_flag, nic_ip, + cluster_ip, kubeadm_token, ssl_ca_cert_hash) + commander.close_session() + + commander = cmd_executer.RemoteCommandExecutor( + user=active_username, password=active_password, + host=active_host, timeout=K8S_CMD_TIMEOUT) + # create ovs-cni + ssh_command = "kubectl apply -f free5gc-eno/ovs-cni.yml" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "kubectl apply -f free5gc-eno/ovs-net-crd.yaml" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + self._pod_create_wait(commander) + # set pod_affinity + self._set_node_label( + commander, nic_ip, vm_dict.get('host_compute'), + vm_dict.get('zone_id')) + + return server, bearer_token, ssl_ca_cert, project_name + + def _pod_create_wait(self, commander): + retry = 10 + install_flag = False + while retry > 0: + ssh_command = 'kubectl describe daemonset ' \ + 'ovs-cni-amd64 
-n kube-system' + results = self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + for result in results: + if 'Pods Status' in result: + if '2 Running' in result: + install_flag = True + LOG.debug('ovs-cni pod create successfully.') + break + else: + LOG.debug('Wait ovs-cni create...') + break + if install_flag: + break + else: + time.sleep(60) + retry = retry - 1 + if retry == 0: + LOG.error('ovs-cni pod create failed.') + raise exceptions.MgmtDriverOtherError( + error_message="ovs-cni pod create failed.") + + def _check_values(self, additional_param): + for key, value in additional_param.items(): + if 'master_node' == key or 'worker_node' == key: + if not value.get('username'): + LOG.error('The username in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverNotFound(param='username') + if not value.get('password'): + LOG.error('The password in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverNotFound(param='password') + if not value.get('ssh_cp_name'): + LOG.error('The ssh_cp_name in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverNotFound( + param='ssh_cp_name') + if 'master_node' == key: + if not value.get('cluster_cp_name'): + LOG.error('The cluster_cp_name in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverNotFound( + param='cluster_cp_name') + + def _get_vim_connection_info(self, context, instantiate_vnf_req): + + vim_info = vnflcm_utils._get_vim(context, + instantiate_vnf_req.vim_connection_info) + + vim_connection_info = objects.VimConnectionInfo.obj_from_primitive( + vim_info, context) + + return vim_connection_info + + def instantiate_end(self, context, vnf_instance, + instantiate_vnf_request, grant, + grant_request, **kwargs): + self._init_flag() + # get vim_connect_info + if hasattr(instantiate_vnf_request, 'vim_connection_info'): + vim_connection_info = self._get_vim_connection_info( + context, instantiate_vnf_request) + else: + # In case of 
healing entire Kubernetes cluster, 'heal_end' method + # will call this method using 'vnf_instance.instantiated_vnf_info' + # as the 'instantiate_vnf_request', but there is no + # 'vim_connection_info' in it, so we should get + # 'vim_connection_info' from 'vnf_instance'. + vim_connection_info = self._get_vim_connection_info( + context, vnf_instance) + additional_param = instantiate_vnf_request.additional_params.get( + 'k8s_cluster_installation_param', {}) + script_path = additional_param.get('script_path') + vim_name = additional_param.get('vim_name') + master_node = additional_param.get('master_node', {}) + worker_node = additional_param.get('worker_node', {}) + proxy = additional_param.get('proxy', {}) + # check script_path + if not script_path: + LOG.error('The script_path in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverNotFound(param='script_path') + # get pod_cidr and cluster_cidr + pod_cidr = additional_param.get('master_node', {}).get('pod_cidr') + cluster_cidr = additional_param.get( + 'master_node', {}).get('cluster_cidr') + # check pod_cidr's value + if pod_cidr: + if not self._check_is_cidr(pod_cidr): + LOG.error('The pod_cidr in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverParamInvalid(param='pod_cidr') + else: + additional_param['master_node']['pod_cidr'] = '10.244.0.0/16' + # check cluster_cidr's value + if cluster_cidr: + if not self._check_is_cidr(cluster_cidr): + LOG.error('The cluster_cidr in the ' + 'additionalParams is invalid.') + raise exceptions.MgmtDriverParamInvalid(param='cluster_cidr') + else: + additional_param['master_node']['cluster_cidr'] = '10.96.0.0/12' + # check grants exists + if grant: + self.SET_ZONE_ID_FLAG = True + # get stack_id + nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id + # set vim_name + if not vim_name: + vim_name = 'kubernetes_vim_' + vnf_instance.id + + # get vm list + access_info = vim_connection_info.access_info + master_vm_dict_list = \ + 
self._get_install_info_for_k8s_node( + nest_stack_id, master_node, + instantiate_vnf_request.additional_params, + 'master', access_info, vnf_instance, grant) + worker_vm_dict_list = self._get_install_info_for_k8s_node( + nest_stack_id, worker_node, + instantiate_vnf_request.additional_params, 'worker', + access_info, vnf_instance, grant) + server, bearer_token, ssl_ca_cert, project_name = \ + self._install_k8s_cluster(context, vnf_instance, + proxy, script_path, master_vm_dict_list, + worker_vm_dict_list) + + # register vim with kubernetes cluster info + self._create_vim(context, vnf_instance, server, + bearer_token, ssl_ca_cert, vim_name, project_name, + master_vm_dict_list) + + def terminate_start(self, context, vnf_instance, + terminate_vnf_request, grant, + grant_request, **kwargs): + pass + + def _get_vim_by_name(self, context, k8s_vim_name): + common_db_api = CommonDbMixin() + result = common_db_api._get_by_name( + context, nfvo_db.Vim, k8s_vim_name) + + if not result: + LOG.debug("Cannot find kubernetes " + "vim with name: {}".format(k8s_vim_name)) + + return result + + def terminate_end(self, context, vnf_instance, + terminate_vnf_request, grant, + grant_request, **kwargs): + self._init_flag() + k8s_params = vnf_instance.instantiated_vnf_info.additional_params.get( + 'k8s_cluster_installation_param', {}) + k8s_vim_name = k8s_params.get('vim_name') + if not k8s_vim_name: + k8s_vim_name = 'kubernetes_vim_' + vnf_instance.id + + vim_info = self._get_vim_by_name( + context, k8s_vim_name) + if vim_info: + nfvo_plugin = NfvoPlugin() + nfvo_plugin.delete_vim(context, vim_info.id) + + def _get_username_pwd(self, vnf_request, vnf_instance, role): + # heal and scale: get user pwd + kwargs_additional_params = vnf_request.additional_params + additionalParams = \ + vnf_instance.instantiated_vnf_info.additional_params + if role == 'master': + if kwargs_additional_params and \ + kwargs_additional_params.get('master_node_username') and \ + 
kwargs_additional_params.get('master_node_password'): + username = \ + kwargs_additional_params.get('master_node_username') + password = \ + kwargs_additional_params.get('master_node_password') + else: + username = \ + additionalParams.get( + 'k8s_cluster_installation_param').get( + 'master_node').get('username') + password = \ + additionalParams.get( + 'k8s_cluster_installation_param').get( + 'master_node').get('password') + else: + if kwargs_additional_params and \ + kwargs_additional_params.get('worker_node_username') and \ + kwargs_additional_params.get('worker_node_username'): + username = \ + kwargs_additional_params.get('worker_node_username') + password = \ + kwargs_additional_params.get('worker_node_password') + else: + username = \ + additionalParams.get( + 'k8s_cluster_installation_param').get( + 'worker_node').get('username') + password = \ + additionalParams.get( + 'k8s_cluster_installation_param').get( + 'worker_node').get('password') + return username, password + + def _get_resources_list(self, heatclient, stack_id, resource_name): + # scale: get resources list + physical_resource_id = heatclient.resources.get( + stack_id=stack_id, + resource_name=resource_name).physical_resource_id + resources_list = heatclient.resources.list( + stack_id=physical_resource_id) + return resources_list + + def _get_host_resource_list(self, heatclient, stack_id, node): + # scale: get host resource list + host_ips_list = [] + node_resource_name = node.get('aspect_id') + node_group_resource_name = node.get('aspect_id') + '_group' + if node_resource_name: + resources_list = self._get_resources_list( + heatclient, stack_id, node_group_resource_name) + for resources in resources_list: + resource_info = heatclient.resource_get( + resources.physical_resource_id, + node.get('ssh_cp_name')) + if resource_info.attributes.get('floating_ip_address'): + self.FLOATING_IP_FLAG = True + ssh_master_ip = resource_info.attributes.get( + 'floating_ip_address') + else: + ssh_master_ip = 
resource_info.attributes.get( + 'fixed_ips')[0].get('ip_address') + host_ips_list.append(ssh_master_ip) + else: + master_ip = heatclient.resource_get( + stack_id, node.get('ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + host_ips_list.append(master_ip) + return host_ips_list + + def _connect_ssh_scale(self, master_ip_list, master_username, + master_password): + for master_ip in master_ip_list: + retry = 4 + while retry > 0: + try: + commander = cmd_executer.RemoteCommandExecutor( + user=master_username, password=master_password, + host=master_ip, + timeout=K8S_CMD_TIMEOUT) + return commander, master_ip + except (exceptions.NotAuthorized, paramiko.SSHException, + paramiko.ssh_exception.NoValidConnectionsError) as e: + LOG.debug(e) + retry -= 1 + time.sleep(SERVER_WAIT_COMPLETE_TIME) + if master_ip == master_ip_list[-1]: + LOG.error('Failed to execute remote command.') + raise exceptions.MgmtDriverRemoteCommandError() + + def evacuate_wait(self, commander, daemonset_content): + # scale: evacuate wait + wait_flag = True + retry_count = 20 + while wait_flag and retry_count > 0: + if daemonset_content.get('items'): + ssh_command = "kubectl get pods --all-namespaces -o json" + result = self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + pods_list = json.loads(''.join(result)).get('items') + pods_names = [pod.get('metadata', {}).get('name') + for pod in pods_list] + for daemonset in daemonset_content.get('items'): + daemonset_name = daemonset.get('metadata', {}).get('name') + if daemonset_name in pods_names and \ + 'calico-node' not in daemonset_name and \ + 'kube-proxy' not in daemonset_name: + break + else: + wait_flag = False + else: + break + if not wait_flag: + break + time.sleep(15) + retry_count -= 1 + + def _delete_scale_in_worker( + self, worker_node, kwargs, heatclient, stack_id, + commander): + # scale: get host name + scale_worker_nic_ips = [] + normal_worker_ssh_ips = [] + worker_host_names = [] + 
scale_name_list = kwargs.get('scale_name_list') + physical_resource_id = heatclient.resource_get( + stack_id, + kwargs.get('scale_vnf_request', {}).aspect_id + '_group') \ + .physical_resource_id + worker_resource_list = heatclient.resource_get_list( + physical_resource_id) + for worker_resource in worker_resource_list: + worker_cp_resource = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('nic_cp_name')) + if worker_resource.resource_name in scale_name_list: + scale_worker_ip = worker_cp_resource.attributes.get( + 'fixed_ips')[0].get('ip_address') + scale_worker_nic_ips.append(scale_worker_ip) + worker_host_name = \ + 'worker' + scale_worker_ip.split('.')[-1] + worker_host_names.append(worker_host_name) + else: + normal_worker_ssh_cp_resource = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('ssh_cp_name')) + if normal_worker_ssh_cp_resource.attributes.get( + 'floating_ip_address'): + normal_worker_ssh_ips.append( + normal_worker_ssh_cp_resource.attributes.get( + 'floating_ip_address')) + else: + normal_worker_ssh_ips.append( + normal_worker_ssh_cp_resource.attributes.get( + 'fixed_ips')[0].get('ip_address')) + + for worker_host_name in worker_host_names: + ssh_command = "kubectl get pods --field-selector=spec." 
\ + "nodeName={} --all-namespaces " \ + "-o json".format(worker_host_name) + result = self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + daemonset_content_str = ''.join(result) + daemonset_content = json.loads( + daemonset_content_str) + ssh_command = \ + "kubectl drain {resource} --ignore-daemonsets " \ + "--timeout={k8s_cmd_timeout}s".format( + resource=worker_host_name, + k8s_cmd_timeout=K8S_CMD_TIMEOUT) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'drain', 3) + # evacuate_wait() + # input: resource, daemonset_content + self.evacuate_wait(commander, daemonset_content) + ssh_command = "kubectl delete node {}".format(worker_host_name) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + return scale_worker_nic_ips, normal_worker_ssh_ips + + def _set_node_ip_in_hosts(self, commander, + type, ips=None, hosts_str=None): + ssh_command = "> /tmp/tmp_hosts" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "cp /etc/hosts /tmp/tmp_hosts" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + if type == 'scale_in': + for ip in ips: + ssh_command = "sed -i '/{}/d' /tmp/tmp_hosts".format( + ip) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + elif type == 'scale_out' or type == 'heal_end': + ssh_command = "sed -i '$a{}' /tmp/tmp_hosts".format( + hosts_str) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + ssh_command = "sudo mv /tmp/tmp_hosts /etc/hosts;" + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0) + + def scale_start(self, context, vnf_instance, + scale_vnf_request, grant, + grant_request, **kwargs): + self._init_flag() + if scale_vnf_request.type == 'SCALE_IN': + vim_connection_info = \ + self._get_vim_connection_info(context, vnf_instance) + + kwargs['scale_vnf_request'] = scale_vnf_request + heatclient = 
hc.HeatClient(vim_connection_info.access_info) + additionalParams = \ + vnf_instance.instantiated_vnf_info.additional_params + master_username, master_password = self._get_username_pwd( + scale_vnf_request, vnf_instance, 'master') + worker_username, worker_password = self._get_username_pwd( + scale_vnf_request, vnf_instance, 'worker') + stack_id = vnf_instance.instantiated_vnf_info.instance_id + master_node = \ + additionalParams.get('k8s_cluster_installation_param').get( + 'master_node') + worker_node = \ + additionalParams.get('k8s_cluster_installation_param').get( + 'worker_node') + master_ip_list = self._get_host_resource_list( + heatclient, stack_id, master_node) + commander, master_ip = self._connect_ssh_scale( + master_ip_list, master_username, + master_password) + + scale_worker_nic_ips, normal_worker_ssh_ips = \ + self._delete_scale_in_worker( + worker_node, kwargs, heatclient, stack_id, commander) + commander.close_session() + + # modify /etc/hosts/ on each node + for master_ip in master_ip_list: + commander = self._init_commander_and_send_install_scripts( + master_username, master_password, master_ip) + self._set_node_ip_in_hosts( + commander, 'scale_in', scale_worker_nic_ips) + commander.close_session() + for worker_ip in normal_worker_ssh_ips: + commander = self._init_commander_and_send_install_scripts( + worker_username, worker_password, worker_ip) + self._set_node_ip_in_hosts( + commander, 'scale_in', scale_worker_nic_ips) + commander.close_session() + else: + pass + + def _get_worker_info(self, worker_node, worker_resource_list, + heatclient, scale_out_id_list, vnf_instance, grant): + normal_ssh_worker_ip_list = [] + normal_nic_worker_ip_list = [] + add_worker_ssh_ip_list = [] + add_worker_nic_ip_list = [] + zone_id_dict = {} + host_compute_dict = {} + for worker_resource in worker_resource_list: + if self.FLOATING_IP_FLAG: + ssh_ip = heatclient.resources.get( + stack_id=worker_resource.physical_resource_id, + 
resource_name=worker_node.get('ssh_cp_name')). \ + attributes.get('floating_ip_address') + else: + ssh_ip = heatclient.resources.get( + stack_id=worker_resource.physical_resource_id, + resource_name=worker_node.get('ssh_cp_name')). \ + attributes.get( + 'fixed_ips')[0].get('ip_address') + nic_ip = heatclient.resources.get( + stack_id=worker_resource.physical_resource_id, + resource_name=worker_node.get('nic_cp_name')). \ + attributes.get('fixed_ips')[0].get('ip_address') + + if worker_resource.physical_resource_id in scale_out_id_list: + add_worker_ssh_ip_list.append(ssh_ip) + add_worker_nic_ip_list.append(nic_ip) + if self.SET_NODE_LABEL_FLAG: + lowest_worker_resources_list = heatclient.resources.list( + stack_id=worker_resource.physical_resource_id) + for lowest_resource in lowest_worker_resources_list: + if lowest_resource.resource_type == \ + 'OS::Nova::Server': + worker_node_resource_info = \ + heatclient.resource_get( + worker_resource.physical_resource_id, + lowest_resource.resource_name) + host_compute = worker_node_resource_info.\ + attributes.get('OS-EXT-SRV-ATTR:host') + if self.SET_ZONE_ID_FLAG: + physical_resource_id = \ + lowest_resource.physical_resource_id + zone_id = self._get_zone_id_from_grant( + vnf_instance, grant, 'SCALE', + physical_resource_id) + zone_id_dict[nic_ip] = zone_id + host_compute_dict[nic_ip] = host_compute + elif worker_resource.physical_resource_id not in \ + scale_out_id_list: + normal_ssh_worker_ip_list.append(ssh_ip) + normal_nic_worker_ip_list.append(nic_ip) + + return (add_worker_ssh_ip_list, add_worker_nic_ip_list, + normal_ssh_worker_ip_list, normal_nic_worker_ip_list, + host_compute_dict, zone_id_dict) + + def _get_master_info( + self, master_resource_list, heatclient, master_node): + master_ssh_ip_list = [] + master_nic_ip_list = [] + for master_resource in master_resource_list: + master_host_reource_info = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('ssh_cp_name')) + if 
master_host_reource_info.attributes.get('floating_ip_address'): + self.FLOATING_IP_FLAG = True + master_ssh_ip = master_host_reource_info.attributes.get( + 'floating_ip_address') + else: + master_ssh_ip = master_host_reource_info.attributes. \ + get('fixed_ips')[0].get('ip_address') + master_nic_ip = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('nic_cp_name')).attributes. \ + get('fixed_ips')[0].get('ip_address') + master_ssh_ip_list.append(master_ssh_ip) + master_nic_ip_list.append(master_nic_ip) + return master_ssh_ip_list, master_nic_ip_list + + def _check_pod_affinity(self, heatclient, nest_stack_id, worker_node): + stack_base_hot_template = heatclient.stacks.template( + stack_id=nest_stack_id) + worker_instance_group_name = worker_node.get('aspect_id') + '_group' + worker_node_properties = stack_base_hot_template['resources'][ + worker_instance_group_name][ + 'properties']['resource']['properties'] + if 'scheduler_hints' in worker_node_properties: + self.SET_NODE_LABEL_FLAG = True + + def scale_end(self, context, vnf_instance, + scale_vnf_request, grant, + grant_request, **kwargs): + self._init_flag() + if scale_vnf_request.type == 'SCALE_OUT': + k8s_cluster_installation_param = \ + vnf_instance.instantiated_vnf_info. \ + additional_params.get('k8s_cluster_installation_param') + vnf_package_path = vnflcm_utils._get_vnf_package_path( + context, vnf_instance.vnfd_id) + nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id + resource_name = scale_vnf_request.aspect_id + '_group' + vim_connection_info = \ + self._get_vim_connection_info(context, vnf_instance) + heatclient = hc.HeatClient(vim_connection_info.access_info) + scale_out_id_list = kwargs.get('scale_out_id_list') + + # get master_ip + master_ssh_ip_list = [] + master_nic_ip_list = [] + master_node = k8s_cluster_installation_param.get('master_node') + + # The VM is created with SOL001 TOSCA-based VNFD and + # not use policies. 
At present, scale operation dose + # not support this case. + if not master_node.get('aspect_id'): + master_ssh_ip_list.append(heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=master_node.get( + 'ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address')) + master_nic_ip_list.append(heatclient.resources.get( + stack_id=nest_stack_id, + resource_name=master_node.get( + 'nic_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address')) + cluster_ip = self._get_cluster_ip( + heatclient, 1, master_node, None, nest_stack_id) + + # The VM is created with UserData format + else: + master_resource_list = self._get_resources_list( + heatclient, nest_stack_id, master_node.get( + 'aspect_id') + '_group') + master_ssh_ip_list, master_nic_ip_list = \ + self._get_master_info(master_resource_list, + heatclient, master_node) + resource_num = len(master_resource_list) + cluster_ip = self._get_cluster_ip( + heatclient, resource_num, master_node, + master_resource_list[0].physical_resource_id, + nest_stack_id) + + # get scale out worker_ips + worker_resource_list = self._get_resources_list( + heatclient, nest_stack_id, resource_name) + worker_node = \ + k8s_cluster_installation_param['worker_node'] + + # check pod-affinity flag + if grant: + self.SET_ZONE_ID_FLAG = True + self._check_pod_affinity(heatclient, nest_stack_id, worker_node) + (add_worker_ssh_ip_list, add_worker_nic_ip_list, + normal_ssh_worker_ip_list, normal_nic_worker_ip_list, + host_compute_dict, zone_id_dict) = \ + self._get_worker_info( + worker_node, worker_resource_list, + heatclient, scale_out_id_list, vnf_instance, grant) + + # get kubeadm_token from one of master node + master_username, master_password = self._get_username_pwd( + scale_vnf_request, vnf_instance, 'master') + worker_username, worker_password = self._get_username_pwd( + scale_vnf_request, vnf_instance, 'worker') + commander, master_ip = self._connect_ssh_scale( + master_ssh_ip_list, master_username, + master_password) + 
ssh_command = "kubeadm token create;" + kubeadm_token = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'common', 3)[0].replace('\n', '') + + # get hash from one of master node + ssh_command = "openssl x509 -pubkey -in " \ + "/etc/kubernetes/pki/ca.crt | openssl rsa " \ + "-pubin -outform der 2>/dev/null | " \ + "openssl dgst -sha256 -hex | sed 's/^.* //';" + ssl_ca_cert_hash = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'common', 3)[0].replace('\n', '') + commander.close_session() + # set no_proxy + proxy = k8s_cluster_installation_param.get('proxy') + pod_cidr = master_node.get('pod_cidr', '10.244.0.0/16') + cluster_cidr = master_node.get("cluster_cidr", '10.96.0.0/12') + if proxy.get("http_proxy") and proxy.get("https_proxy"): + no_proxy = (','.join(list(filter(None, [ + proxy.get("no_proxy"), pod_cidr, cluster_cidr, "127.0.0.1", + "localhost", cluster_ip] + + master_nic_ip_list + + add_worker_nic_ip_list + + normal_nic_worker_ip_list)))) + proxy['no_proxy'] = no_proxy + + # set /etc/hosts + master_hosts = [] + add_worker_hosts = [] + normal_worker_hosts = [] + for master_ip in master_nic_ip_list: + master_ip_str = \ + master_ip + ' master' + master_ip.split('.')[-1] + master_hosts.append(master_ip_str) + for worker_ip in add_worker_nic_ip_list: + worker_ip_str = \ + worker_ip + ' worker' + worker_ip.split('.')[-1] + add_worker_hosts.append(worker_ip_str) + for worker_ip in normal_nic_worker_ip_list: + worker_ip_str = \ + worker_ip + ' worker' + worker_ip.split('.')[-1] + normal_worker_hosts.append(worker_ip_str) + + ha_flag = True + if len(master_nic_ip_list) == 1: + ha_flag = False + for worker_ip in add_worker_ssh_ip_list: + script_path = \ + k8s_cluster_installation_param.get('script_path') + commander = self._init_commander_and_send_install_scripts( + worker_username, worker_password, + worker_ip, vnf_package_path, script_path) + hosts_str = '\\n'.join(master_hosts + add_worker_hosts + + normal_worker_hosts) + 
self._set_node_ip_in_hosts(commander, + 'scale_out', hosts_str=hosts_str) + worker_nic_ip = add_worker_nic_ip_list[ + add_worker_ssh_ip_list.index(worker_ip)] + self._install_worker_node( + commander, proxy, ha_flag, worker_nic_ip, + cluster_ip, kubeadm_token, ssl_ca_cert_hash) + commander.close_session() + if self.SET_NODE_LABEL_FLAG: + commander, _ = self._connect_ssh_scale( + master_ssh_ip_list, master_username, + master_password) + self._set_node_label( + commander, worker_nic_ip, + host_compute_dict.get(worker_nic_ip), + zone_id_dict.get(worker_nic_ip)) + + hosts_str = '\\n'.join(add_worker_hosts) + # set /etc/hosts on master node and normal worker node + for master_ip in master_ssh_ip_list: + commander = self._init_commander_and_send_install_scripts( + worker_username, worker_password, master_ip) + self._set_node_ip_in_hosts( + commander, 'scale_out', hosts_str=hosts_str) + commander.close_session() + for worker_ip in normal_ssh_worker_ip_list: + commander = self._init_commander_and_send_install_scripts( + worker_node.get('username'), worker_node.get('password'), + worker_ip) + self._set_node_ip_in_hosts( + commander, 'scale_out', hosts_str=hosts_str) + commander.close_session() + else: + pass + + def _get_vnfc_resource_id(self, vnfc_resource_info, vnfc_instance_id): + for vnfc_resource in vnfc_resource_info: + if vnfc_resource.id == vnfc_instance_id: + return vnfc_resource + else: + return None + + def _get_master_node_name( + self, heatclient, master_resource_list, + target_physical_resource_ids, master_node): + fixed_master_infos = {} + not_fixed_master_infos = {} + flag_master = False + for master_resource in master_resource_list: + master_resource_infos = heatclient.resources.list( + master_resource.physical_resource_id) + master_host_reource_info = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('ssh_cp_name')) + for master_resource_info in master_resource_infos: + if master_resource_info.resource_type == \ + 
'OS::Nova::Server' and \ + master_resource_info.physical_resource_id in \ + target_physical_resource_ids: + flag_master = True + if master_host_reource_info.attributes.get( + 'floating_ip_address'): + self.FLOATING_IP_FLAG = True + master_ssh_ip = master_host_reource_info.attributes.\ + get('floating_ip_address') + else: + master_ssh_ip = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + master_nic_ip = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('nic_cp_name')).attributes. \ + get('fixed_ips')[0].get('ip_address') + master_name = 'master' + master_nic_ip.split('.')[-1] + fixed_master_infos[master_name] = {} + fixed_master_infos[master_name]['master_ssh_ip'] = \ + master_ssh_ip + fixed_master_infos[master_name]['master_nic_ip'] = \ + master_nic_ip + elif master_resource_info.resource_type == \ + 'OS::Nova::Server' and \ + master_resource_info.physical_resource_id not in \ + target_physical_resource_ids: + if master_host_reource_info.attributes.get( + 'floating_ip_address'): + self.FLOATING_IP_FLAG = True + master_ssh_ip = master_host_reource_info.attributes.\ + get('floating_ip_address') + else: + master_ssh_ip = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + master_nic_ip = heatclient.resource_get( + master_resource.physical_resource_id, + master_node.get('nic_cp_name')).attributes. \ + get('fixed_ips')[0].get('ip_address') + master_name = 'master' + master_nic_ip.split('.')[-1] + not_fixed_master_infos[master_name] = {} + not_fixed_master_infos[master_name]['master_ssh_ip'] = \ + master_ssh_ip + not_fixed_master_infos[master_name]['master_nic_ip'] = \ + master_nic_ip + if flag_master and len(master_resource_list) == 1: + LOG.error("An error occurred in MgmtDriver:{" + "The number of Master-Nodes is 1 " + "or less. 
If you want to heal, " + "please respawn.}") + raise exceptions.MgmtDriverOtherError( + error_message="An error occurred in MgmtDriver:{" + "The number of Master-Nodes is 1 " + "or less. If you want to heal, " + "please respawn.}") + return flag_master, fixed_master_infos, not_fixed_master_infos + + def _get_worker_node_name( + self, heatclient, worker_resource_list, + target_physical_resource_ids, worker_node, vnf_instance, grant): + fixed_worker_infos = {} + not_fixed_worker_infos = {} + flag_worker = False + for worker_resource in worker_resource_list: + worker_resource_infos = heatclient.resources.list( + worker_resource.physical_resource_id) + worker_host_reource_info = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('ssh_cp_name')) + for worker_resource_info in worker_resource_infos: + if worker_resource_info.resource_type == \ + 'OS::Nova::Server' and \ + worker_resource_info.physical_resource_id in \ + target_physical_resource_ids: + flag_worker = True + if worker_host_reource_info.attributes.get( + 'floating_ip_address'): + self.FLOATING_IP_FLAG = True + worker_ssh_ip = worker_host_reource_info.attributes.\ + get('floating_ip_address') + else: + worker_ssh_ip = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + worker_nic_ip = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('nic_cp_name')).attributes. 
\ + get('fixed_ips')[0].get('ip_address') + worker_name = 'worker' + worker_nic_ip.split('.')[-1] + fixed_worker_infos[worker_name] = {} + if self.SET_NODE_LABEL_FLAG: + worker_node_resource_info = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_resource_info.resource_name) + host_compute = worker_node_resource_info.attributes.\ + get('OS-EXT-SRV-ATTR:host') + fixed_worker_infos[worker_name]['host_compute'] = \ + host_compute + if self.SET_ZONE_ID_FLAG: + physical_resource_id = \ + worker_resource_info.physical_resource_id + zone_id = self._get_zone_id_from_grant( + vnf_instance, grant, 'HEAL', + physical_resource_id) + fixed_worker_infos[worker_name]['zone_id'] = \ + zone_id + fixed_worker_infos[worker_name]['worker_ssh_ip'] = \ + worker_ssh_ip + fixed_worker_infos[worker_name]['worker_nic_ip'] = \ + worker_nic_ip + elif worker_resource_info.resource_type == \ + 'OS::Nova::Server' and \ + worker_resource_info.physical_resource_id not in \ + target_physical_resource_ids: + if worker_host_reource_info.attributes.get( + 'floating_ip_address'): + self.FLOATING_IP_FLAG = True + worker_ssh_ip = worker_host_reource_info.attributes.\ + get('floating_ip_address') + else: + worker_ssh_ip = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('ssh_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + worker_nic_ip = heatclient.resource_get( + worker_resource.physical_resource_id, + worker_node.get('nic_cp_name')).attributes. 
\ + get('fixed_ips')[0].get('ip_address') + worker_name = 'worker' + worker_nic_ip.split('.')[-1] + not_fixed_worker_infos[worker_name] = {} + not_fixed_worker_infos[worker_name]['worker_ssh_ip'] = \ + worker_ssh_ip + not_fixed_worker_infos[worker_name]['worker_nic_ip'] = \ + worker_nic_ip + return flag_worker, fixed_worker_infos, not_fixed_worker_infos + + def _get_worker_ssh_ip( + self, heatclient, stack_id, master_resource_name, + worker_resource_name, target_physical_resource_ids): + flag_worker = False + fixed_worker_infos = dict() + not_fixed_master_infos = dict() + stack_resource_list = heatclient.resources.list(stack_id) + worker_ip = heatclient.resource_get( + stack_id, worker_resource_name).attributes.get( + 'fixed_ips')[0].get('ip_address') + master_ip = heatclient.resource_get( + stack_id, master_resource_name).attributes.get( + 'fixed_ips')[0].get('ip_address') + master_name = 'master' + master_ip.split('.')[-1] + for stack_resource in stack_resource_list: + if stack_resource.resource_type == 'OS::Nova::Server': + current_ip_list = [] + current_address = heatclient.resource_get( + stack_id, stack_resource.resource_name).attributes.get( + 'addresses', {}) + for network, network_info in current_address.items(): + for network_ip_info in network_info: + current_ip_list.append(network_ip_info.get('addr')) + + if stack_resource.physical_resource_id in \ + target_physical_resource_ids and \ + master_ip in current_ip_list: + LOG.error("An error occurred in MgmtDriver:{" + "The number of Master-Nodes is 1 " + "or less. If you want to heal, " + "please respawn.}") + raise exceptions.MgmtDriverOtherError( + error_message="An error occurred in MgmtDriver:{" + "The number of Master-Nodes is 1 " + "or less. 
If you want to heal, " + "please respawn.}") + elif stack_resource.physical_resource_id not in \ + target_physical_resource_ids and \ + master_ip in current_ip_list: + not_fixed_master_infos.update( + {master_name: {'master_ssh_ip': master_ip}}) + not_fixed_master_infos[master_name].update( + {'master_nic_ip': master_ip}) + elif stack_resource.physical_resource_id in \ + target_physical_resource_ids and \ + worker_ip in current_ip_list: + worker_name = 'worker' + worker_ip.split('.')[-1] + fixed_worker_infos.update( + {worker_name: {'worker_ssh_ip': worker_ip}}) + fixed_worker_infos[worker_name].update( + {'worker_nic_ip': worker_ip}) + flag_worker = True + return flag_worker, fixed_worker_infos, not_fixed_master_infos, {} + + def _delete_master_node( + self, fixed_master_infos, not_fixed_master_infos, + master_username, master_password): + not_fixed_master_ssh_ips = [ + master_ips.get('master_ssh_ip') + for master_ips in not_fixed_master_infos.values()] + + for fixed_master_name in fixed_master_infos.keys(): + # delete heal master node info from haproxy.cfg + # on other master node + for not_fixed_master_ssh_ip in not_fixed_master_ssh_ips: + commander = cmd_executer.RemoteCommandExecutor( + user=master_username, password=master_password, + host=not_fixed_master_ssh_ip, + timeout=K8S_CMD_TIMEOUT) + master_ssh_ip = not_fixed_master_ssh_ip + ssh_command = "sudo sed -i '/server {}/d' " \ + "/etc/haproxy/haproxy.cfg;" \ + "sudo service haproxy restart;" \ + "".format(fixed_master_name) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + + # delete master node + ssh_command = "kubectl delete node " + \ + fixed_master_name + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + connect_master_name = '' + for not_master_name, not_master_ip_info in \ + not_fixed_master_infos.items(): + if not_master_ip_info['master_ssh_ip'] == master_ssh_ip: + connect_master_name = not_master_name + ssh_command = \ + "kubectl get 
pods -n kube-system | " \ + "grep %(connect_master_name)s | " \ + "awk '{print $1}'" \ + "" % {'connect_master_name': connect_master_name} + etcd_name = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'common', 3)[0].replace('\n', '') + ssh_command = \ + "kubectl exec -i %(etcd_name)s -n kube-system " \ + "-- sh<< EOF\n" \ + "etcdctl --endpoints 127.0.0.1:2379 " \ + "--cacert /etc/kubernetes/pki/etcd/ca.crt " \ + "--cert /etc/kubernetes/pki/etcd/server.crt " \ + "--key /etc/kubernetes/pki/etcd/server.key " \ + "member list\nEOF" \ + "" % {'etcd_name': etcd_name} + results = self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'etcd', 3) + etcd_id = [res for res in results + if fixed_master_name + in res][0].split(',')[0] + ssh_command = \ + "kubectl exec -i %(etcd_name)s -n kube-system " \ + "-- sh<< EOF\n" \ + "etcdctl --endpoints 127.0.0.1:2379 " \ + "--cacert /etc/kubernetes/pki/etcd/ca.crt " \ + "--cert /etc/kubernetes/pki/etcd/server.crt " \ + "--key /etc/kubernetes/pki/etcd/server.key " \ + "member remove %(etcd_id)s\nEOF" % \ + {'etcd_name': etcd_name, "etcd_id": etcd_id} + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'etcd', 3) + commander.close_session() + + def _delete_worker_node( + self, fixed_worker_infos, not_fixed_master_infos, + master_username, master_password): + not_fixed_master_ssh_ips = [ + master_ips.get('master_ssh_ip') + for master_ips in not_fixed_master_infos.values()] + for fixed_worker_name in fixed_worker_infos.keys(): + commander, master_ssh_ip = self._connect_ssh_scale( + not_fixed_master_ssh_ips, master_username, + master_password) + ssh_command = "kubectl get pods --field-selector=" \ + "spec.nodeName={} -o json" \ + "".format(fixed_worker_name) + result = self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + worker_node_pod_info_str = ''.join(result) + worker_node_pod_info = json.loads( + worker_node_pod_info_str) + ssh_command = "kubectl drain {} " \ 
+ "--ignore-daemonsets " \ + "--timeout={}s" \ + "".format(fixed_worker_name, + K8S_CMD_TIMEOUT) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'drain', 3) + self.evacuate_wait( + commander, worker_node_pod_info) + ssh_command = "kubectl delete node {}".format( + fixed_worker_name) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + commander.close_session() + + def _delete_node_to_be_healed( + self, heatclient, stack_id, target_physical_resource_ids, + master_username, master_password, worker_resource_name, + master_resource_name, master_node, worker_node): + master_ssh_cp_name = master_node.get('nic_cp_name') + flag_master = False + flag_worker = False + if master_resource_name == master_ssh_cp_name: + (flag_worker, fixed_worker_infos, not_fixed_master_infos, + not_fixed_worker_infos) = \ + self._get_worker_ssh_ip( + heatclient, stack_id, master_resource_name, + worker_resource_name, target_physical_resource_ids) + else: + master_resource_list = self._get_resources_list( + heatclient, stack_id, master_resource_name) + flag_master, fixed_master_infos, not_fixed_master_infos = \ + self._get_master_node_name( + heatclient, master_resource_list, + target_physical_resource_ids, + master_node) + if len(master_resource_list) == 1 and flag_master: + LOG.error("An error occurred in MgmtDriver:{" + "The number of Master-Nodes is 1 " + "or less. If you want to heal, " + "please respawn.}") + raise exceptions.MgmtDriverOtherError( + error_message="An error occurred in MgmtDriver:{" + "The number of Master-Nodes is 1 " + "or less. 
If you want to heal, " + "please respawn.}") + worker_resource_list = self._get_resources_list( + heatclient, stack_id, worker_resource_name) + flag_worker, fixed_worker_infos, not_fixed_worker_infos = \ + self._get_worker_node_name( + heatclient, worker_resource_list, + target_physical_resource_ids, + worker_node, vnf_instance=None, grant=None) + if flag_master: + self._delete_master_node( + fixed_master_infos, not_fixed_master_infos, + master_username, master_password) + if flag_worker: + self._delete_worker_node( + fixed_worker_infos, not_fixed_master_infos, + master_username, master_password) + + def _get_node_resource_name(self, vnf_additional_params, node): + if node.get('aspect_id'): + # in case of Userdata format + if 'lcm-operation-user-data' in vnf_additional_params.keys() and \ + 'lcm-operation-user-data-class' in \ + vnf_additional_params.keys(): + resource_name = node.get('aspect_id') + '_group' + # in case of SOL001 TOSCA-based VNFD with HA master node + else: + resource_name = node.get('aspect_id') + else: + # in case of SOL001 TOSCA-based VNFD with single master node + resource_name = node.get('nic_cp_name') + return resource_name + + def _get_target_physical_resource_ids(self, vnf_instance, + heal_vnf_request): + target_physical_resource_ids = [] + for vnfc_instance_id in heal_vnf_request.vnfc_instance_id: + instantiated_vnf_info = vnf_instance.instantiated_vnf_info + vnfc_resource_info = instantiated_vnf_info.vnfc_resource_info + vnfc_resource = self._get_vnfc_resource_id( + vnfc_resource_info, vnfc_instance_id) + if vnfc_resource: + target_physical_resource_ids.append( + vnfc_resource.compute_resource.resource_id) + + return target_physical_resource_ids + + def heal_start(self, context, vnf_instance, + heal_vnf_request, grant, + grant_request, **kwargs): + self._init_flag() + stack_id = vnf_instance.instantiated_vnf_info.instance_id + vnf_additional_params = \ + vnf_instance.instantiated_vnf_info.additional_params + master_node = 
vnf_additional_params.get( + 'k8s_cluster_installation_param', {}).get( + 'master_node', {}) + worker_node = vnf_additional_params.get( + 'k8s_cluster_installation_param', {}).get( + 'worker_node', {}) + master_resource_name = self._get_node_resource_name( + vnf_additional_params, master_node) + worker_resource_name = self._get_node_resource_name( + vnf_additional_params, worker_node) + master_username, master_password = self._get_username_pwd( + heal_vnf_request, vnf_instance, 'master') + vim_connection_info = self._get_vim_connection_info( + context, vnf_instance) + heatclient = hc.HeatClient(vim_connection_info.access_info) + if not heal_vnf_request.vnfc_instance_id: + k8s_params = vnf_additional_params.get( + 'k8s_cluster_installation_param', {}) + k8s_vim_name = k8s_params.get('vim_name') + if not k8s_vim_name: + k8s_vim_name = 'kubernetes_vim_' + vnf_instance.id + k8s_vim_info = self._get_vim_by_name( + context, k8s_vim_name) + if k8s_vim_info: + nfvo_plugin = NfvoPlugin() + nfvo_plugin.delete_vim(context, k8s_vim_info.id) + for vim_info in vnf_instance.vim_connection_info: + if vim_info.vim_id == k8s_vim_info.id: + vnf_instance.vim_connection_info.remove(vim_info) + else: + target_physical_resource_ids = \ + self._get_target_physical_resource_ids( + vnf_instance, heal_vnf_request) + self._delete_node_to_be_healed( + heatclient, stack_id, target_physical_resource_ids, + master_username, master_password, worker_resource_name, + master_resource_name, master_node, worker_node) + + def _fix_master_node( + self, not_fixed_master_infos, hosts_str, + fixed_master_infos, proxy, + master_username, master_password, vnf_package_path, + script_path, cluster_ip, pod_cidr, cluster_cidr, + kubeadm_token, ssl_ca_cert_hash, ha_flag): + not_fixed_master_nic_ips = [ + master_ips.get('master_nic_ip') + for master_ips in not_fixed_master_infos.values()] + not_fixed_master_ssh_ips = [ + master_ips.get('master_ssh_ip') + for master_ips in not_fixed_master_infos.values()] + 
fixed_master_nic_ips = [ + master_ips.get('master_nic_ip') + for master_ips in fixed_master_infos.values()] + master_ssh_ips_str = ','.join( + not_fixed_master_nic_ips + fixed_master_nic_ips) + for fixed_master_name, fixed_master_info in \ + fixed_master_infos.items(): + commander, master_ip = self._connect_ssh_scale( + not_fixed_master_ssh_ips, + master_username, master_password) + ssh_command = "sudo kubeadm init phase upload-certs " \ + "--upload-certs" + result = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'certificate_key', 3) + certificate_key = result[-1].replace('\n', '') + commander.close_session() + commander = self._init_commander_and_send_install_scripts( + master_username, master_password, + fixed_master_info.get('master_ssh_ip'), + vnf_package_path, script_path) + self._set_node_ip_in_hosts( + commander, 'heal_end', hosts_str=hosts_str) + if proxy.get('http_proxy') and proxy.get('https_proxy'): + ssh_command = \ + "export http_proxy={http_proxy};" \ + "export https_proxy={https_proxy};" \ + "export no_proxy={no_proxy};" \ + "export ha_flag={ha_flag};" \ + "bash /tmp/install_k8s_cluster.sh " \ + "-m {master_ip} -i {cluster_ip} " \ + "-p {pod_cidr} -a {k8s_cluster_cidr} " \ + "-t {kubeadm_token} -s {ssl_ca_cert_hash} " \ + "-k {certificate_key}".format( + http_proxy=proxy.get('http_proxy'), + https_proxy=proxy.get('https_proxy'), + no_proxy=proxy.get('no_proxy'), + ha_flag=ha_flag, + master_ip=master_ssh_ips_str, + cluster_ip=cluster_ip, + pod_cidr=pod_cidr, + k8s_cluster_cidr=cluster_cidr, + kubeadm_token=kubeadm_token, + ssl_ca_cert_hash=ssl_ca_cert_hash, + certificate_key=certificate_key) + else: + ssh_command = \ + "export ha_flag={ha_flag};" \ + "bash /tmp/install_k8s_cluster.sh " \ + "-m {master_ip} -i {cluster_ip} " \ + "-p {pod_cidr} -a {k8s_cluster_cidr} " \ + "-t {kubeadm_token} -s {ssl_ca_cert_hash} " \ + "-k {certificate_key}".format( + ha_flag=ha_flag, + master_ip=master_ssh_ips_str, + cluster_ip=cluster_ip, + 
pod_cidr=pod_cidr, + k8s_cluster_cidr=cluster_cidr, + kubeadm_token=kubeadm_token, + ssl_ca_cert_hash=ssl_ca_cert_hash, + certificate_key=certificate_key) + self._execute_command( + commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0) + commander.close_session() + for not_fixed_master_name, not_fixed_master in \ + not_fixed_master_infos.items(): + commander = self._init_commander_and_send_install_scripts( + master_username, master_password, + not_fixed_master.get('master_ssh_ip')) + ssh_command = r"sudo sed -i '/server * check/a\ server " \ + "{} {}:6443 check' " \ + "/etc/haproxy/haproxy.cfg" \ + "".format(fixed_master_name, + fixed_master_info.get( + 'master_nic_ip')) + self._execute_command( + commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3) + commander.close_session() + + def _fix_worker_node( + self, fixed_worker_infos, + hosts_str, worker_username, worker_password, + vnf_package_path, script_path, proxy, cluster_ip, + kubeadm_token, ssl_ca_cert_hash, ha_flag): + for fixed_worker_name, fixed_worker in fixed_worker_infos.items(): + commander = self._init_commander_and_send_install_scripts( + worker_username, worker_password, + fixed_worker.get('worker_ssh_ip'), + vnf_package_path, script_path) + self._install_worker_node( + commander, proxy, ha_flag, + fixed_worker.get('worker_nic_ip'), + cluster_ip, kubeadm_token, ssl_ca_cert_hash) + self._set_node_ip_in_hosts( + commander, 'heal_end', hosts_str=hosts_str) + commander.close_session() + + def _heal_and_join_k8s_node( + self, heatclient, stack_id, target_physical_resource_ids, + vnf_additional_params, master_resource_name, master_username, + master_password, vnf_package_path, worker_resource_name, + worker_username, worker_password, cluster_resource_name, + master_node, worker_node, vnf_instance, grant): + master_ssh_cp_name = master_node.get('nic_cp_name') + flag_master = False + flag_worker = False + fixed_master_infos = {} + if master_resource_name == master_ssh_cp_name: + (flag_worker, 
fixed_worker_infos, not_fixed_master_infos, + not_fixed_worker_infos) = \ + self._get_worker_ssh_ip( + heatclient, stack_id, master_resource_name, + worker_resource_name, target_physical_resource_ids) + cluster_ip = heatclient.resource_get( + stack_id, master_node.get('cluster_cp_name')).attributes.get( + 'fixed_ips')[0].get('ip_address') + else: + master_resource_list = self._get_resources_list( + heatclient, stack_id, master_resource_name) + flag_master, fixed_master_infos, not_fixed_master_infos = \ + self._get_master_node_name( + heatclient, master_resource_list, + target_physical_resource_ids, master_node) + + # check pod_affinity flag + if grant: + self.SET_ZONE_ID_FLAG = True + self._check_pod_affinity(heatclient, stack_id, worker_node) + worker_resource_list = self._get_resources_list( + heatclient, stack_id, worker_resource_name) + flag_worker, fixed_worker_infos, not_fixed_worker_infos = \ + self._get_worker_node_name( + heatclient, worker_resource_list, + target_physical_resource_ids, + worker_node, vnf_instance, grant) + if len(master_resource_list) > 1: + cluster_resource = heatclient.resource_get( + stack_id, cluster_resource_name) + cluster_ip = cluster_resource.attributes.get( + 'fixed_ips')[0].get('ip_address') + else: + cluster_ip = list(not_fixed_master_infos.values())[0].get( + 'master_nic_ip') + vm_cidr_list = [] + k8s_cluster_installation_param = vnf_additional_params.get( + 'k8s_cluster_installation_param', {}) + proxy = k8s_cluster_installation_param.get('proxy', {}) + if proxy.get('k8s_node_cidr'): + cidr = proxy.get('k8s_node_cidr') + else: + cidr = list(not_fixed_master_infos.values())[0].get( + 'master_nic_ip') + '/24' + network_ips = ipaddress.ip_network(cidr, False) + for network_ip in network_ips: + vm_cidr_list.append(str(network_ip)) + master_node = k8s_cluster_installation_param.get('master_node') + script_path = k8s_cluster_installation_param.get('script_path') + pod_cidr = master_node.get('pod_cidr', '10.244.0.0/16') + 
cluster_cidr = master_node.get("cluster_cidr", '10.96.0.0/12') + if proxy.get("http_proxy") and proxy.get("https_proxy"): + no_proxy = ','.join(list(filter(None, [ + proxy.get("no_proxy"), pod_cidr, cluster_cidr, + "127.0.0.1", "localhost", + cluster_ip] + vm_cidr_list))) + proxy['no_proxy'] = no_proxy + not_fixed_master_ssh_ips = [ + master_ips.get('master_ssh_ip') + for master_ips in not_fixed_master_infos.values()] + commander, master_ip = self._connect_ssh_scale( + not_fixed_master_ssh_ips, + master_username, master_password) + ssh_command = "sudo kubeadm token create" + kubeadm_token = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'common', 3)[0].replace('\n', '') + + # get hash from one of master node + ssh_command = "sudo openssl x509 -pubkey -in " \ + "/etc/kubernetes/pki/ca.crt | openssl rsa " \ + "-pubin -outform der 2>/dev/null | " \ + "openssl dgst -sha256 -hex | sed 's/^.* //'" + ssl_ca_cert_hash = self._execute_command( + commander, ssh_command, + K8S_CMD_TIMEOUT, 'common', 3)[0].replace('\n', '') + commander.close_session() + if len(fixed_master_infos) + len(not_fixed_master_ssh_ips) == 1: + ha_flag = False + else: + ha_flag = True + + hosts_str = self._get_all_hosts( + not_fixed_master_infos, fixed_master_infos, + not_fixed_worker_infos, fixed_worker_infos) + if flag_master: + self._fix_master_node( + not_fixed_master_infos, hosts_str, + fixed_master_infos, proxy, + master_username, master_password, vnf_package_path, + script_path, cluster_ip, pod_cidr, cluster_cidr, + kubeadm_token, ssl_ca_cert_hash, ha_flag) + if flag_worker: + self._fix_worker_node( + fixed_worker_infos, + hosts_str, worker_username, worker_password, + vnf_package_path, script_path, proxy, cluster_ip, + kubeadm_token, ssl_ca_cert_hash, ha_flag) + + if self.SET_NODE_LABEL_FLAG: + for fixed_worker_name, fixed_worker in fixed_worker_infos.items(): + commander, _ = self._connect_ssh_scale( + not_fixed_master_ssh_ips, + master_username, master_password) + 
self._set_node_label( + commander, fixed_worker.get('worker_nic_ip'), + fixed_worker.get('host_compute'), + fixed_worker.get('zone_id')) + + def _get_all_hosts(self, not_fixed_master_infos, fixed_master_infos, + not_fixed_worker_infos, fixed_worker_infos): + master_hosts = [] + worker_hosts = [] + + not_fixed_master_nic_ips = [ + master_ips.get('master_nic_ip') + for master_ips in not_fixed_master_infos.values()] + fixed_master_nic_ips = [ + master_ips.get('master_nic_ip') + for master_ips in fixed_master_infos.values()] + not_fixed_worker_nic_ips = [ + worker_ips.get('worker_nic_ip') + for worker_ips in not_fixed_worker_infos.values()] + fixed_worker_nic_ips = [ + worker_ips.get('worker_nic_ip') + for worker_ips in fixed_worker_infos.values()] + + for not_fixed_master_ip in not_fixed_master_nic_ips: + master_ip_str = \ + not_fixed_master_ip + ' master' + \ + not_fixed_master_ip.split('.')[-1] + master_hosts.append(master_ip_str) + + for fixed_master_nic_ip in fixed_master_nic_ips: + master_ip_str = \ + fixed_master_nic_ip + ' master' + \ + fixed_master_nic_ip.split('.')[-1] + master_hosts.append(master_ip_str) + + for not_fixed_worker_ip in not_fixed_worker_nic_ips: + worker_ip_str = \ + not_fixed_worker_ip + ' worker' + \ + not_fixed_worker_ip.split('.')[-1] + worker_hosts.append(worker_ip_str) + + for fixed_worker_nic_ip in fixed_worker_nic_ips: + worker_ip_str = \ + fixed_worker_nic_ip + ' worker' + \ + fixed_worker_nic_ip.split('.')[-1] + worker_hosts.append(worker_ip_str) + + hosts_str = '\\n'.join(master_hosts + worker_hosts) + + return hosts_str + + def heal_end(self, context, vnf_instance, + heal_vnf_request, grant, + grant_request, **kwargs): + self._init_flag() + vnf_package_path = vnflcm_utils._get_vnf_package_path( + context, vnf_instance.vnfd_id) + vnf_additional_params = \ + vnf_instance.instantiated_vnf_info.additional_params + master_node = \ + vnf_additional_params.get( + 'k8s_cluster_installation_param', {}).get( + 'master_node', {}) + 
worker_node = \ + vnf_additional_params.get( + 'k8s_cluster_installation_param', {}).get( + 'worker_node', {}) + if not heal_vnf_request.vnfc_instance_id: + self.instantiate_end(context, vnf_instance, + vnf_instance.instantiated_vnf_info, + grant=grant, + grant_request=grant_request, **kwargs) + else: + stack_id = vnf_instance.instantiated_vnf_info.instance_id + master_resource_name = self._get_node_resource_name( + vnf_additional_params, master_node) + worker_resource_name = self._get_node_resource_name( + vnf_additional_params, worker_node) + cluster_resource_name = master_node.get('cluster_cp_name') + master_username, master_password = self._get_username_pwd( + heal_vnf_request, vnf_instance, 'master') + worker_username, worker_password = self._get_username_pwd( + heal_vnf_request, vnf_instance, 'worker') + vim_connection_info = self._get_vim_connection_info( + context, vnf_instance) + heatclient = hc.HeatClient(vim_connection_info.access_info) + + # get all target physical resource id + target_physical_resource_ids = \ + self._get_target_physical_resource_ids( + vnf_instance, heal_vnf_request) + + self._heal_and_join_k8s_node( + heatclient, stack_id, target_physical_resource_ids, + vnf_additional_params, master_resource_name, + master_username, master_password, vnf_package_path, + worker_resource_name, worker_username, worker_password, + cluster_resource_name, master_node, worker_node, + vnf_instance, grant) + + def change_external_connectivity_start( + self, context, vnf_instance, + change_ext_conn_request, grant, + grant_request, **kwargs): + pass + + def change_external_connectivity_end( + self, context, vnf_instance, + change_ext_conn_request, grant, + grant_request, **kwargs): + pass diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/TOSCA-Metadata/TOSCA.meta b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/TOSCA-Metadata/TOSCA.meta new file mode 100644 index 000000000..f43d78b6b --- /dev/null +++ 
b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/TOSCA-Metadata/TOSCA.meta @@ -0,0 +1,14 @@ +TOSCA-Meta-File-Version: 1.0 +Created-by: Dummy User +CSAR-Version: 1.1 +Entry-Definitions: Definitions/sample_free5gc_cnf_on_vm_top.vnfd.yaml + +Name: Scripts/install_k8s_cluster.sh +Content-Type: application/sh +Algorithm: SHA-256 +Hash: + +Name: Scripts/kubernetes_mgmt_free5gc.py +Content-Type: text/x-python +Algorithm: SHA-256 +Hash: diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/UserData/__init__.py b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/UserData/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/UserData/k8s_cluster_user_data.py b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/UserData/k8s_cluster_user_data.py new file mode 100644 index 000000000..0bca8c228 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/UserData/k8s_cluster_user_data.py @@ -0,0 +1,35 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from tacker.vnfm.lcm_user_data.abstract_user_data import AbstractUserData +import tacker.vnfm.lcm_user_data.utils as UserDataUtil + + +class KubernetesClusterUserData(AbstractUserData): + @staticmethod + def instantiate(base_hot_dict=None, + vnfd_dict=None, + inst_req_info=None, + grant_info=None): + api_param = UserDataUtil.get_diff_base_hot_param_from_api( + base_hot_dict, inst_req_info) + initial_param_dict = \ + UserDataUtil.create_initial_param_server_port_dict( + base_hot_dict) + vdu_flavor_dict = \ + UserDataUtil.create_vdu_flavor_capability_name_dict(vnfd_dict) + vdu_image_dict = UserDataUtil.create_sw_image_dict(vnfd_dict) + cpd_vl_dict = UserDataUtil.create_network_dict( + inst_req_info, initial_param_dict) + final_param_dict = UserDataUtil.create_final_param_dict( + initial_param_dict, vdu_flavor_dict, vdu_image_dict, cpd_vl_dict) + return {**final_param_dict, **api_param} diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/simple_kubernetes_param_file.json b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/simple_kubernetes_param_file.json new file mode 100644 index 000000000..9d9de8759 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_on_vm_package/simple_kubernetes_param_file.json @@ -0,0 +1,82 @@ +{ + "flavourId": "simple", + "vimConnectionInfo": [{ + "id": "3cc2c4ff-525c-48b4-94c9-29247223322f", + "vimId": "c3369b54-e376-4423-bb61-afd255900fea", + "vimType": "openstack" + }], + "additionalParams": { + "k8s_cluster_installation_param": { + "script_path": "Scripts/install_k8s_cluster.sh", + "master_node": { + "aspect_id": "master_instance", + "ssh_cp_name": "masterNode_CP1", + "nic_cp_name": "masterNode_CP1", + "username": "ubuntu", + "password": "ubuntu", + "cluster_cp_name": "masterNode_CP1" + }, + "worker_node": { + "aspect_id": "worker_instance", + "ssh_cp_name": "workerNode_CP1", + "nic_cp_name": "workerNode_CP1", + "username": "ubuntu", + "password": "ubuntu" + }, 
+ "proxy": { + "http_proxy": "http://user:password@host:port", + "https_proxy": "http://user:password@host:port" + } + }, + "lcm-operation-user-data": "./UserData/k8s_cluster_user_data.py", + "lcm-operation-user-data-class": "KubernetesClusterUserData" + }, + "extVirtualLinks": [ + { + "id": "net0_master", + "resourceId": "1642ac54-642c-407c-9c7d-e94c55ba5d33", + "extCps": [{ + "cpdId": "masterNode_CP1", + "cpConfig": [{ + "linkPortId": "2642ac54-642c-407c-9c7d-e94c55ba5d33" + }] + }] + }, { + "id": "net0_worker", + "resourceId": "1642ac54-642c-407c-9c7d-e94c55ba5d33", + "extCps": [{ + "cpdId": "workerNode_CP1", + "cpConfig": [{ + "linkPortId": "3642ac54-642c-407c-9c7d-e94c55ba5d33" + }] + }] + }, { + "id": "br1_net_worker", + "resourceId": "bd384a35-c4bb-48f8-8bd0-197c3970d616", + "extCps": [{ + "cpdId": "workerNode_CP2", + "cpConfig": [{ + "linkPortId": "0129ab10-7ce2-6a1f-adc0-acc6e004f4a9" + }] + }] + }, { + "id": "br2_net_worker", + "resourceId": "506966a9-7fc2-4865-993c-e0e167b65ade", + "extCps": [{ + "cpdId": "workerNode_CP3", + "cpConfig": [{ + "linkPortId": "3233274b-e78e-98f6-8540-220963784207" + }] + }] + }, { + "id": "br3_net_worker", + "resourceId": "b8d6848b-0d2e-45ec-ae08-357ab3c0a0de", + "extCps": [{ + "cpdId": "workerNode_CP4", + "cpConfig": [{ + "linkPortId": "9e5f5b25-e397-6983-3fda-eed56a348f49" + }] + }] + } + ] +} diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/etsi_nfv_sol001_common_types.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/etsi_nfv_sol001_common_types.yaml new file mode 100644 index 000000000..15ab39b13 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/etsi_nfv_sol001_common_types.yaml @@ -0,0 +1,202 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 +description: ETSI NFV SOL 001 common types definitions version 2.6.1 +metadata: + template_name: etsi_nfv_sol001_common_types + template_author: ETSI_NFV + 
template_version: 2.6.1 + +data_types: + tosca.datatypes.nfv.L2AddressData: + derived_from: tosca.datatypes.Root + description: Describes the information on the MAC addresses to be assigned to a connection point. + properties: + mac_address_assignment: + type: boolean + description: Specifies if the address assignment is the responsibility of management and orchestration function or not. If it is set to True, it is the management and orchestration function responsibility + required: true + + tosca.datatypes.nfv.L3AddressData: + derived_from: tosca.datatypes.Root + description: Provides information about Layer 3 level addressing scheme and parameters applicable to a CP + properties: + ip_address_assignment: + type: boolean + description: Specifies if the address assignment is the responsibility of management and orchestration function or not. If it is set to True, it is the management and orchestration function responsibility + required: true + floating_ip_activated: + type: boolean + description: Specifies if the floating IP scheme is activated on the Connection Point or not + required: true + ip_address_type: + type: string + description: Defines address type. The address type should be aligned with the address type supported by the layer_protocols properties of the parent VnfExtCp + required: false + constraints: + - valid_values: [ ipv4, ipv6 ] + number_of_ip_address: + type: integer + description: Minimum number of IP addresses to be assigned + required: false + constraints: + - greater_than: 0 + + tosca.datatypes.nfv.AddressData: + derived_from: tosca.datatypes.Root + description: Describes information about the addressing scheme and parameters applicable to a CP + properties: + address_type: + type: string + description: Describes the type of the address to be assigned to a connection point. 
The content type shall be aligned with the address type supported by the layerProtocol property of the connection point + required: true + constraints: + - valid_values: [ mac_address, ip_address ] + l2_address_data: + type: tosca.datatypes.nfv.L2AddressData + description: Provides the information on the MAC addresses to be assigned to a connection point. + required: false + l3_address_data: + type: tosca.datatypes.nfv.L3AddressData + description: Provides the information on the IP addresses to be assigned to a connection point + required: false + + tosca.datatypes.nfv.ConnectivityType: + derived_from: tosca.datatypes.Root + description: describes additional connectivity information of a virtualLink + properties: + layer_protocols: + type: list + description: Identifies the protocol a virtualLink gives access to (ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire).The top layer protocol of the virtualLink protocol stack shall always be provided. The lower layer protocols may be included when there are specific requirements on these layers. + required: true + entry_schema: + type: string + constraints: + - valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ] + flow_pattern: + type: string + description: Identifies the flow pattern of the connectivity + required: false + constraints: + - valid_values: [ line, tree, mesh ] + + tosca.datatypes.nfv.LinkBitrateRequirements: + derived_from: tosca.datatypes.Root + description: describes the requirements in terms of bitrate for a virtual link + properties: + root: + type: integer # in bits per second + description: Specifies the throughput requirement in bits per second of the link (e.g. bitrate of E-Line, root bitrate of E-Tree, aggregate capacity of E-LAN). + required: true + constraints: + - greater_or_equal: 0 + leaf: + type: integer # in bits per second + description: Specifies the throughput requirement in bits per second of leaf connections to the link when applicable to the connectivity type (e.g. 
for E-Tree and E LAN branches). + required: false + constraints: + - greater_or_equal: 0 + + tosca.datatypes.nfv.CpProtocolData: + derived_from: tosca.datatypes.Root + description: Describes and associates the protocol layer that a CP uses together with other protocol and connection point information + properties: + associated_layer_protocol: + type: string + required: true + description: One of the values of the property layer_protocols of the CP + constraints: + - valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ] + address_data: + type: list + description: Provides information on the addresses to be assigned to the CP + entry_schema: + type: tosca.datatypes.nfv.AddressData + required: false + + tosca.datatypes.nfv.VnfProfile: + derived_from: tosca.datatypes.Root + description: describes a profile for instantiating VNFs of a particular NS DF according to a specific VNFD and VNF DF. + properties: + instantiation_level: + type: string + description: Identifier of the instantiation level of the VNF DF to be used for instantiation. If not present, the default instantiation level as declared in the VNFD shall be used. + required: false + min_number_of_instances: + type: integer + description: Minimum number of instances of the VNF based on this VNFD that is permitted to exist for this VnfProfile. + required: true + constraints: + - greater_or_equal: 0 + max_number_of_instances: + type: integer + description: Maximum number of instances of the VNF based on this VNFD that is permitted to exist for this VnfProfile. 
+ required: true + constraints: + - greater_or_equal: 0 + + tosca.datatypes.nfv.Qos: + derived_from: tosca.datatypes.Root + description: describes QoS data for a given VL used in a VNF deployment flavour + properties: + latency: + type: scalar-unit.time #Number + description: Specifies the maximum latency + required: true + constraints: + - greater_than: 0 s + packet_delay_variation: + type: scalar-unit.time #Number + description: Specifies the maximum jitter + required: true + constraints: + - greater_or_equal: 0 s + packet_loss_ratio: + type: float + description: Specifies the maximum packet loss ratio + required: false + constraints: + - in_range: [ 0.0, 1.0 ] + +capability_types: + tosca.capabilities.nfv.VirtualLinkable: + derived_from: tosca.capabilities.Node + description: A node type that includes the VirtualLinkable capability indicates that it can be pointed by tosca.relationships.nfv.VirtualLinksTo relationship type + +relationship_types: + tosca.relationships.nfv.VirtualLinksTo: + derived_from: tosca.relationships.DependsOn + description: Represents an association relationship between the VduCp and VnfVirtualLink node types + valid_target_types: [ tosca.capabilities.nfv.VirtualLinkable ] + +node_types: + tosca.nodes.nfv.Cp: + derived_from: tosca.nodes.Root + description: Provides information regarding the purpose of the connection point + properties: + layer_protocols: + type: list + description: Identifies which protocol the connection point uses for connectivity purposes + required: true + entry_schema: + type: string + constraints: + - valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ] + role: #Name in ETSI NFV IFA011 v0.7.3: cpRole + type: string + description: Identifies the role of the port in the context of the traffic flow patterns in the VNF or parent NS + required: false + constraints: + - valid_values: [ root, leaf ] + description: + type: string + description: Provides human-readable information on the purpose of the connection 
point + required: false + protocol: + type: list + description: Provides information on the addresses to be assigned to the connection point(s) instantiated from this Connection Point Descriptor + required: false + entry_schema: + type: tosca.datatypes.nfv.CpProtocolData + trunk_mode: + type: boolean + description: Provides information about whether the CP instantiated from this Cp is in Trunk mode (802.1Q or other), When operating in "trunk mode", the Cp is capable of carrying traffic for several VLANs. Absence of this property implies that trunkMode is not configured for the Cp i.e. It is equivalent to boolean value "false". + required: false diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/etsi_nfv_sol001_vnfd_types.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/etsi_nfv_sol001_vnfd_types.yaml new file mode 100644 index 000000000..23cdcc7ff --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/etsi_nfv_sol001_vnfd_types.yaml @@ -0,0 +1,1465 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 +description: ETSI NFV SOL 001 vnfd types definitions version 2.6.1 +metadata: + template_name: etsi_nfv_sol001_vnfd_types + template_author: ETSI_NFV + template_version: 2.6.1 + +imports: + - ./etsi_nfv_sol001_common_types.yaml + +data_types: + tosca.datatypes.nfv.VirtualNetworkInterfaceRequirements: + derived_from: tosca.datatypes.Root + description: Describes requirements on a virtual network interface + properties: + name: + type: string + description: Provides a human readable name for the requirement. + required: false + description: + type: string + description: Provides a human readable description of the requirement. + required: false + support_mandatory: + type: boolean + description: Indicates whether fulfilling the constraint is mandatory (TRUE) for successful operation or desirable (FALSE). 
+ required: true + network_interface_requirements: + type: map + description: The network interface requirements. A map of strings that contain a set of key-value pairs that describes the hardware platform specific network interface deployment requirements. + required: true + entry_schema: + type: string + nic_io_requirements: + type: tosca.datatypes.nfv.LogicalNodeData + description: references (couples) the CP with any logical node I/O requirements (for network devices) that may have been created. Linking these attributes is necessary so that so that I/O requirements that need to be articulated at the logical node level can be associated with the network interface requirements associated with the CP. + required: false + + tosca.datatypes.nfv.RequestedAdditionalCapability: + derived_from: tosca.datatypes.Root + description: describes requested additional capability for a particular VDU + properties: + requested_additional_capability_name: + type: string + description: Identifies a requested additional capability for the VDU. + required: true + support_mandatory: + type: boolean + description: Indicates whether the requested additional capability is mandatory for successful operation. + required: true + min_requested_additional_capability_version: + type: string + description: Identifies the minimum version of the requested additional capability. + required: false + preferred_requested_additional_capability_version: + type: string + description: Identifies the preferred version of the requested additional capability. + required: false + target_performance_parameters: + type: map + description: Identifies specific attributes, dependent on the requested additional capability type. 
+ required: true + entry_schema: + type: string + + tosca.datatypes.nfv.VirtualMemory: + derived_from: tosca.datatypes.Root + description: supports the specification of requirements related to virtual memory of a virtual compute resource + properties: + virtual_mem_size: + type: scalar-unit.size + description: Amount of virtual memory. + required: true + virtual_mem_oversubscription_policy: + type: string + description: The memory core oversubscription policy in terms of virtual memory to physical memory on the platform. + required: false + vdu_mem_requirements: + type: map + description: The hardware platform specific VDU memory requirements. A map of strings that contains a set of key-value pairs that describes hardware platform specific VDU memory requirements. + required: false + entry_schema: + type: string + numa_enabled: + type: boolean + description: It specifies the memory allocation to be cognisant of the relevant process/core allocation. + required: false + default: false + + tosca.datatypes.nfv.VirtualCpu: + derived_from: tosca.datatypes.Root + description: Supports the specification of requirements related to virtual CPU(s) of a virtual compute resource + properties: + cpu_architecture: + type: string + description: CPU architecture type. Examples are x86, ARM + required: false + num_virtual_cpu: + type: integer + description: Number of virtual CPUs + required: true + constraints: + - greater_than: 0 + virtual_cpu_clock: + type: scalar-unit.frequency + description: Minimum virtual CPU clock rate + required: false + virtual_cpu_oversubscription_policy: + type: string + description: CPU core oversubscription policy e.g. the relation of virtual CPU cores to physical CPU cores/threads. + required: false + vdu_cpu_requirements: + type: map + description: The hardware platform specific VDU CPU requirements. A map of strings that contains a set of key-value pairs describing VDU CPU specific hardware platform requirements. 
+ required: false + entry_schema: + type: string + virtual_cpu_pinning: + type: tosca.datatypes.nfv.VirtualCpuPinning + description: The virtual CPU pinning configuration for the virtualised compute resource. + required: false + + tosca.datatypes.nfv.VirtualCpuPinning: + derived_from: tosca.datatypes.Root + description: Supports the specification of requirements related to the virtual CPU pinning configuration of a virtual compute resource + properties: + virtual_cpu_pinning_policy: + type: string + description: 'Indicates the policy for CPU pinning. The policy can take values of "static" or "dynamic". In case of "dynamic" the allocation of virtual CPU cores to logical CPU cores is decided by the VIM. (e.g.: SMT (Simultaneous Multi-Threading) requirements). In case of "static" the allocation is requested to be according to the virtual_cpu_pinning_rule.' + required: false + constraints: + - valid_values: [ static, dynamic ] + virtual_cpu_pinning_rule: + type: list + description: Provides the list of rules for allocating virtual CPU cores to logical CPU cores/threads + required: false + entry_schema: + type: string + + tosca.datatypes.nfv.VnfcConfigurableProperties: + derived_from: tosca.datatypes.Root + description: Defines the configurable properties of a VNFC + # properties: + # additional_vnfc_configurable_properties: + # type: tosca.datatypes.nfv.VnfcAdditionalConfigurableProperties + # description: Describes additional configuration for VNFC that + # can be modified using the ModifyVnfInfo operation + # required: false + # derived types are expected to introduce + # additional_vnfc_configurable_properties with its type derived from + # tosca.datatypes.nfv.VnfcAdditionalConfigurableProperties + + tosca.datatypes.nfv.VnfcAdditionalConfigurableProperties: + derived_from: tosca.datatypes.Root + description: VnfcAdditionalConfigurableProperties type is an empty base type for deriving data types for describing additional configurable properties for a given VNFC. 
+ + tosca.datatypes.nfv.VduProfile: + derived_from: tosca.datatypes.Root + description: describes additional instantiation data for a given Vdu.Compute used in a specific deployment flavour. + properties: + min_number_of_instances: + type: integer + description: Minimum number of instances of the VNFC based on this Vdu.Compute that is permitted to exist for a particular VNF deployment flavour. + required: true + constraints: + - greater_or_equal: 0 + max_number_of_instances: + type: integer + description: Maximum number of instances of the VNFC based on this Vdu.Compute that is permitted to exist for a particular VNF deployment flavour. + required: true + constraints: + - greater_or_equal: 0 + + tosca.datatypes.nfv.VlProfile: + derived_from: tosca.datatypes.Root + description: Describes additional instantiation data for a given VL used in a specific VNF deployment flavour. + properties: + max_bitrate_requirements: + type: tosca.datatypes.nfv.LinkBitrateRequirements + description: Specifies the maximum bitrate requirements for a VL instantiated according to this profile. + required: true + min_bitrate_requirements: + type: tosca.datatypes.nfv.LinkBitrateRequirements + description: Specifies the minimum bitrate requirements for a VL instantiated according to this profile. + required: true + qos: + type: tosca.datatypes.nfv.Qos + description: Specifies the QoS requirements of a VL instantiated according to this profile. + required: false + virtual_link_protocol_data: + type: list + description: Specifies the protocol data for a virtual link. 
+ required: false + entry_schema: + type: tosca.datatypes.nfv.VirtualLinkProtocolData + + tosca.datatypes.nfv.VirtualLinkProtocolData: + derived_from: tosca.datatypes.Root + description: describes one protocol layer and associated protocol data for a given virtual link used in a specific VNF deployment flavour + properties: + associated_layer_protocol: + type: string + description: Identifies one of the protocols a virtualLink gives access to (ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire) as specified by the connectivity_type property. + required: true + constraints: + - valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ] + l2_protocol_data: + type: tosca.datatypes.nfv.L2ProtocolData + description: Specifies the L2 protocol data for a virtual link. Shall be present when the associatedLayerProtocol attribute indicates a L2 protocol and shall be absent otherwise. + required: false + l3_protocol_data: + type: tosca.datatypes.nfv.L3ProtocolData + description: Specifies the L3 protocol data for this virtual link. Shall be present when the associatedLayerProtocol attribute indicates a L3 protocol and shall be absent otherwise. + required: false + + tosca.datatypes.nfv.L2ProtocolData: + derived_from: tosca.datatypes.Root + description: describes L2 protocol data for a given virtual link used in a specific VNF deployment flavour. + properties: + name: + type: string + description: Identifies the network name associated with this L2 protocol. + required: false + network_type: + type: string + description: Specifies the network type for this L2 protocol.The value may be overridden at run-time. + required: false + constraints: + - valid_values: [ flat, vlan, vxlan, gre ] + vlan_transparent: + type: boolean + description: Specifies whether to support VLAN transparency for this L2 protocol or not. + required: false + default: false + mtu: + type: integer + description: Specifies the maximum transmission unit (MTU) value for this L2 protocol. 
+ required: false + constraints: + - greater_than: 0 + + tosca.datatypes.nfv.L3ProtocolData: + derived_from: tosca.datatypes.Root + description: describes L3 protocol data for a given virtual link used in a specific VNF deployment flavour. + properties: + name: + type: string + description: Identifies the network name associated with this L3 protocol. + required: false + ip_version: + type: string + description: Specifies IP version of this L3 protocol.The value of the ip_version property shall be consistent with the value of the layer_protocol in the connectivity_type property of the virtual link node. + required: true + constraints: + - valid_values: [ ipv4, ipv6 ] + cidr: + type: string + description: Specifies the CIDR (Classless Inter-Domain Routing) of this L3 protocol. The value may be overridden at run-time. + required: true + ip_allocation_pools: + type: list + description: Specifies the allocation pools with start and end IP addresses for this L3 protocol. The value may be overridden at run-time. + required: false + entry_schema: + type: tosca.datatypes.nfv.IpAllocationPool + gateway_ip: + type: string + description: Specifies the gateway IP address for this L3 protocol. The value may be overridden at run-time. + required: false + dhcp_enabled: + type: boolean + description: Indicates whether DHCP (Dynamic Host Configuration Protocol) is enabled or disabled for this L3 protocol. The value may be overridden at run-time. + required: false + ipv6_address_mode: + type: string + description: Specifies IPv6 address mode. May be present when the value of the ipVersion attribute is "ipv6" and shall be absent otherwise. The value may be overridden at run-time. 
+ required: false + constraints: + - valid_values: [ slaac, dhcpv6-stateful, dhcpv6-stateless ] + + tosca.datatypes.nfv.IpAllocationPool: + derived_from: tosca.datatypes.Root + description: Specifies a range of IP addresses + properties: + start_ip_address: + type: string + description: The IP address to be used as the first one in a pool of addresses derived from the cidr block full IP range + required: true + end_ip_address: + type: string + description: The IP address to be used as the last one in a pool of addresses derived from the cidr block full IP range + required: true + + tosca.datatypes.nfv.InstantiationLevel: + derived_from: tosca.datatypes.Root + description: Describes the scale level for each aspect that corresponds to a given level of resources to be instantiated within a deployment flavour in term of the number VNFC instances + properties: + description: + type: string + description: Human readable description of the level + required: true + scale_info: + type: map # key: aspectId + description: Represents for each aspect the scale level that corresponds to this instantiation level. scale_info shall be present if the VNF supports scaling. + required: false + entry_schema: + type: tosca.datatypes.nfv.ScaleInfo + + tosca.datatypes.nfv.VduLevel: + derived_from: tosca.datatypes.Root + description: Indicates for a given Vdu.Compute in a given level the number of instances to deploy + properties: + number_of_instances: + type: integer + description: Number of instances of VNFC based on this VDU to deploy for this level. 
+ required: true + constraints: + - greater_or_equal: 0 + + tosca.datatypes.nfv.VnfLcmOperationsConfiguration: + derived_from: tosca.datatypes.Root + description: Represents information to configure lifecycle management operations + properties: + instantiate: + type: tosca.datatypes.nfv.VnfInstantiateOperationConfiguration + description: Configuration parameters for the InstantiateVnf operation + required: false + scale: + type: tosca.datatypes.nfv.VnfScaleOperationConfiguration + description: Configuration parameters for the ScaleVnf operation + required: false + scale_to_level: + type: tosca.datatypes.nfv.VnfScaleToLevelOperationConfiguration + description: Configuration parameters for the ScaleVnfToLevel operation + required: false + change_flavour: + type: tosca.datatypes.nfv.VnfChangeFlavourOperationConfiguration + description: Configuration parameters for the changeVnfFlavourOpConfig operation + required: false + heal: + type: tosca.datatypes.nfv.VnfHealOperationConfiguration + description: Configuration parameters for the HealVnf operation + required: false + terminate: + type: tosca.datatypes.nfv.VnfTerminateOperationConfiguration + description: Configuration parameters for the TerminateVnf operation + required: false + operate: + type: tosca.datatypes.nfv.VnfOperateOperationConfiguration + description: Configuration parameters for the OperateVnf operation + required: false + change_ext_connectivity: + type: tosca.datatypes.nfv.VnfChangeExtConnectivityOperationConfiguration + description: Configuration parameters for the changeExtVnfConnectivityOpConfig operation + required: false + + tosca.datatypes.nfv.VnfInstantiateOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the InstantiateVnf operation. 
+ + tosca.datatypes.nfv.VnfScaleOperationConfiguration: + derived_from: tosca.datatypes.Root + description: Represents information that affect the invocation of the ScaleVnf operation + properties: + scaling_by_more_than_one_step_supported: + type: boolean + description: Signals whether passing a value larger than one in the numScalingSteps parameter of the ScaleVnf operation is supported by this VNF. + required: false + default: false + + tosca.datatypes.nfv.VnfScaleToLevelOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the ScaleVnfToLevel operation + properties: + arbitrary_target_levels_supported: + type: boolean + description: Signals whether scaling according to the parameter "scaleInfo" is supported by this VNF + required: true + + tosca.datatypes.nfv.VnfHealOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the HealVnf operation + properties: + causes: + type: list + description: Supported "cause" parameter values + required: false + entry_schema: + type: string + + tosca.datatypes.nfv.VnfTerminateOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the TerminateVnf + properties: + min_graceful_termination_timeout: + type: scalar-unit.time + description: Minimum timeout value for graceful termination of a VNF instance + required: true + max_recommended_graceful_termination_timeout: + type: scalar-unit.time + description: Maximum recommended timeout value that can be needed to gracefully terminate a VNF instance of a particular type under certain conditions, such as maximum load condition. This is provided by VNF provider as information for the operator facilitating the selection of optimal timeout value. 
This value is not used as constraint + required: false + + tosca.datatypes.nfv.VnfOperateOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the OperateVnf operation + properties: + min_graceful_stop_timeout: + type: scalar-unit.time + description: Minimum timeout value for graceful stop of a VNF instance + required: true + max_recommended_graceful_stop_timeout: + type: scalar-unit.time + description: Maximum recommended timeout value that can be needed to gracefully stop a VNF instance of a particular type under certain conditions, such as maximum load condition. This is provided by VNF provider as information for the operator facilitating the selection of optimal timeout value. This value is not used as constraint + required: false + + tosca.datatypes.nfv.ScaleInfo: + derived_from: tosca.datatypes.Root + description: Indicates for a given scaleAspect the corresponding scaleLevel + properties: + scale_level: + type: integer + description: The scale level for a particular aspect + required: true + constraints: + - greater_or_equal: 0 + + tosca.datatypes.nfv.ScalingAspect: + derived_from: tosca.datatypes.Root + properties: + name: + type: string + required: true + description: + type: string + required: true + max_scale_level: + type: integer # positiveInteger + required: true + constraints: + - greater_or_equal: 0 + step_deltas: + type: list + required: false + entry_schema: + type: string # Identifier + + tosca.datatypes.nfv.VnfConfigurableProperties: + derived_from: tosca.datatypes.Root + description: indicates configuration properties for a given VNF (e.g. related to auto scaling and auto healing). + properties: + is_autoscale_enabled: + type: boolean + description: It permits to enable (TRUE)/disable (FALSE) the auto-scaling functionality. 
If the properties is not present for configuring, then VNF property is not supported + required: false + is_autoheal_enabled: + type: boolean + description: It permits to enable (TRUE)/disable (FALSE) the auto-healing functionality. If the properties is not present for configuring, then VNF property is not supported + required: false + # additional_configurable_properties: + # description: It provides VNF specific configurable properties that + # can be modified using the ModifyVnfInfo operation + # required: false + # type: tosca.datatypes.nfv.VnfAdditionalConfigurableProperties + # derived types are expected to introduce + # additional_configurable_properties with its type derived from + # tosca.datatypes.nfv.VnfAdditionalConfigurableProperties + + tosca.datatypes.nfv.VnfAdditionalConfigurableProperties: + derived_from: tosca.datatypes.Root + description: is an empty base type for deriving data types for describing additional configurable properties for a given VNF + + tosca.datatypes.nfv.VnfInfoModifiableAttributes: + derived_from: tosca.datatypes.Root + description: Describes VNF-specific extension and metadata for a given VNF + #properties: + #extensions: + #type: tosca.datatypes.nfv.VnfInfoModifiableAttributesExtensions + #description: "Extension" properties of VnfInfo that are writeable + #required: false + # derived types are expected to introduce + # extensions with its type derived from + # tosca.datatypes.nfv.VnfInfoModifiableAttributesExtensions + #metadata: + #type: tosca.datatypes.nfv.VnfInfoModifiableAttributesMetadata + #description: "Metadata" properties of VnfInfo that are writeable + #required: false + # derived types are expected to introduce + # metadata with its type derived from + # tosca.datatypes.nfv.VnfInfoModifiableAttributesMetadata + + tosca.datatypes.nfv.VnfInfoModifiableAttributesExtensions: + derived_from: tosca.datatypes.Root + description: is an empty base type for deriving data types for describing VNF-specific extension + + 
tosca.datatypes.nfv.VnfInfoModifiableAttributesMetadata: + derived_from: tosca.datatypes.Root + description: is an empty base type for deriving data types for describing VNF-specific metadata + + tosca.datatypes.nfv.LogicalNodeData: + derived_from: tosca.datatypes.Root + description: Describes compute, memory and I/O requirements associated with a particular VDU. + properties: + logical_node_requirements: + type: map + description: The logical node-level compute, memory and I/O requirements. A map of strings that contains a set of key-value pairs that describes hardware platform specific deployment requirements, including the number of CPU cores on this logical node, a memory configuration specific to a logical node or a requirement related to the association of an I/O device with the logical node. + required: false + entry_schema: + type: string + + tosca.datatypes.nfv.SwImageData: + derived_from: tosca.datatypes.Root + description: describes information related to a software image artifact + properties: # in SOL001 v0.8.0: "properties or metadata:" + name: + type: string + description: Name of this software image + required: true + version: + type: string + description: Version of this software image + required: true + checksum: + type: tosca.datatypes.nfv.ChecksumData + description: Checksum of the software image file + required: true + container_format: + type: string + description: The container format describes the container file format in which software image is provided + required: true + constraints: + - valid_values: [ aki, ami, ari, bare, docker, ova, ovf ] + disk_format: + type: string + description: The disk format of a software image is the format of the underlying disk image + required: true + constraints: + - valid_values: [ aki, ami, ari, iso, qcow2, raw, vdi, vhd, vhdx, vmdk ] + min_disk: + type: scalar-unit.size # Number + description: The minimal disk size requirement for this software image + required: true + constraints: + - greater_or_equal: 
0 B + min_ram: + type: scalar-unit.size # Number + description: The minimal RAM requirement for this software image + required: false + constraints: + - greater_or_equal: 0 B + size: + type: scalar-unit.size # Number + description: The size of this software image + required: true + operating_system: + type: string + description: Identifies the operating system used in the software image + required: false + supported_virtualisation_environments: + type: list + description: Identifies the virtualisation environments (e.g. hypervisor) compatible with this software image + required: false + entry_schema: + type: string + + tosca.datatypes.nfv.VirtualBlockStorageData: + derived_from: tosca.datatypes.Root + description: VirtualBlockStorageData describes block storage requirements associated with compute resources in a particular VDU, either as a local disk or as virtual attached storage + properties: + size_of_storage: + type: scalar-unit.size + description: Size of virtualised storage resource + required: true + constraints: + - greater_or_equal: 0 B + vdu_storage_requirements: + type: map + description: The hardware platform specific storage requirements. A map of strings that contains a set of key-value pairs that represents the hardware platform specific storage deployment requirements. 
+ required: false + entry_schema: + type: string + rdma_enabled: + type: boolean + description: Indicates if the storage support RDMA + required: false + default: false + + tosca.datatypes.nfv.VirtualObjectStorageData: + derived_from: tosca.datatypes.Root + description: VirtualObjectStorageData describes object storage requirements associated with compute resources in a particular VDU + properties: + max_size_of_storage: + type: scalar-unit.size + description: Maximum size of virtualized storage resource + required: false + constraints: + - greater_or_equal: 0 B + + tosca.datatypes.nfv.VirtualFileStorageData: + derived_from: tosca.datatypes.Root + description: VirtualFileStorageData describes file storage requirements associated with compute resources in a particular VDU + properties: + size_of_storage: + type: scalar-unit.size + description: Size of virtualized storage resource + required: true + constraints: + - greater_or_equal: 0 B + file_system_protocol: + type: string + description: The shared file system protocol (e.g. 
NFS, CIFS) + required: true + + tosca.datatypes.nfv.VirtualLinkBitrateLevel: + derived_from: tosca.datatypes.Root + description: Describes bitrate requirements applicable to the virtual link instantiated from a particicular VnfVirtualLink + properties: + bitrate_requirements: + type: tosca.datatypes.nfv.LinkBitrateRequirements + description: Virtual link bitrate requirements for an instantiation level or bitrate delta for a scaling step + required: true + + tosca.datatypes.nfv.VnfOperationAdditionalParameters: + derived_from: tosca.datatypes.Root + description: Is an empty base type for deriving data type for describing VNF-specific parameters to be passed when invoking lifecycle management operations + #properties: + + tosca.datatypes.nfv.VnfChangeFlavourOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the ChangeVnfFlavour operation + #properties: + + tosca.datatypes.nfv.VnfChangeExtConnectivityOperationConfiguration: + derived_from: tosca.datatypes.Root + description: represents information that affect the invocation of the ChangeExtVnfConnectivity operation + #properties: + + tosca.datatypes.nfv.VnfMonitoringParameter: + derived_from: tosca.datatypes.Root + description: Represents information on virtualised resource related performance metrics applicable to the VNF. + properties: + name: + type: string + description: Human readable name of the monitoring parameter + required: true + performance_metric: + type: string + description: Identifies the performance metric, according to ETSI GS NFV-IFA 027. 
+ required: true + constraints: + - valid_values: [ v_cpu_usage_mean_vnf, v_cpu_usage_peak_vnf, v_memory_usage_mean_vnf, v_memory_usage_peak_vnf, v_disk_usage_mean_vnf, v_disk_usage_peak_vnf, byte_incoming_vnf_ext_cp, byte_outgoing_vnf_ext_cp, +packet_incoming_vnf_ext_cp, packet_outgoing_vnf_ext_cp ] + collection_period: + type: scalar-unit.time + description: Describes the periodicity at which to collect the performance information. + required: false + constraints: + - greater_than: 0 s + + tosca.datatypes.nfv.VnfcMonitoringParameter: + derived_from: tosca.datatypes.Root + description: Represents information on virtualised resource related performance metrics applicable to the VNF. + properties: + name: + type: string + description: Human readable name of the monitoring parameter + required: true + performance_metric: + type: string + description: Identifies the performance metric, according to ETSI GS NFV-IFA 027. + required: true + constraints: + - valid_values: [ v_cpu_usage_mean_vnf, v_cpu_usage_peak_vnf, v_memory_usage_mean_vnf, v_memory_usage_peak_vnf, v_disk_usage_mean_vnf, v_disk_usage_peak_vnf, byte_incoming_vnf_int_cp, byte_outgoing_vnf_int_cp, packet_incoming_vnf_int_cp, packet_outgoing_vnf_int_cp ] + collection_period: + type: scalar-unit.time + description: Describes the periodicity at which to collect the performance information. + required: false + constraints: + - greater_than: 0 s + + tosca.datatypes.nfv.VirtualLinkMonitoringParameter: + derived_from: tosca.datatypes.Root + description: Represents information on virtualised resource related performance metrics applicable to the VNF. 
+ properties: + name: + type: string + description: Human readable name of the monitoring parameter + required: true + performance_metric: + type: string + description: Identifies a performance metric derived from those defined in ETSI GS NFV-IFA 027.The packetOutgoingVirtualLink and packetIncomingVirtualLink metrics shall be obtained by aggregation the PacketOutgoing and PacketIncoming measurements defined in clause 7.1 of GS NFV-IFA 027 of all virtual link ports attached to the virtual link to which the metrics apply. + required: true + constraints: + - valid_values: [ packet_outgoing_virtual_link, packet_incoming_virtual_link ] + collection_period: + type: scalar-unit.time + description: Describes the periodicity at which to collect the performance information. + required: false + constraints: + - greater_than: 0 s + + tosca.datatypes.nfv.InterfaceDetails: + derived_from: tosca.datatypes.Root + description: information used to access an interface exposed by a VNF + properties: + uri_components: + type: tosca.datatypes.nfv.UriComponents + description: Provides components to build a Uniform Ressource Identifier (URI) where to access the interface end point. + required: false + interface_specific_data: + type: map + description: Provides additional details that are specific to the type of interface considered. + required: false + entry_schema: + type: string + + tosca.datatypes.nfv.UriComponents: + derived_from: tosca.datatypes.Root + description: information used to build a URI that complies with IETF RFC 3986 [8]. + properties: + scheme: + type: string # shall comply with IETF RFC3986 + description: scheme component of a URI. + required: true + authority: + type: tosca.datatypes.nfv.UriAuthority + description: Authority component of a URI + required: false + path: + type: string # shall comply with IETF RFC 3986 + description: path component of a URI. 
+ required: false + query: + type: string # shall comply with IETF RFC 3986 + description: query component of a URI. + required: false + fragment: + type: string # shall comply with IETF RFC 3986 + description: fragment component of a URI. + required: false + + tosca.datatypes.nfv.UriAuthority: + derived_from: tosca.datatypes.Root + description: information that corresponds to the authority component of a URI as specified in IETF RFC 3986 [8] + properties: + user_info: + type: string # shall comply with IETF RFC 3986 + description: user_info field of the authority component of a URI + required: false + host: + type: string # shall comply with IETF RFC 3986 + description: host field of the authority component of a URI + required: false + port: + type: string # shall comply with IETF RFC 3986 + description: port field of the authority component of a URI + required: false + + tosca.datatypes.nfv.ChecksumData: + derived_from: tosca.datatypes.Root + description: Describes information about the result of performing a checksum operation over some arbitrary data + properties: + algorithm: + type: string + description: Describes the algorithm used to obtain the checksum value + required: true + constraints: + - valid_values: [sha-224, sha-256, sha-384, sha-512 ] + hash: + type: string + description: Contains the result of applying the algorithm indicated by the algorithm property to the data to which this ChecksumData refers + required: true + +artifact_types: + tosca.artifacts.nfv.SwImage: + derived_from: tosca.artifacts.Deployment.Image + description: describes the software image which is directly loaded on the virtualisation container realizing of the VDU or is to be loaded on a virtual storage resource. 
+ + tosca.artifacts.Implementation.nfv.Mistral: + derived_from: tosca.artifacts.Implementation + description: artifacts for Mistral workflows + mime_type: application/x-yaml + file_ext: [ yaml ] + +capability_types: + tosca.capabilities.nfv.VirtualBindable: + derived_from: tosca.capabilities.Node + description: Indicates that the node that includes it can be pointed by a tosca.relationships.nfv.VirtualBindsTo relationship type which is used to model the VduHasCpd association + + tosca.capabilities.nfv.VirtualCompute: + derived_from: tosca.capabilities.Node + description: Describes the capabilities related to virtual compute resources + properties: + logical_node: + type: map + description: Describes the Logical Node requirements + required: false + entry_schema: + type: tosca.datatypes.nfv.LogicalNodeData + requested_additional_capabilities: + type: map + description: Describes additional capability for a particular VDU + required: false + entry_schema: + type: tosca.datatypes.nfv.RequestedAdditionalCapability + compute_requirements: + type: map + required: false + entry_schema: + type: string + virtual_memory: + type: tosca.datatypes.nfv.VirtualMemory + description: Describes virtual memory of the virtualized compute + required: true + virtual_cpu: + type: tosca.datatypes.nfv.VirtualCpu + description: Describes virtual CPU(s) of the virtualized compute + required: true + virtual_local_storage: + type: list + description: A list of virtual system disks created and destroyed as part of the VM lifecycle + required: false + entry_schema: + type: tosca.datatypes.nfv.VirtualBlockStorageData + description: virtual system disk definition + + tosca.capabilities.nfv.VirtualStorage: + derived_from: tosca.capabilities.Root + description: Describes the attachment capabilities related to Vdu.Storage + +relationship_types: + tosca.relationships.nfv.VirtualBindsTo: + derived_from: tosca.relationships.DependsOn + description: Represents an association relationship between 
Vdu.Compute and VduCp node types + valid_target_types: [ tosca.capabilities.nfv.VirtualBindable ] + + tosca.relationships.nfv.AttachesTo: + derived_from: tosca.relationships.Root + description: Represents an association relationship between the Vdu.Compute and one of the node types, Vdu.VirtualBlockStorage, Vdu.VirtualObjectStorage or Vdu.VirtualFileStorage + valid_target_types: [ tosca.capabilities.nfv.VirtualStorage ] + +interface_types: + tosca.interfaces.nfv.Vnflcm: + derived_from: tosca.interfaces.Root + description: This interface encompasses a set of TOSCA operations corresponding to the VNF LCM operations defined in ETSI GS NFV-IFA 007 as well as to preamble and postamble procedures to the execution of the VNF LCM operations. + instantiate: + description: Invoked upon receipt of an Instantiate VNF request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + instantiate_start: + description: Invoked before instantiate + instantiate_end: + description: Invoked after instantiate + terminate: + description: Invoked upon receipt Terminate VNF request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + terminate_start: + description: Invoked before terminate + terminate_end: + description: Invoked after terminate + modify_information: + description: Invoked upon receipt of a Modify VNF Information request + modify_information_start: + description: Invoked before modify_information + modify_information_end: + description: Invoked after modify_information + change_flavour: + description: Invoked upon receipt of a Change 
VNF Flavour request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + change_flavour_start: + description: Invoked before change_flavour + change_flavour_end: + description: Invoked after change_flavour + change_external_connectivity: + description: Invoked upon receipt of a Change External VNF Connectivity request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + change_external_connectivity_start: + description: Invoked before change_external_connectivity + change_external_connectivity_end: + description: Invoked after change_external_connectivity + operate: + description: Invoked upon receipt of an Operate VNF request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + operate_start: + description: Invoked before operate + operate_end: + description: Invoked after operate + heal: + description: Invoked upon receipt of a Heal VNF request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + heal_start: + description: Invoked before heal + heal_end: + description: Invoked after heal + scale: + description: Invoked upon receipt of a Scale VNF request + # inputs: + # 
additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + scale_start: + description: Invoked before scale + scale_end: + description: Invoked after scale + scale_to_level: + description: Invoked upon receipt of a Scale VNF to Level request + # inputs: + # additional_parameters: + # type: tosca.datatypes.nfv.VnfOperationAdditionalParameters + # required: false + # derived types are expected to introduce additional_parameters with + # its type derived from + # tosca.datatypes.nfv.VnfOperationAdditionalParameters + scale_to_level_start: + description: Invoked before scale_to_level + scale_to_level_end: + description: Invoked after scale_to_level + +node_types: + tosca.nodes.nfv.VNF: + derived_from: tosca.nodes.Root + description: The generic abstract type from which all VNF specific abstract node types shall be derived to form, together with other node types, the TOSCA service template(s) representing the VNFD + properties: + descriptor_id: # instead of vnfd_id + type: string # GUID + description: Globally unique identifier of the VNFD + required: true + descriptor_version: # instead of vnfd_version + type: string + description: Identifies the version of the VNFD + required: true + provider: # instead of vnf_provider + type: string + description: Provider of the VNF and of the VNFD + required: true + product_name: # instead of vnf_product_name + type: string + description: Human readable name for the VNF Product + required: true + software_version: # instead of vnf_software_version + type: string + description: Software version of the VNF + required: true + product_info_name: # instead of vnf_product_info_name + type: string + description: Human readable name for the VNF Product + required: false + product_info_description: # instead of vnf_product_info_description + 
type: string + description: Human readable description of the VNF Product + required: false + vnfm_info: + type: list + required: true + description: Identifies VNFM(s) compatible with the VNF + entry_schema: + type: string + constraints: + - pattern: (^etsivnfm:v[0-9]?[0-9]\.[0-9]?[0-9]\.[0-9]?[0-9]$)|(^[0-9]+:[a-zA-Z0-9.-]+$) + localization_languages: + type: list + description: Information about localization languages of the VNF + required: false + entry_schema: + type: string #IETF RFC 5646 string + default_localization_language: + type: string #IETF RFC 5646 string + description: Default localization language that is instantiated if no information about selected localization language is available + required: false + #configurable_properties: + #type: tosca.datatypes.nfv.VnfConfigurableProperties + #description: Describes the configurable properties of the VNF + #required: false + # derived types are expected to introduce configurable_properties + # with its type derived from + # tosca.datatypes.nfv.VnfConfigurableProperties + #modifiable_attributes: + #type: tosca.datatypes.nfv.VnfInfoModifiableAttributes + #description: Describes the modifiable attributes of the VNF + #required: false + # derived types are expected to introduce modifiable_attributes + # with its type derived from + # tosca.datatypes.nfv.VnfInfoModifiableAttributes + lcm_operations_configuration: + type: tosca.datatypes.nfv.VnfLcmOperationsConfiguration + description: Describes the configuration parameters for the VNF LCM operations + required: false + monitoring_parameters: + type: list + entry_schema: + type: tosca.datatypes.nfv.VnfMonitoringParameter + description: Describes monitoring parameters applicable to the VNF. 
+ required: false + flavour_id: + type: string + description: Identifier of the Deployment Flavour within the VNFD + required: true + flavour_description: + type: string + description: Human readable description of the DF + required: true + vnf_profile: + type: tosca.datatypes.nfv.VnfProfile + description: Describes a profile for instantiating VNFs of a particular NS DF according to a specific VNFD and VNF DF + required: false + requirements: + - virtual_link: + capability: tosca.capabilities.nfv.VirtualLinkable + relationship: tosca.relationships.nfv.VirtualLinksTo + occurrences: [ 0, 1 ] + # Additional requirements shall be defined in the VNF specific node type (deriving from tosca.nodes.nfv.VNF) corresponding to NS virtual links that need to connect to VnfExtCps + interfaces: + Vnflcm: + type: tosca.interfaces.nfv.Vnflcm + + tosca.nodes.nfv.VnfExtCp: + derived_from: tosca.nodes.nfv.Cp + description: Describes a logical external connection point, exposed by the VNF enabling connection with an external Virtual Link + properties: + virtual_network_interface_requirements: + type: list + description: The actual virtual NIC requirements that is been assigned when instantiating the connection point + required: false + entry_schema: + type: tosca.datatypes.nfv.VirtualNetworkInterfaceRequirements + requirements: + - external_virtual_link: + capability: tosca.capabilities.nfv.VirtualLinkable + relationship: tosca.relationships.nfv.VirtualLinksTo + - internal_virtual_link: #name in ETSI NFV IFA011 v0.7.3: intVirtualLinkDesc + capability: tosca.capabilities.nfv.VirtualLinkable + relationship: tosca.relationships.nfv.VirtualLinksTo + + tosca.nodes.nfv.Vdu.Compute: + derived_from: tosca.nodes.Root + description: Describes the virtual compute part of a VDU which is a construct supporting the description of the deployment and operational behavior of a VNFC + properties: + name: + type: string + description: Human readable name of the VDU + required: true + description: + type: 
string + description: Human readable description of the VDU + required: true + boot_order: + type: list # explicit index (boot index) not necessary, contrary to IFA011 + description: References a node template name from which a valid boot device is created + required: false + entry_schema: + type: string + nfvi_constraints: + type: list + description: Describes constraints on the NFVI for the VNFC instance(s) created from this VDU + required: false + entry_schema: + type: string + monitoring_parameters: + type: list + description: Describes monitoring parameters applicable to a VNFC instantiated from this VDU + required: false + entry_schema: + type: tosca.datatypes.nfv.VnfcMonitoringParameter + #configurable_properties: + #type: tosca.datatypes.nfv.VnfcConfigurableProperties + #required: false + # derived types are expected to introduce + # configurable_properties with its type derived from + # tosca.datatypes.nfv.VnfcConfigurableProperties + vdu_profile: + type: tosca.datatypes.nfv.VduProfile + description: Defines additional instantiation data for the VDU.Compute node + required: true + sw_image_data: + type: tosca.datatypes.nfv.SwImageData + description: Defines information related to a SwImage artifact used by this Vdu.Compute node + required: false # property is required when the node template has an associated artifact of type tosca.artifacts.nfv.SwImage and not required otherwise + boot_data: + type: string + description: Contains a string or a URL to a file contained in the VNF package used to customize a virtualised compute resource at boot time. The bootData may contain variable parts that are replaced by deployment specific values before being sent to the VIM. 
+ required: false + capabilities: + virtual_compute: + type: tosca.capabilities.nfv.VirtualCompute + occurrences: [ 1, 1 ] + virtual_binding: + type: tosca.capabilities.nfv.VirtualBindable + occurrences: [ 1, UNBOUNDED ] + requirements: + - virtual_storage: + capability: tosca.capabilities.nfv.VirtualStorage + relationship: tosca.relationships.nfv.AttachesTo + occurrences: [ 0, UNBOUNDED ] + + tosca.nodes.nfv.Vdu.VirtualBlockStorage: + derived_from: tosca.nodes.Root + description: This node type describes the specifications of requirements related to virtual block storage resources + properties: + virtual_block_storage_data: + type: tosca.datatypes.nfv.VirtualBlockStorageData + description: Describes the block storage characteristics. + required: true + sw_image_data: + type: tosca.datatypes.nfv.SwImageData + description: Defines information related to a SwImage artifact used by this Vdu.Compute node. + required: false # property is required when the node template has an associated artifact of type tosca.artifacts.nfv.SwImage and not required otherwise + capabilities: + virtual_storage: + type: tosca.capabilities.nfv.VirtualStorage + description: Defines the capabilities of virtual_storage. + + tosca.nodes.nfv.Vdu.VirtualObjectStorage: + derived_from: tosca.nodes.Root + description: This node type describes the specifications of requirements related to virtual object storage resources + properties: + virtual_object_storage_data: + type: tosca.datatypes.nfv.VirtualObjectStorageData + description: Describes the object storage characteristics. + required: true + capabilities: + virtual_storage: + type: tosca.capabilities.nfv.VirtualStorage + description: Defines the capabilities of virtual_storage. 
+ + tosca.nodes.nfv.Vdu.VirtualFileStorage: + derived_from: tosca.nodes.Root + description: This node type describes the specifications of requirements related to virtual file storage resources + properties: + virtual_file_storage_data: + type: tosca.datatypes.nfv.VirtualFileStorageData + description: Describes the file storage characteristics. + required: true + capabilities: + virtual_storage: + type: tosca.capabilities.nfv.VirtualStorage + description: Defines the capabilities of virtual_storage. + requirements: + - virtual_link: + capability: tosca.capabilities.nfv.VirtualLinkable + relationship: tosca.relationships.nfv.VirtualLinksTo + #description: Describes the requirements for linking to virtual link + + tosca.nodes.nfv.VduCp: + derived_from: tosca.nodes.nfv.Cp + description: describes network connectivity between a VNFC instance based on this VDU and an internal VL + properties: + bitrate_requirement: + type: integer # in bits per second + description: Bitrate requirement in bit per second on this connection point + required: false + constraints: + - greater_or_equal: 0 + virtual_network_interface_requirements: + type: list + description: Specifies requirements on a virtual network interface realising the CPs instantiated from this CPD + required: false + entry_schema: + type: tosca.datatypes.nfv.VirtualNetworkInterfaceRequirements + order: + type: integer + description: The order of the NIC on the compute instance (e.g.eth2) + required: false + constraints: + - greater_or_equal: 0 + vnic_type: + type: string + description: Describes the type of the virtual network interface realizing the CPs instantiated from this CPD + required: false + constraints: + - valid_values: [ normal, virtio, direct-physical ] + requirements: + - virtual_link: + capability: tosca.capabilities.nfv.VirtualLinkable + relationship: tosca.relationships.nfv.VirtualLinksTo + - virtual_binding: + capability: tosca.capabilities.nfv.VirtualBindable + relationship: 
tosca.relationships.nfv.VirtualBindsTo + node: tosca.nodes.nfv.Vdu.Compute + + tosca.nodes.nfv.VnfVirtualLink: + derived_from: tosca.nodes.Root + description: Describes the information about an internal VNF VL + properties: + connectivity_type: + type: tosca.datatypes.nfv.ConnectivityType + description: Specifies the protocol exposed by the VL and the flow pattern supported by the VL + required: true + description: + type: string + description: Provides human-readable information on the purpose of the VL + required: false + test_access: + type: list + description: Test access facilities available on the VL + required: false + entry_schema: + type: string + constraints: + - valid_values: [ passive_monitoring, active_loopback ] + vl_profile: + type: tosca.datatypes.nfv.VlProfile + description: Defines additional data for the VL + required: true + monitoring_parameters: + type: list + description: Describes monitoring parameters applicable to the VL + required: false + entry_schema: + type: tosca.datatypes.nfv.VirtualLinkMonitoringParameter + capabilities: + virtual_linkable: + type: tosca.capabilities.nfv.VirtualLinkable + +group_types: + tosca.groups.nfv.PlacementGroup: + derived_from: tosca.groups.Root + description: PlacementGroup is used for describing the affinity or anti-affinity relationship applicable between the virtualization containers to be created based on different VDUs, or between internal VLs to be created based on different VnfVirtualLinkDesc(s) + properties: + description: + type: string + description: Human readable description of the group + required: true + members: [ tosca.nodes.nfv.Vdu.Compute, tosca.nodes.nfv.VnfVirtualLink ] + +policy_types: + tosca.policies.nfv.InstantiationLevels: + derived_from: tosca.policies.Root + description: The InstantiationLevels type is a policy type representing all the instantiation levels of resources to be instantiated within a deployment flavour and including default instantiation level in term of the number 
of VNFC instances to be created as defined in ETSI GS NFV-IFA 011 [1]. + properties: + levels: + type: map # key: levelId + description: Describes the various levels of resources that can be used to instantiate the VNF using this flavour. + required: true + entry_schema: + type: tosca.datatypes.nfv.InstantiationLevel + constraints: + - min_length: 1 + default_level: + type: string # levelId + description: The default instantiation level for this flavour. + required: false # required if multiple entries in levels + + tosca.policies.nfv.VduInstantiationLevels: + derived_from: tosca.policies.Root + description: The VduInstantiationLevels type is a policy type representing all the instantiation levels of resources to be instantiated within a deployment flavour in term of the number of VNFC instances to be created from each vdu.Compute. as defined in ETSI GS NFV-IFA 011 [1] + properties: + levels: + type: map # key: levelId + description: Describes the Vdu.Compute levels of resources that can be used to instantiate the VNF using this flavour + required: true + entry_schema: + type: tosca.datatypes.nfv.VduLevel + constraints: + - min_length: 1 + targets: [ tosca.nodes.nfv.Vdu.Compute ] + + tosca.policies.nfv.VirtualLinkInstantiationLevels: + derived_from: tosca.policies.Root + description: The VirtualLinkInstantiationLevels type is a policy type representing all the instantiation levels of virtual link resources to be instantiated within a deployment flavour as defined in ETSI GS NFV-IFA 011 [1]. + properties: + levels: + type: map # key: levelId + description: Describes the virtual link levels of resources that can be used to instantiate the VNF using this flavour. 
+ required: true + entry_schema: + type: tosca.datatypes.nfv.VirtualLinkBitrateLevel + constraints: + - min_length: 1 + targets: [ tosca.nodes.nfv.VnfVirtualLink ] + + tosca.policies.nfv.ScalingAspects: + derived_from: tosca.policies.Root + description: The ScalingAspects type is a policy type representing the scaling aspects used for horizontal scaling as defined in ETSI GS NFV-IFA 011 [1]. + properties: + aspects: + type: map # key: aspectId + description: Describe maximum scale level for total number of scaling steps that can be applied to a particular aspect + required: true + entry_schema: + type: tosca.datatypes.nfv.ScalingAspect + constraints: + - min_length: 1 + + tosca.policies.nfv.VduScalingAspectDeltas: + derived_from: tosca.policies.Root + description: The VduScalingAspectDeltas type is a policy type representing the Vdu.Compute detail of an aspect deltas used for horizontal scaling, as defined in ETSI GS NFV-IFA 011 [1]. + properties: + aspect: + type: string + description: Represents the scaling aspect to which this policy applies + required: true + deltas: + type: map # key: scalingDeltaId + description: Describes the Vdu.Compute scaling deltas to be applied for every scaling steps of a particular aspect. + required: true + entry_schema: + type: tosca.datatypes.nfv.VduLevel + constraints: + - min_length: 1 + targets: [ tosca.nodes.nfv.Vdu.Compute ] + + tosca.policies.nfv.VirtualLinkBitrateScalingAspectDeltas: + derived_from: tosca.policies.Root + description: The VirtualLinkBitrateScalingAspectDeltas type is a policy type representing the VnfVirtualLink detail of an aspect deltas used for horizontal scaling, as defined in ETSI GS NFV-IFA 011 [1]. + properties: + aspect: + type: string + description: Represents the scaling aspect to which this policy applies. + required: true + deltas: + type: map # key: scalingDeltaId + description: Describes the VnfVirtualLink scaling deltas to be applied for every scaling steps of a particular aspect. 
+ required: true + entry_schema: + type: tosca.datatypes.nfv.VirtualLinkBitrateLevel + constraints: + - min_length: 1 + targets: [ tosca.nodes.nfv.VnfVirtualLink ] + + tosca.policies.nfv.VduInitialDelta: + derived_from: tosca.policies.Root + description: The VduInitialDelta type is a policy type representing the Vdu.Compute detail of an initial delta used for horizontal scaling, as defined in ETSI GS NFV-IFA 011 [1]. + properties: + initial_delta: + type: tosca.datatypes.nfv.VduLevel + description: Represents the initial minimum size of the VNF. + required: true + targets: [ tosca.nodes.nfv.Vdu.Compute ] + + tosca.policies.nfv.VirtualLinkBitrateInitialDelta: + derived_from: tosca.policies.Root + description: The VirtualLinkBitrateInitialDelta type is a policy type representing the VnfVirtualLink detail of an initial deltas used for horizontal scaling, as defined in ETSI GS NFV-IFA 011 [1]. + properties: + initial_delta: + type: tosca.datatypes.nfv.VirtualLinkBitrateLevel + description: Represents the initial minimum size of the VNF. + required: true + targets: [ tosca.nodes.nfv.VnfVirtualLink ] + + tosca.policies.nfv.AffinityRule: + derived_from: tosca.policies.Placement + description: The AffinityRule describes the affinity rules applicable for the defined targets + properties: + scope: + type: string + description: scope of the rule is an NFVI_node, an NFVI_PoP, etc. + required: true + constraints: + - valid_values: [ nfvi_node, zone, zone_group, nfvi_pop ] + targets: [ tosca.nodes.nfv.Vdu.Compute, tosca.nodes.nfv.VnfVirtualLink, tosca.groups.nfv.PlacementGroup ] + + tosca.policies.nfv.AntiAffinityRule: + derived_from: tosca.policies.Placement + description: The AntiAffinityRule describes the anti-affinity rules applicable for the defined targets + properties: + scope: + type: string + description: scope of the rule is an NFVI_node, an NFVI_PoP, etc. 
+ required: true + constraints: + - valid_values: [ nfvi_node, zone, zone_group, nfvi_pop ] + targets: [ tosca.nodes.nfv.Vdu.Compute, tosca.nodes.nfv.VnfVirtualLink, tosca.groups.nfv.PlacementGroup ] + + tosca.policies.nfv.SecurityGroupRule: + derived_from: tosca.policies.Root + description: The SecurityGroupRule type is a policy type specified the matching criteria for the ingress and/or egress traffic to/from visited connection points as defined in ETSI GS NFV-IFA 011 [1]. + properties: + description: + type: string + description: Human readable description of the security group rule. + required: false + direction: + type: string + description: The direction in which the security group rule is applied. The direction of 'ingress' or 'egress' is specified against the associated CP. I.e., 'ingress' means the packets entering a CP, while 'egress' means the packets sent out of a CP. + required: false + constraints: + - valid_values: [ ingress, egress ] + default: ingress + ether_type: + type: string + description: Indicates the protocol carried over the Ethernet layer. + required: false + constraints: + - valid_values: [ ipv4, ipv6 ] + default: ipv4 + protocol: + type: string + description: Indicates the protocol carried over the IP layer. Permitted values include any protocol defined in the IANA protocol registry, e.g. TCP, UDP, ICMP, etc. 
+ required: false + constraints: + - valid_values: [ hopopt, icmp, igmp, ggp, ipv4, st, tcp, cbt, egp, igp, bbn_rcc_mon, nvp_ii, pup, argus, emcon, xnet, chaos, udp, mux, dcn_meas, hmp, prm, xns_idp, trunk_1, trunk_2, leaf_1, leaf_2, rdp, irtp, iso_tp4, netblt, mfe_nsp, merit_inp, dccp, 3pc, idpr, xtp, ddp, idpr_cmtp, tp++, il, ipv6, sdrp, ipv6_route, ipv6_frag, idrp, rsvp, gre, dsr, bna, esp, ah, i_nlsp, swipe, narp, mobile, tlsp, skip, ipv6_icmp, ipv6_no_nxt, ipv6_opts, cftp, sat_expak, kryptolan, rvd, ippc, sat_mon, visa, ipcv, cpnx, cphb, wsn, pvp, br_sat_mon, sun_nd, wb_mon, wb_expak, iso_ip, vmtp, secure_vmtp, vines, ttp, iptm, nsfnet_igp, dgp, tcf, eigrp, ospfigp, sprite_rpc, larp, mtp, ax.25, ipip, micp, scc_sp, etherip, encap, gmtp, ifmp, pnni, pim, aris, scps, qnx, a/n, ip_comp, snp, compaq_peer, ipx_in_ip, vrrp, pgm, l2tp, ddx, iatp, stp, srp, uti, smp, sm, ptp, isis, fire, crtp, crudp, sscopmce, iplt, sps, pipe, sctp, fc, rsvp_e2e_ignore, mobility, udp_lite, mpls_in_ip, manet, hip, shim6, wesp, rohc ] + default: tcp + port_range_min: + type: integer + description: Indicates minimum port number in the range that is matched by the security group rule. If a value is provided at design-time, this value may be overridden at run-time based on other deployment requirements or constraints. + required: false + constraints: + - greater_or_equal: 0 + - less_or_equal: 65535 + default: 0 + port_range_max: + type: integer + description: Indicates maximum port number in the range that is matched by the security group rule. If a value is provided at design-time, this value may be overridden at run-time based on other deployment requirements or constraints. 
+ required: false + constraints: + - greater_or_equal: 0 + - less_or_equal: 65535 + default: 65535 + targets: [ tosca.nodes.nfv.VduCp, tosca.nodes.nfv.VnfExtCp ] + + tosca.policies.nfv.SupportedVnfInterface: + derived_from: tosca.policies.Root + description: this policy type represents interfaces produced by a VNF, the details to access them and the applicable connection points to use to access these interfaces + properties: + interface_name: + type: string + description: Identifies an interface produced by the VNF. + required: true + constraints: + - valid_values: [ vnf_indicator, vnf_configuration ] + details: + type: tosca.datatypes.nfv.InterfaceDetails + description: Provide additional data to access the interface endpoint + required: false + targets: [ tosca.nodes.nfv.VnfExtCp, tosca.nodes.nfv.VduCp ] + + diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/free5gc_df_simple.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/free5gc_df_simple.yaml new file mode 100644 index 000000000..bd0ee76e9 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/free5gc_df_simple.yaml @@ -0,0 +1,200 @@ +description: Sample VNF +imports: +- etsi_nfv_sol001_common_types.yaml +- etsi_nfv_sol001_vnfd_types.yaml +- free5gc_types.yaml +topology_template: + inputs: + descriptor_id: + type: string + descriptor_version: + type: string + flavour_description: + type: string + flavour_id: + type: string + product_name: + type: string + provider: + type: string + software_version: + type: string + vnfm_info: + entry_schema: + type: string + type: list + node_templates: + VDU1: + properties: + description: kubernetes controller resource as VDU + name: network-controller-server-unix + vdu_profile: + max_number_of_instances: 1 + min_number_of_instances: 1 + type: tosca.nodes.nfv.Vdu.Compute + VDU2: + properties: + description: kubernetes controller resource as VDU + name: 
free5gc-mongodb + vdu_profile: + max_number_of_instances: 1 + min_number_of_instances: 1 + type: tosca.nodes.nfv.Vdu.Compute + VDU3: + properties: + description: kubernetes controller resource as VDU + name: free5gc-nrf-deployment + vdu_profile: + max_number_of_instances: 1 + min_number_of_instances: 1 + type: tosca.nodes.nfv.Vdu.Compute + VDU4: + properties: + description: kubernetes controller resource as VDU + name: free5gc-udr-deployment + vdu_profile: + max_number_of_instances: 1 + min_number_of_instances: 1 + type: tosca.nodes.nfv.Vdu.Compute + VDU5: + properties: + description: kubernetes controller resource as VDU + name: free5gc-upf-deployment + vdu_profile: + max_number_of_instances: 2 + min_number_of_instances: 1 + type: tosca.nodes.nfv.Vdu.Compute + VDU6: + properties: + description: kubernetes controller resource as VDU + name: free5gc-pcf-deployment + vdu_profile: + max_number_of_instances: 1 + min_number_of_instances: 1 + type: tosca.nodes.nfv.Vdu.Compute + VDU7: + properties: + description: kubernetes controller resource as VDU + name: free5gc-ausf-deployment + vdu_profile: + max_number_of_instances: 1 + min_number_of_instances: 1 + type: tosca.nodes.nfv.Vdu.Compute + VDU8: + properties: + description: kubernetes controller resource as VDU + name: free5gc-nssf-deployment + vdu_profile: + max_number_of_instances: 1 + min_number_of_instances: 1 + type: tosca.nodes.nfv.Vdu.Compute + VDU9: + properties: + description: kubernetes controller resource as VDU + name: free5gc-udm-deployment + vdu_profile: + max_number_of_instances: 1 + min_number_of_instances: 1 + type: tosca.nodes.nfv.Vdu.Compute + VDU10: + properties: + description: kubernetes controller resource as VDU + name: free5gc-amf-deployment + vdu_profile: + max_number_of_instances: 1 + min_number_of_instances: 1 + type: tosca.nodes.nfv.Vdu.Compute + VDU11: + properties: + description: kubernetes controller resource as VDU + name: free5gc-webui-deployment + vdu_profile: + max_number_of_instances: 
1 + min_number_of_instances: 1 + type: tosca.nodes.nfv.Vdu.Compute + VDU12: + properties: + description: kubernetes controller resource as VDU + name: free5gc-smf-deployment + vdu_profile: + max_number_of_instances: 1 + min_number_of_instances: 1 + type: tosca.nodes.nfv.Vdu.Compute + VNF: + type: company.provider.VNF + properties: + flavour_description: A simple flavour + interfaces: + Vnflcm: + instantiate_start: + implementation: mgmt-drivers-free5gc-cnf + instantiate_end: + implementation: mgmt-drivers-free5gc-cnf + scale_end: + implementation: mgmt-drivers-free5gc-cnf + heal_end: + implementation: mgmt-drivers-free5gc-cnf + artifacts: + mgmt-drivers-free5gc-cnf: + description: Management driver for CNF Free5gc + type: tosca.artifacts.Implementation.Python + file: Scripts/free5gc_mgmt_cnf.py + policies: + - scaling_aspects: + properties: + aspects: + upf_aspect: + description: vdu5 scaling aspect + max_scale_level: 2 + name: upf_aspect + step_deltas: + - delta_1 + type: tosca.policies.nfv.ScalingAspects + - upf_initial_delta: + properties: + initial_delta: + number_of_instances: 1 + targets: + - VDU5 + type: tosca.policies.nfv.VduInitialDelta + - upf_scaling_aspect_deltas: + properties: + aspect: upf_aspect + deltas: + delta_1: + number_of_instances: 1 + targets: + - VDU5 + type: tosca.policies.nfv.VduScalingAspectDeltas + - instantiation_levels: + properties: + default_level: instantiation_level_1 + levels: + instantiation_level_1: + description: Smallest size + scale_info: + upf_aspect: + scale_level: 0 + instantiation_level_2: + description: Largest size + scale_info: + upf_aspect: + scale_level: 2 + type: tosca.policies.nfv.InstantiationLevels + - upf_instantiation_levels: + properties: + levels: + instantiation_level_1: + number_of_instances: 1 + instantiation_level_2: + number_of_instances: 2 + targets: + - VDU5 + type: tosca.policies.nfv.VduInstantiationLevels + substitution_mappings: + node_type: company.provider.VNF + properties: + flavour_id: simple + 
requirements: + virtual_link_external: [] +tosca_definitions_version: tosca_simple_yaml_1_2 diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/free5gc_top.vnfd.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/free5gc_top.vnfd.yaml new file mode 100644 index 000000000..9be2544c8 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/free5gc_top.vnfd.yaml @@ -0,0 +1,27 @@ +description: Sample VNF +imports: +- etsi_nfv_sol001_common_types.yaml +- etsi_nfv_sol001_vnfd_types.yaml +- free5gc_types.yaml +- free5gc_df_simple.yaml +topology_template: + inputs: + selected_flavour: + description: VNF deployment flavour selected by the consumer. It is provided + in the API + type: string + node_templates: + VNF: + properties: + descriptor_id: a8bb9888-7c63-4293-a95e-2ef102ae1d3a + descriptor_version: '1.0' + flavour_id: + get_input: selected_flavour + product_name: Sample VNF + provider: Company + software_version: '1.0' + vnfm_info: + - Tacker + requirements: null + type: company.provider.VNF +tosca_definitions_version: tosca_simple_yaml_1_2 diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/free5gc_types.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/free5gc_types.yaml new file mode 100644 index 000000000..313e30135 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Definitions/free5gc_types.yaml @@ -0,0 +1,65 @@ +description: VNF type definition +imports: +- etsi_nfv_sol001_common_types.yaml +- etsi_nfv_sol001_vnfd_types.yaml +node_types: + company.provider.VNF: + derived_from: tosca.nodes.nfv.VNF + interfaces: + Vnflcm: + type: tosca.interfaces.nfv.Vnflcm + properties: + descriptor_id: + constraints: + - valid_values: + - a8bb9888-7c63-4293-a95e-2ef102ae1d3a + default: a8bb9888-7c63-4293-a95e-2ef102ae1d3a + type: string + descriptor_version: + 
constraints: + - valid_values: + - '1.0' + default: '1.0' + type: string + flavour_description: + default: '' + type: string + flavour_id: + constraints: + - valid_values: + - simple + default: simple + type: string + product_name: + constraints: + - valid_values: + - Sample VNF + default: Sample VNF + type: string + provider: + constraints: + - valid_values: + - Company + default: Company + type: string + software_version: + constraints: + - valid_values: + - '1.0' + default: '1.0' + type: string + vnfm_info: + default: + - Tacker + entry_schema: + constraints: + - valid_values: + - Tacker + type: string + type: list + requirements: + - virtual_link_external: + capability: tosca.capabilities.nfv.VirtualLinkable + - virtual_link_internal: + capability: tosca.capabilities.nfv.VirtualLinkable +tosca_definitions_version: tosca_simple_yaml_1_2 diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-amf.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-amf.yaml new file mode 100644 index 000000000..4e992e8df --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-amf.yaml @@ -0,0 +1,93 @@ + +apiVersion: v1 +kind: Service +metadata: + name: ngap-svc + namespace: default +spec: + ports: + - protocol: SCTP + port: 38412 + targetPort: 38412 + selector: + app: free5gc-amf + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: free5gc-amf-deployment + namespace: default +spec: + selector: + matchLabels: + app: free5gc-amf + replicas: 1 + template: + metadata: + labels: + app: free5gc-amf + spec: + containers: + - name: myapp-container + image: eno/free5gc + imagePullPolicy: Never + command: ["/bin/sh"] + args: ["-c", "./bin/amf "] + volumeMounts: + - name: amfcfg + mountPath: /go/src/free5gc/config/amfcfg.yaml + subPath: amfcfg.yaml + ports: + - containerPort: 38412 + initContainers: + - name: init-network-client + 
image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: ["-s=unix:///tmp/vortex.sock", "-b=br1", "-n=eth1", "-i=192.168.2.2/23", "-g=192.168.3.254"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + - name: init-network-client-ngap + image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: ["-s=unix:///tmp/vortex.sock", "-b=br2", "-n=eth2", "-i=192.168.20.20/24"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + volumes: + - name: grpc-sock + hostPath: + path: /tmp/vortex/ + - name: amfcfg + configMap: + name: free5gc-configmap + items: + - key: amfcfg.yaml + path: amfcfg.yaml diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-ausf.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-ausf.yaml new file mode 100644 index 000000000..45c145892 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-ausf.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: free5gc-ausf-deployment + namespace: default +spec: + selector: + matchLabels: + app: free5gc-ausf + replicas: 1 + template: + metadata: + labels: + app: free5gc-ausf + spec: + containers: + - name: myapp-container + image: eno/free5gc + imagePullPolicy: Never + command: ["/bin/sh"] + args: ["-c", "./bin/ausf"] + volumeMounts: + - name: ausfcfg + mountPath: /go/src/free5gc/config/ausfcfg.yaml + subPath: ausfcfg.yaml + - name: 
free5gc + mountPath: /go/src/free5gc/config/free5GC.conf + subPath: free5GC.conf + initContainers: + - name: init-network-client + image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: ["-s=unix:///tmp/vortex.sock", "-b=br1", "-n=eth1", "-i=192.168.2.4/23", "-g=192.168.3.254"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + volumes: + - name: grpc-sock + hostPath: + path: /tmp/vortex/ + - name: ausfcfg + configMap: + name: free5gc-configmap + items: + - key: ausfcfg.yaml + path: ausfcfg.yaml + - name: free5gc + configMap: + name: free5gc-configmap + items: + - key: free5GC.conf + path: free5GC.conf diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-configmap.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-configmap.yaml new file mode 100644 index 000000000..642bdcecd --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-configmap.yaml @@ -0,0 +1,785 @@ +apiVersion: v1 +data: + free5GC.conf: | + db_uri: mongodb://mongodb-svc/free5GC + #all logging levels + #panic + #fatal + #error + #warn + #info + #debug + #trace + logger: + # network function + AMF: + debugLevel: trace + ReportCaller: false + SMF: + debugLevel: trace + ReportCaller: false + UDR: + debugLevel: trace + ReportCaller: false + UDM: + debugLevel: trace + ReportCaller: false + NRF: + debugLevel: trace + ReportCaller: false + PCF: + debugLevel: trace + ReportCaller: false + AUSF: + debugLevel: trace + ReportCaller: false + N3IWF: + debugLevel: trace + ReportCaller: false + # library + NAS: + debugLevel: trace + ReportCaller: false + FSM: + debugLevel: trace + ReportCaller: false + NGAP: + 
debugLevel: trace + ReportCaller: false + NamfComm: + debugLevel: trace + ReportCaller: false + NamfEventExposure: + debugLevel: trace + ReportCaller: false + NsmfPDUSession: + debugLevel: trace + ReportCaller: false + NudrDataRepository: + debugLevel: trace + ReportCaller: false + OpenApi: + debugLevel: trace + ReportCaller: false + Aper: + debugLevel: trace + ReportCaller: false + CommonConsumerTest: + debugLevel: trace + ReportCaller: false + # webui + WEBUI: + debugLevel: trace + ReportCaller: false + amfcfg.yaml: | + info: + version: 1.0.0 + description: AMF initial local configuration + configuration: + amfName: AMF + ngapIpList: + - 192.168.2.2 + - 192.168.20.20 + sbi: + scheme: http + registerIPv4: 192.168.2.2 + bindingIPv4: 192.168.2.2 + port: 29518 + serviceNameList: + - namf-comm + - namf-evts + - namf-mt + - namf-loc + - namf-oam + servedGuamiList: + - plmnId: + mcc: 208 + mnc: 93 + amfId: cafe00 + supportTaiList: + - plmnId: + mcc: 208 + mnc: 93 + tac: 1 + plmnSupportList: + - plmnId: + mcc: 208 + mnc: 93 + snssaiList: + - sst: 1 + sd: 010203 + - sst: 2 + sd: 112233 + supportDnnList: + - internet + nrfUri: http://192.168.2.5:8000 + security: + integrityOrder: + - NIA2 + cipheringOrder: + - NEA0 + networkName: + full: free5GC + short: free + t3502: 720 + t3512: 3600 + non3gppDeregistrationTimer: 3240 + # retransmission timer for paging message + t3513: + enable: true # true or false + expireTime: 6s # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Deregistration Request message + t3522: + enable: true # true or false + expireTime: 6s # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Registration Accept message + t3550: + enable: true # true or false + expireTime: 6s # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Authentication Request/Security Mode Command message + t3560: 
+ enable: true # true or false + expireTime: 6s # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Notification message + t3565: + enable: true # true or false + expireTime: 6s # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + + # the kind of log output + # debugLevel: how detailed to output, value: trace, debug, info, warn, error, fatal, panic + # ReportCaller: enable the caller report or not, value: true or false + logger: + AMF: + debugLevel: debug + ReportCaller: false + NAS: + debugLevel: debug + ReportCaller: false + FSM: + debugLevel: debug + ReportCaller: false + NGAP: + debugLevel: debug + ReportCaller: false + Aper: + debugLevel: info + ReportCaller: false + PathUtil: + debugLevel: debug + ReportCaller: false + OpenApi: + debugLevel: debug + ReportCaller: false + smfcfg.yaml: | + info: + version: 1.0.0 + description: SMF initial local configuration + + configuration: + smfName: SMF + sbi: + scheme: http + registerIPv4: 192.168.2.3 + bindingIPv4: 192.168.2.3 + port: 29502 + tls: + key: free5gc/support/TLS/smf.key + pem: free5gc/support/TLS/smf.pem + serviceNameList: + - nsmf-pdusession + - nsmf-event-exposure + - nsmf-oam + snssaiInfos: # the S-NSSAI (Single Network Slice Selection Assistance Information) list supported by this AMF + - sNssai: # S-NSSAI (Single Network Slice Selection Assistance Information) + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + dnnInfos: # DNN information list + - dnn: internet # Data Network Name + dns: # the IP address of DNS + ipv4: 8.8.8.8 + ipv6: 2001:4860:4860::8888 + ueSubnet: 60.60.0.0/24 # should be CIDR type + - sNssai: # S-NSSAI (Single Network Slice Selection Assistance Information) + sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + dnnInfos: # DNN information 
list + - dnn: internet # Data Network Name + dns: # the IP address of DNS + ipv4: 8.8.8.8 + ipv6: 2001:4860:4860::8888 + ueSubnet: 60.60.10.0/24 # should be CIDR type + pfcp: + addr: 192.168.2.3 + userplane_information: # list of userplane information + up_nodes: # information of userplane node (AN or UPF) + gNB1: # the name of the node + type: AN # the type of the node (AN or UPF) + UPF: # the name of the node + type: UPF # the type of the node (AN or UPF) + node_id: 192.168.2.13 # the IP/FQDN of N4 interface on this UPF (PFCP) + sNssaiUpfInfos: # S-NSSAI information list for this UPF + - sNssai: # S-NSSAI (Single Network Slice Selection Assistance Information) + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + dnnUpfInfoList: # DNN information list for this S-NSSAI + - dnn: internet + interfaces: # Interface list for this UPF + - interfaceType: N3 # the type of the interface (N3 or N9) + endpoints: # the IP address of this N3/N9 interface on this UPF + - 192.168.20.14 + networkInstance: internet # Data Network Name (DNN) + UPF2: # the name of the node + type: UPF # the type of the node (AN or UPF) + node_id: 192.168.2.15 # the IP/FQDN of N4 interface on this UPF (PFCP) + sNssaiUpfInfos: # S-NSSAI information list for this UPF + - sNssai: # S-NSSAI (Single Network Slice Selection Assistance Information) + sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + dnnUpfInfoList: # DNN information list for this S-NSSAI + - dnn: internet + interfaces: # Interface list for this UPF + - interfaceType: N3 # the type of the interface (N3 or N9) + endpoints: # the IP address of this N3/N9 interface on this UPF + - 192.168.20.15 + networkInstance: internet # Data Network Name (DNN) + links: # the topology graph of userplane, A and B represent the two nodes of each link + - A: gNB1 + B: UPF + - A: gNB1 + B: UPF2 + 
nrfUri: http://192.168.2.5:8000 + ausfcfg.yaml: | + info: + version: 1.0.0 + description: AUSF initial local configuration + configuration: + sbi: + scheme: http + registerIPv4: 192.168.2.4 + bindingIPv4: 192.168.2.4 + port: 29509 + serviceNameList: + - nausf-auth + nrfUri: http://192.168.2.5:8000 + plmnSupportList: + - mcc: 208 + mnc: 93 + groupId: ausfGroup001 + logger: + AUSF: + debugLevel: trace + ReportCaller: false + PathUtil: + debugLevel: trace + ReportCaller: false + OpenApi: + debugLevel: trace + ReportCaller: false + nrfcfg.yaml: | + info: + version: 1.0.0 + description: NRF initial local configuration + configuration: + MongoDBName: "free5gc" + MongoDBUrl: "mongodb://mongodb-svc:27017" + DefaultServiceIP: "192.168.2.5" + sbi: # Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: 192.168.2.5 # IP used to serve NFs or register to another NRF + bindingIPv4: 192.168.2.5 # IP used to bind the service + port: 8000 # port used to bind the service + DefaultPlmnId: + mcc: "208" + mnc: "93" + serviceNameList: + - nnrf-nfm + - nnrf-disc + logger: + NRF: + debugLevel: info + ReportCaller: false + PathUtil: + debugLevel: info + ReportCaller: false + OpenApi: + debugLevel: info + ReportCaller: false + MongoDBLibrary: + debugLevel: info + ReportCaller: false + nssfcfg.yaml: | + info: + version: 1.0.0 + description: NSSF initial local configuration + + configuration: + nssfName: NSSF # the name of this NSSF + sbi: # Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: 192.168.2.6 # IP used to register to NRF + bindingIPv4: 192.168.2.6 # IP used to bind the service + port: 8000 # Port used to bind the service + serviceNameList: # the SBI services provided by this SMF, refer to TS 29.531 + - nnssf-nsselection # Nnssf_NSSelection service + - nnssf-nssaiavailability # Nnssf_NSSAIAvailability service + nrfUri: http://192.168.2.5:8000 # a valid URI of NRF + 
supportedPlmnList: # the PLMNs (Public land mobile network) list supported by this NSSF + - mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + supportedNssaiInPlmnList: # Supported S-NSSAI List for each PLMN + - plmnId: # Public Land Mobile Network ID, = + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + supportedSnssaiList: # Supported S-NSSAIs of the PLMN + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiList: # List of available Network Slice Instance (NSI) + - snssai: # S-NSSAI of this NSI + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optonal ID + - nrfId: http://192.168.2.5:8000/nnrf-nfm/v1/nf-instances + nsiId: 10 + - snssai: # S-NSSAI of this NSI + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optonal ID + - nrfId: http://192.168.2.5:8000/nnrf-nfm/v1/nf-instances + nsiId: 11 + - snssai: # S-NSSAI of this NSI + sst: 1 # 
Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optonal ID + - nrfId: http://192.168.2.5:8000/nnrf-nfm/v1/nf-instances + nsiId: 12 + - nrfId: http://192.168.2.5:8000/nnrf-nfm/v1/nf-instances + nsiId: 12 + - snssai: # S-NSSAI of this NSI + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optonal ID + - nrfId: http://192.168.2.5:8000/nnrf-nfm/v1/nf-instances + nsiId: 13 + - snssai: # S-NSSAI of this NSI + sst: 2 # Slice/Service Type (uinteger, range: 0~255) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optonal ID + - nrfId: http://192.168.2.5:8000/nnrf-nfm/v1/nf-instances + nsiId: 20 + - snssai: # S-NSSAI of this NSI + sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optonal ID + - nrfId: http://192.168.2.5:8000/nnrf-nfm/v1/nf-instances + nsiId: 21 + - snssai: # S-NSSAI of this NSI + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optonal ID + - nrfId: http://192.168.2.5:8000/nnrf-nfm/v1/nf-instances + nsiId: 22 + - snssai: # S-NSSAI of this NSI + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 112233 # Slice 
Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optonal ID + - nrfId: http://192.168.2.5:8000/nnrf-nfm/v1/nf-instances + nsiId: 23 + amfSetList: # List of AMF Sets that my be assigned by this NSSF + - amfSetId: 1 # the AMF Set identifier + amfList: # Instance ID of the AMFs in this set + - ffa2e8d7-3275-49c7-8631-6af1df1d9d26 + - 0e8831c3-6286-4689-ab27-1e2161e15cb1 + - a1fba9ba-2e39-4e22-9c74-f749da571d0d + # URI of the NRF used to determine the list of candidate AMF(s) from the AMF Set + nrfAmfSet: http://192.168.2.5:8000/nnrf-nfm/v1/nf-instances + # the Nssai availability data information per TA supported by the AMF + supportedNssaiAvailabilityData: + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33456 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33457 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, 
range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - amfSetId: 2 # the AMF Set identifier + # URI of the NRF used to determine the list of candidate AMF(s) from the AMF Set + nrfAmfSet: http://localhost:8084/nnrf-nfm/v1/nf-instances + # the Nssai availability data information per TA supported by the AMF + supportedNssaiAvailabilityData: + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33456 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33458 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + amfList: # List of AMFs that may be assigned by this NSSF + - nfId: 
469de254-2fe5-4ca0-8381-af3f500af77c # ID of this AMF + # The NSSAI availability data information per TA supported by the AMF + supportedNssaiAvailabilityData: + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33456 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33457 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - nfId: fbe604a8-27b2-417e-bd7c-8a7be2691f8d # ID of this AMF + # The NSSAI availability data information per TA supported by the AMF + supportedNssaiAvailabilityData: + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33458 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 
# Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33459 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - nfId: b9e6e2cb-5ce8-4cb6-9173-a266dd9a2f0c # ID of this AMF + # The NSSAI availability data information per TA supported by the AMF + supportedNssaiAvailabilityData: + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33456 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 
digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33458 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + taList: # List of supported tracking area and their related information of this NSSF instance + - tai: # Tracking Area Identity + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33456 # Tracking Area Code (uinteger, range: 0~16777215) + accessType: 3GPP_ACCESS # Access type of the tracking area + supportedSnssaiList: # List of supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - tai: # Tracking Area Identity + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33457 # Tracking Area Code (uinteger, range: 0~16777215) + accessType: 3GPP_ACCESS # Access type of the tracking area + supportedSnssaiList: # List of supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type 
(uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33458 # Tracking Area Code (uinteger, range: 0~16777215) + accessType: 3GPP_ACCESS # Access type of the tracking area + supportedSnssaiList: # List of supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + restrictedSnssaiList: # List of restricted S-NSSAIs of the tracking area + - homePlmnId: # Home PLMN identifier + mcc: 310 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 560 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + sNssaiList: # the S-NSSAIs List + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33459 # Tracking Area Code (uinteger, range: 0~16777215) + accessType: 3GPP_ACCESS # Access type of the tracking area + supportedSnssaiList: # List of supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service 
Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + restrictedSnssaiList: # List of restricted S-NSSAIs of the tracking area + - homePlmnId: # Home PLMN identifier + mcc: 310 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 560 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + sNssaiList: # the S-NSSAIs List + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + # List of mappings of S-NSSAI in the serving network and the value of the home network + mappingListFromPlmn: + - operatorName: NTT Docomo # Home PLMN name + homePlmnId: # Home PLMN identifier + mcc: 440 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 10 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + mappingOfSnssai: # List of S-NSSAIs mapping + - servingSnssai: # S-NSSAI in the serving network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + homeSnssai: # S-NSSAI in the home network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 1 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - servingSnssai: # S-NSSAI in the serving network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + homeSnssai: # S-NSSAI in the home network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - servingSnssai: # S-NSSAI in the serving network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + 
homeSnssai: # S-NSSAI in the home network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000004 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - servingSnssai: # S-NSSAI in the serving network + sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + homeSnssai: # S-NSSAI in the home network + sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - operatorName: AT&T Mobility # Home PLMN name + homePlmnId: # Home PLMN identifier + mcc: 310 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 560 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + mappingOfSnssai: + - servingSnssai: # S-NSSAI in the serving network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + homeSnssai: # S-NSSAI in the home network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - servingSnssai: # S-NSSAI in the serving network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + homeSnssai: # S-NSSAI in the home network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + + # the kind of log output + # debugLevel: how detailed to output, value: trace, debug, info, warn, error, fatal, panic + # ReportCaller: enable the caller report or not, value: true or false + logger: + NSSF: + debugLevel: info + ReportCaller: false + PathUtil: + debugLevel: info + ReportCaller: false + OpenApi: + debugLevel: info + ReportCaller: false + pcfcfg.yaml: | + info: + version: 1.0.0 + description: PCF initial local configuration + + configuration: + pcfName: PCF # the name of 
this PCF + sbi: # Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: 192.168.2.9 # IP used to register to NRF + bindingIPv4: 192.168.2.9 # IP used to bind the service + port: 8000 # port used to bind the service + timeFormat: 2019-01-02 15:04:05 # time format of this PCF + defaultBdtRefId: BdtPolicyId- # BDT Reference ID, indicating transfer policies of background data transfer. + nrfUri: http://192.168.2.5:8000 # a valid URI of NRF + serviceList: # the SBI services provided by this PCF, refer to TS 29.507 + - serviceName: npcf-am-policy-control # Npcf_AMPolicyControl service + - serviceName: npcf-smpolicycontrol # Npcf_SMPolicyControl service + suppFeat: 3fff # the features supported by Npcf_SMPolicyControl, name defined in TS 29.512 5.8-1, value defined in TS 29.571 5.2.2 + - serviceName: npcf-bdtpolicycontrol # Npcf_BDTPolicyControl service + - serviceName: npcf-policyauthorization # Npcf_PolicyAuthorization service + suppFeat: 3 # the features supported by Npcf_PolicyAuthorization, name defined in TS 29.514 5.8-1, value defined in TS 29.571 5.2.2 + - serviceName: npcf-eventexposure # Npcf_EventExposure service + - serviceName: npcf-ue-policy-control # Npcf_UEPolicyControl service + mongodb: # the mongodb connected by this PCF + name: free5gc # name of the mongodb + url: mongodb://mongodb-svc:27017 # a valid URL of the mongodb + + # the kind of log output + # debugLevel: how detailed to output, value: trace, debug, info, warn, error, fatal, panic + # ReportCaller: enable the caller report or not, value: true or false + logger: + PCF: + debugLevel: info + ReportCaller: false + PathUtil: + debugLevel: info + ReportCaller: false + OpenApi: + debugLevel: info + ReportCaller: false + udmcfg.yaml: | + info: + version: 1.0.0 + description: UDM initial local configuration + + configuration: + serviceNameList: # the SBI services provided by this UDM, refer to TS 29.503 + - nudm-sdm # Nudm_SubscriberDataManagement 
service + - nudm-uecm # Nudm_UEContextManagement service + - nudm-ueau # Nudm_UEAuthenticationManagement service + - nudm-ee # Nudm_EventExposureManagement service + - nudm-pp # Nudm_ParameterProvisionDataManagement service + sbi: # Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: 192.168.2.7 # IP used to register to NRF + bindingIPv4: 192.168.2.7 # IP used to bind the service + port: 8000 # Port used to bind the service + tls: # the local path of TLS key + log: free5gc/udmsslkey.log # UDM keylog + pem: free5gc/support/TLS/udm.pem # UDM TLS Certificate + key: free5gc/support/TLS/udm.key # UDM TLS Private key + nrfUri: http://192.168.2.5:8000 # a valid URI of NRF + + # test data set from TS33501-f60 Annex C.4 + keys: + udmProfileAHNPublicKey: 5a8d38864820197c3394b92613b20b91633cbd897119273bf8e4a6f4eec0a650 + udmProfileAHNPrivateKey: c53c22208b61860b06c62e5406a7b330c2b577aa5558981510d128247d38bd1d + udmProfileBHNPublicKey: 0472DA71976234CE833A6907425867B82E074D44EF907DFB4B3E21C1C2256EBCD15A7DED52FCBB097A4ED250E036C7B9C8C7004C4EEDC4F068CD7BF8D3F900E3B4 + udmProfileBHNPrivateKey: F1AB1074477EBCC7F554EA1C5FC368B1616730155E0041AC447D6301975FECDA + + # the kind of log output + # debugLevel: how detailed to output, value: trace, debug, info, warn, error, fatal, panic + # ReportCaller: enable the caller report or not, value: true or false + logger: + UDM: + debugLevel: trace + ReportCaller: false + OpenApi: + debugLevel: trace + ReportCaller: false + PathUtil: + debugLevel: trace + ReportCaller: false + udrcfg.yaml: | + info: + version: 1.0.0 + description: UDR initial local configuration + configuration: + sbi: + scheme: http + registerIPv4: 192.168.2.8 + bindingIPv4: 192.168.2.8 + port: 29504 + mongodb: + name: free5gc + url: mongodb://mongodb-svc:27017 + nrfUri: http://192.168.2.5:8000 + webuicfg.yaml: | + info: + version: 1.0.0 + description: WebUI initial local configuration + + configuration: + mongodb: + name: 
free5gc + url: mongodb://mongodb-svc:27017 + +kind: ConfigMap +metadata: + name: free5gc-configmap + namespace: default diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-mongodb.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-mongodb.yaml new file mode 100644 index 000000000..c9ccb1433 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-mongodb.yaml @@ -0,0 +1,64 @@ +apiVersion: v1 +kind: Service +metadata: + name: mongodb-svc + namespace: default +spec: + ports: + - port: 27017 + selector: + app: free5gc-mongodb +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: free5gc-mongodb + namespace: default +spec: + selector: + matchLabels: + app: free5gc-mongodb + template: + metadata: + labels: + app: free5gc-mongodb + spec: + containers: + - image: free5gmano/free5gc-mongodb + name: free5gc-mongodb + ports: + - containerPort: 27017 + name: mongodb + volumeMounts: + - name: mongodb-persistent-storage + mountPath: /data/db + volumes: + - name: mongodb-persistent-storage + persistentVolumeClaim: + claimName: mongodb-pv-claim +--- +kind: PersistentVolume +apiVersion: v1 +metadata: + name: mongodb-pv-volume + labels: + type: local +spec: + capacity: + storage: 1Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/mnt/free5gmongodb" +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: mongodb-pv-claim + namespace: default +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-nrf.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-nrf.yaml new file mode 100644 index 000000000..05296162d --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-nrf.yaml @@ -0,0 +1,65 
@@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: free5gc-nrf-deployment + namespace: default +spec: + selector: + matchLabels: + app: free5gc-nrf + replicas: 1 + template: + metadata: + labels: + app: free5gc-nrf + spec: + containers: + - name: myapp-container + image: eno/free5gc + imagePullPolicy: Never + command: ["/bin/sh"] + args: ["-c", "./bin/nrf"] + volumeMounts: + - name: nrfcfg + mountPath: /go/src/free5gc/config/nrfcfg.yaml + subPath: nrfcfg.yaml + - name: free5gc + mountPath: /go/src/free5gc/config/free5GC.conf + subPath: free5GC.conf + initContainers: + - name: init-network-client + image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: ["-s=unix:///tmp/vortex.sock", "-b=br1", "-n=eth1", "-i=192.168.2.5/23", "-g=192.168.3.254"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + volumes: + - name: grpc-sock + hostPath: + path: /tmp/vortex/ + - name: nrfcfg + configMap: + name: free5gc-configmap + items: + - key: nrfcfg.yaml + path: nrfcfg.yaml + - name: free5gc + configMap: + name: free5gc-configmap + items: + - key: free5GC.conf + path: free5GC.conf diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-nssf.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-nssf.yaml new file mode 100644 index 000000000..4bdfd69a6 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-nssf.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: free5gc-nssf-deployment + namespace: default +spec: + selector: + matchLabels: + app: free5gc-nssf + replicas: 1 + template: + metadata: + labels: + app: free5gc-nssf + spec: + containers: 
+ - name: myapp-container + image: eno/free5gc + imagePullPolicy: Never + command: ["/bin/sh"] + args: ["-c", "./bin/nssf"] + volumeMounts: + - name: nssfcfg + mountPath: /go/src/free5gc/config/nssfcfg.yaml + subPath: nssfcfg.yaml + - name: free5gc + mountPath: /go/src/free5gc/config/free5GC.conf + subPath: free5GC.conf + initContainers: + - name: init-network-client + image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: ["-s=unix:///tmp/vortex.sock", "-b=br1", "-n=eth1", "-i=192.168.2.6/23", "-g=192.168.3.254"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + volumes: + - name: grpc-sock + hostPath: + path: /tmp/vortex/ + - name: nssfcfg + configMap: + name: free5gc-configmap + items: + - key: nssfcfg.yaml + path: nssfcfg.yaml + - name: free5gc + configMap: + name: free5gc-configmap + items: + - key: free5GC.conf + path: free5GC.conf diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-pcf.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-pcf.yaml new file mode 100644 index 000000000..00fa2bdda --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-pcf.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: free5gc-pcf-deployment + namespace: default +spec: + selector: + matchLabels: + app: free5gc-pcf + replicas: 1 + template: + metadata: + labels: + app: free5gc-pcf + spec: + containers: + - name: myapp-container + image: eno/free5gc + imagePullPolicy: Never + command: ["/bin/sh"] + args: ["-c", "./bin/pcf"] + volumeMounts: + - name: pcfcfg + mountPath: /go/src/free5gc/config/pcfcfg.yaml + subPath: pcfcfg.yaml + - name: free5gc + 
mountPath: /go/src/free5gc/config/free5GC.conf + subPath: free5GC.conf + initContainers: + - name: init-network-client + image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: ["-s=unix:///tmp/vortex.sock", "-b=br1", "-n=eth1", "-i=192.168.2.9/23", "-g=192.168.3.254", "--route-gw=192.168.20.0/24,192.168.3.254"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + volumes: + - name: grpc-sock + hostPath: + path: /tmp/vortex/ + - name: pcfcfg + configMap: + name: free5gc-configmap + items: + - key: pcfcfg.yaml + path: pcfcfg.yaml + - name: free5gc + configMap: + name: free5gc-configmap + items: + - key: free5GC.conf + path: free5GC.conf diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-smf.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-smf.yaml new file mode 100644 index 000000000..0d513c39b --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-smf.yaml @@ -0,0 +1,66 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: free5gc-smf-deployment + namespace: default +spec: + selector: + matchLabels: + app: free5gc-smf + replicas: 1 + template: + metadata: + labels: + app: free5gc-smf + spec: + containers: + - name: myapp-container + image: eno/free5gc + imagePullPolicy: Never + command: ["/bin/sh"] + args: ["-c", "./bin/smf"] + volumeMounts: + - name: smfcfg + mountPath: /go/src/free5gc/config/smfcfg.yaml + subPath: smfcfg.yaml + - name: free5gc + mountPath: /go/src/free5gc/config/free5GC.conf + subPath: free5GC.conf + initContainers: + - name: init-network-client + image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: 
["-s=unix:///tmp/vortex.sock", "-b=br1", "-n=eth1", "-i=192.168.2.3/23", "-g=192.168.3.254", "--route-gw=192.168.20.0/24,192.168.3.254"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + + volumes: + - name: grpc-sock + hostPath: + path: /tmp/vortex/ + - name: smfcfg + configMap: + name: free5gc-configmap + items: + - key: smfcfg.yaml + path: smfcfg.yaml + - name: free5gc + configMap: + name: free5gc-configmap + items: + - key: free5GC.conf + path: free5GC.conf diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-udm.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-udm.yaml new file mode 100644 index 000000000..480aa53fc --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-udm.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: free5gc-udm-deployment + namespace: default +spec: + selector: + matchLabels: + app: free5gc-udm + replicas: 1 + template: + metadata: + labels: + app: free5gc-udm + spec: + containers: + - name: myapp-container + image: eno/free5gc + imagePullPolicy: Never + command: ["/bin/sh"] + args: ["-c", "./bin/udm"] + volumeMounts: + - name: udmcfg + mountPath: /go/src/free5gc/config/udmcfg.yaml + subPath: udmcfg.yaml + - name: free5gc + mountPath: /go/src/free5gc/config/free5GC.conf + subPath: free5GC.conf + initContainers: + - name: init-network-client + image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: ["-s=unix:///tmp/vortex.sock", "-b=br1", "-n=eth1", "-i=192.168.2.7/23", "-g=192.168.3.254"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: 
+ fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + volumes: + - name: grpc-sock + hostPath: + path: /tmp/vortex/ + - name: udmcfg + configMap: + name: free5gc-configmap + items: + - key: udmcfg.yaml + path: udmcfg.yaml + - name: free5gc + configMap: + name: free5gc-configmap + items: + - key: free5GC.conf + path: free5GC.conf diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-udr.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-udr.yaml new file mode 100644 index 000000000..a55cdf269 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-udr.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: free5gc-udr-deployment + namespace: default +spec: + selector: + matchLabels: + app: free5gc-udr + replicas: 1 + template: + metadata: + labels: + app: free5gc-udr + spec: + containers: + - name: myapp-container + image: eno/free5gc + imagePullPolicy: Never + command: ["/bin/sh"] + args: ["-c", "./bin/udr"] + volumeMounts: + - name: udrcfg + mountPath: /go/src/free5gc/config/udrcfg.yaml + subPath: udrcfg.yaml + - name: free5gc + mountPath: /go/src/free5gc/config/free5GC.conf + subPath: free5GC.conf + initContainers: + - name: init-network-client + image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: ["-s=unix:///tmp/vortex.sock", "-b=br1", "-n=eth1", "-i=192.168.2.8/23", "-g=192.168.3.254"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + volumes: + - name: grpc-sock + hostPath: + path: /tmp/vortex/ + - name: udrcfg + 
configMap: + name: free5gc-configmap + items: + - key: udrcfg.yaml + path: udrcfg.yaml + - name: free5gc + configMap: + name: free5gc-configmap + items: + - key: free5GC.conf + path: free5GC.conf diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-upf.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-upf.yaml new file mode 100644 index 000000000..18e86e0e6 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-upf.yaml @@ -0,0 +1,105 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: free5gc-upf-deployment + namespace: default +spec: + selector: + matchLabels: + app: free5gc-upf + replicas: 1 + template: + metadata: + labels: + app: free5gc-upf + spec: + containers: + - name: myapp-container + image: eno/free5gc + securityContext: + privileged: true + imagePullPolicy: Never + command: ["/bin/sh"] + args: ["-c", "ip addr add 60.60.0.100 dev lo && ip link set dev lo up && ./NFs/upf/build/bin/free5gc-upfd -f /go/src/free5gc/NFs/upf/build/config/upfcfg.yaml"] + volumeMounts: + - mountPath: /dev/net/tun + name: tun-volume + - name: free5gc + mountPath: /go/src/free5gc/config/free5GC.conf + subPath: free5GC.conf + ports: + - containerPort: 2152 + initContainers: + - name: init-network-client-dn + image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: ["-s=unix:///tmp/vortex.sock", "-b=br3", "-n=eth3", "-i=192.168.52.254/24"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + - name: init-network-client-gtp + image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: ["-s=unix:///tmp/vortex.sock", "-b=br2", "-n=eth1", 
"-i=192.168.20.14/24", "-g=192.168.20.254"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + - name: init-network-client + image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: ["-s=unix:///tmp/vortex.sock", "-b=br1", "-n=eth2", "-i=192.168.2.13/23", "-g=192.168.2.254"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + volumes: + - name: grpc-sock + hostPath: + path: /tmp/vortex/ + - name: tun-volume + hostPath: + path: /dev/net/tun + - name: free5gc + configMap: + name: free5gc-configmap + items: + - key: free5GC.conf + path: free5GC.conf diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-webui.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-webui.yaml new file mode 100644 index 000000000..9fbb08f6e --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/free5gc-webui.yaml @@ -0,0 +1,84 @@ + +apiVersion: v1 +kind: Service +metadata: + name: webui-svc + namespace: default +spec: + ports: + - name: http + protocol: TCP + port: 5000 + targetPort: 5000 + nodePort: 30050 + selector: + app: free5gc-webui + type: NodePort +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: free5gc-webui-deployment + namespace: default +spec: + selector: + matchLabels: + app: free5gc-webui + replicas: 1 + template: + metadata: + labels: + app: free5gc-webui + spec: + containers: + - name: myapp-container + image: eno/free5gc + imagePullPolicy: 
Never + command: ["/bin/sh"] + args: ["-c", "./webconsole/bin/webconsole"] + volumeMounts: + - name: webuicfg + mountPath: /go/src/free5gc/config/webuicfg.yaml + subPath: webuicfg.yaml + - name: free5gc + mountPath: /go/src/free5gc/config/free5GC.conf + subPath: free5GC.conf + ports: + - containerPort: 5000 + initContainers: + - name: init-network-client + image: sdnvortex/network-controller:v0.4.9 + command: ["/go/bin/client"] + args: ["-s=unix:///tmp/vortex.sock", "-b=br1", "-n=eth1", "-i=192.168.2.11/23", "-g=192.168.3.254"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_UUID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - mountPath: /tmp/ + name: grpc-sock + volumes: + - name: grpc-sock + hostPath: + path: /tmp/vortex/ + - name: webuicfg + configMap: + name: free5gc-configmap + items: + - key: webuicfg.yaml + path: webuicfg.yaml + - name: free5gc + configMap: + name: free5gc-configmap + items: + - key: free5GC.conf + path: free5GC.conf diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/unix-daemonset.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/unix-daemonset.yaml new file mode 100644 index 000000000..d097680c1 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Files/kubernetes/unix-daemonset.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: network-controller-server-unix + namespace: kube-system +spec: + selector: + matchLabels: + name: network-controller-server-unix + template: + metadata: + labels: + name: network-controller-server-unix + spec: + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: network-controller-server-unix + image: sdnvortex/network-controller:v0.4.9 + securityContext: + privileged: true + 
command: ["/go/bin/server"] + args: ["-unix=/tmp/vortex.sock", "-netlink-gc"] + volumeMounts: + - mountPath: /var/run/docker/netns:shared + name: docker-ns + #mountPropagation: Bidirectional + - mountPath: /var/run/docker.sock + name: docker-sock + - mountPath: /var/run/openvswitch/db.sock + name: ovs-sock + - mountPath: /tmp/ + name: grpc-sock + volumes: + - name: docker-ns + hostPath: + path: /run/docker/netns + - name: docker-sock + hostPath: + path: /run/docker.sock + - name: ovs-sock + hostPath: + path: /run/openvswitch/db.sock + - name: grpc-sock + hostPath: + path: /tmp/vortex + hostNetwork: true diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Scripts/free5gc_mgmt_cnf.py b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Scripts/free5gc_mgmt_cnf.py new file mode 100644 index 000000000..cd168a982 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/Scripts/free5gc_mgmt_cnf.py @@ -0,0 +1,785 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os +import time + +import eventlet +from oslo_log import log as logging +import paramiko +import yaml + +from tacker.common import cmd_executer +from tacker.common import exceptions +from tacker.common import log +from tacker.vnflcm import utils as vnflcm_utils +from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver + +LOG = logging.getLogger(__name__) +FREE5GC_CMD_TIMEOUT = 30 + + +class Free5gcMgmtDriverCnf(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver): + def get_type(self): + return 'mgmt-drivers-free5gc-cnf' + + def get_name(self): + return 'mgmt-drivers-free5gc-cnf' + + def get_description(self): + return 'Tacker VNFMgmt Free5gc CNF Driver' + + @log.log + def instantiate_start(self, context, vnf_instance, + instantiate_vnf_request, grant, + grant_request, **kwargs): + vnf_package_path = vnflcm_utils._get_vnf_package_path( + context, vnf_instance.vnfd_id) + kubernetes_file_paths = instantiate_vnf_request.\ + additional_params.get('lcm-kubernetes-def-files') + for path in kubernetes_file_paths: + if 'configmap' in path: + configmap_path = os.path.join(vnf_package_path, path) + if 'amf' in path: + amf_path = os.path.join(vnf_package_path, path) + if 'smf' in path: + smf_path = os.path.join(vnf_package_path, path) + if 'upf' in path: + upf_path = os.path.join(vnf_package_path, path) + with open(amf_path) as f: + results = yaml.safe_load_all(f) + for result in results: + if result.get('kind') == 'Deployment': + amf_init_containers = result.get('spec').get( + 'template').get('spec').get('initContainers') + amf_ip_list = [] + for amf_init_container in amf_init_containers: + amf_ip_args = amf_init_container.get('args') + for amf_ip_arg in amf_ip_args: + if '-i=' in amf_ip_arg: + amf_ip = amf_ip_arg.replace('-i=', '') + amf_ip = amf_ip.partition('/')[0] + amf_ip_list.append(amf_ip) + with open(smf_path) as f: + results = yaml.safe_load_all(f) + for result in results: + if result.get('kind') == 'Deployment': + smf_init_containers = 
result.get('spec').get( + 'template').get('spec').get('initContainers') + smf_ip_list = [] + for smf_init_container in smf_init_containers: + smf_ip_args = smf_init_container.get('args') + for smf_ip_arg in smf_ip_args: + if '-i=' in smf_ip_arg: + smf_ip = smf_ip_arg.replace('-i=', '') + smf_ip = smf_ip.partition('/')[0] + smf_ip_list.append(smf_ip) + with open(upf_path) as f: + results = yaml.safe_load_all(f) + for result in results: + if result.get('kind') == 'Deployment': + upf_init_containers = result.get('spec').get( + 'template').get('spec').get('initContainers') + upf_ip_list = [] + for upf_init_container in upf_init_containers: + upf_ip_args = upf_init_container.get('args') + for upf_ip_arg in upf_ip_args: + if '-i=' in upf_ip_arg: + upf_ip = upf_ip_arg.replace('-i=', '') + upf_ip = upf_ip.partition('/')[0] + upf_ip_list.append(upf_ip) + + with open(configmap_path, encoding='utf-8') as f: + results = yaml.safe_load_all(f) + for result in results: + # check amfcfg.yaml in configmap + amf_file = result.get('data').get('amfcfg.yaml') + index_start = \ + amf_file.index('ngapIpList') + len('ngapIpList') + 1 + index_end = amf_file.index('sbi') + amf_ip_str = amf_file[index_start:index_end] + count = 0 + for amf_ip in amf_ip_list: + if amf_ip in amf_ip_str: + count = count + 1 + if count == 0: + LOG.error('The configmap of amfcfg.yaml is invalid.' + ' "ngapIpList" may be wrong.') + raise exceptions.MgmtDriverOtherError( + 'The configmap of amfcfg.yaml is invalid.' + ' "ngapIpList" may be wrong.') + + # check smfcfg.yaml in configmap + smf_file = result.get('data').get('smfcfg.yaml') + index_start = smf_file.index('pfcp') + len('pfcp') + 1 + index_end = smf_file.index('userplane_information') + smf_ip_str = smf_file[index_start:index_end] + for smf_pfcp in smf_ip_list: + if smf_pfcp in smf_ip_str: + break + else: + LOG.error('The configmap of smfcfg.yaml is invalid.' 
+ ' "pfcp" may be wrong.') + raise exceptions.MgmtDriverOtherError( + 'The configmap of smfcfg.yaml is invalid.' + ' "pfcp" may be wrong.') + index_start2 = smf_file.index('UPF:') + len('UPF:') + 1 + index_end2 = smf_file.index('sNssaiUpfInfos') + upf_pfcp_ip_str = smf_file[index_start2:index_end2] + for upf_ip in upf_ip_list: + if upf_ip in upf_pfcp_ip_str: + break + else: + LOG.error('The configmap of smfcfg.yaml is invalid.' + ' The node_id of UPF may be wrong.') + raise exceptions.MgmtDriverOtherError( + 'The configmap of smfcfg.yaml is invalid.' + ' The node_id of UPF may be wrong.') + + def _execute_command(self, commander, ssh_command, timeout, type, retry): + eventlet.monkey_patch() + while retry >= 0: + try: + with eventlet.Timeout(timeout, True): + result = commander.execute_command( + ssh_command, input_data=None) + break + except eventlet.timeout.Timeout: + LOG.debug('It is time out, When execute command: ' + '{}.'.format(ssh_command)) + retry -= 1 + if retry < 0: + LOG.error('It is time out, When execute command: ' + '{}.'.format(ssh_command)) + raise exceptions.MgmtDriverOtherError( + error_message='It is time out, When execute command: ' + '{}.'.format(ssh_command)) + time.sleep(30) + if type == 'common': + err = result.get_stderr() + if err: + LOG.error(err) + raise exceptions.MgmtDriverRemoteCommandError(err_info=err) + return result.get_stdout() + + def _check_values(self, additional_param): + if not additional_param.get('master_node_username'): + LOG.error('The master_node_username in the ' + 'additionalParams cannot be None.') + raise exceptions.MgmtDriverNotFound( + param='master_node_username') + if not additional_param.get('master_node_password'): + LOG.error('The master_node_password in the ' + 'additionalParams cannot be None.') + raise exceptions.MgmtDriverNotFound( + param='master_node_username') + if not additional_param.get('ssh_master_node_ip'): + LOG.error('The ssh_master_node_ip in the ' + 'additionalParams cannot be None.') + raise 
exceptions.MgmtDriverNotFound( + param='ssh_master_node_ip') + + def _send_and_receive_file(self, host, user, password, + remote_file, local_file, operation): + connect = paramiko.Transport(host, 22) + connect.connect(username=user, password=password) + sftp = paramiko.SFTPClient.from_transport(connect) + if operation == 'receive': + sftp.get(remote_file, local_file) + else: + sftp.put(local_file, remote_file) + connect.close() + + @log.log + def instantiate_end(self, context, vnf_instance, + instantiate_vnf_request, grant, + grant_request, **kwargs): + additional_param = instantiate_vnf_request.\ + additional_params.get('free5gc', {}) + self._check_values(additional_param) + ssh_master_node_ip = additional_param.get('ssh_master_node_ip') + master_node_username = additional_param.get('master_node_username') + master_node_password = additional_param.get('master_node_password') + if not additional_param.get('upf_config_file_path'): + upf_config_file_path = \ + '/go/src/free5gc/NFs/upf/build/config/upfcfg.yaml' + if not additional_param.get('smf_config_file_path'): + smf_config_file_path = '/go/src/free5gc/config/smfcfg.yaml' + + commander = cmd_executer.RemoteCommandExecutor( + user=master_node_username, password=master_node_password, + host=ssh_master_node_ip, + timeout=30) + # get upf ip from smf + ssh_command = "kubectl get pod | grep smf | awk '{print $1}'" + smf_pod_name = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3)[0].replace('\n', '') + ssh_command = ("kubectl cp {smf_pod_name}:{smf_config_file_path}" + " /tmp/smfcfg.yaml" + .format(smf_pod_name=smf_pod_name, + smf_config_file_path=smf_config_file_path)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + local_smf_path = '/tmp/smfcfg.yaml' + self._send_and_receive_file( + ssh_master_node_ip, master_node_username, master_node_password, + '/tmp/smfcfg.yaml', local_smf_path, 'receive') + upf_gtpu_list = [] + with open(local_smf_path) 
as f: + file_content = yaml.safe_load(f) + upf_pfcp_ip = file_content['configuration'][ + 'userplane_information']['up_nodes']['UPF']['node_id'] + upf_gtpu_interface_list = file_content['configuration'][ + 'userplane_information']['up_nodes']['UPF']['interfaces'] + for upf_gtpu_interface in upf_gtpu_interface_list: + upf_gtpu_list = (upf_gtpu_interface['endpoints'] + + upf_gtpu_list) + + # modify upf info + upf_example_file_path = \ + '/go/src/free5gc/NFs/upf/build/config/upfcfg.yaml' + ssh_command = "kubectl get pod | grep upf | awk '{print $1}'" + upf_pod_name = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3)[0].replace('\n', '') + ssh_command = ("kubectl cp {upf_pod_name}:{upf_example_file_path}" + " /tmp/upfcfg.yaml -c myapp-container" + .format(upf_pod_name=upf_pod_name, + upf_example_file_path=upf_example_file_path)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + local_upf_path = '/tmp/upfcfg.yaml' + self._send_and_receive_file( + ssh_master_node_ip, master_node_username, master_node_password, + '/tmp/upfcfg.yaml', local_upf_path, 'receive') + with open(local_upf_path, 'r') as f: + upf_content = yaml.safe_load(f) + upf_content['configuration']['pfcp'][0]['addr'] = upf_pfcp_ip + for index in range(len(upf_gtpu_list)): + upf_content['configuration']['gtpu'][index]['addr'] =\ + upf_gtpu_list[index] + with open(local_upf_path, 'w') as nf: + yaml.safe_dump(upf_content, nf, default_flow_style=False) + self._send_and_receive_file( + ssh_master_node_ip, master_node_username, master_node_password, + '/tmp/upfcfg.yaml', local_upf_path, 'send') + ssh_command = ("kubectl cp /tmp/upfcfg.yaml" + " {upf_pod_name}:{upf_config_file_path}" + " -c myapp-container" + .format(upf_pod_name=upf_pod_name, + upf_config_file_path=upf_config_file_path)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + + # start upf process + ssh_command = ('cat </dev/null\n' + 'kubectl 
exec' + ' {} -i -- sh' + '<< eof\n' + 'ip link delete upfgtp\neof\n' + 'kubectl exec' + ' {} -i -- sh' + '<< eof\n' + './NFs/upf/build/bin/free5gc-upfd -f {}\neof' + '\nEOF\n' + .format(upf_pod_name, upf_pod_name, + upf_config_file_path)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + ssh_command = "sudo chmod 777 run_upf.sh" + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + ssh_command = "nohup ./run_upf.sh > upf.txt 2>&1 &" + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + + # restart smf + ssh_command = "kubectl get pod {} -o yaml | " \ + "kubectl replace --force -f -".format(smf_pod_name) + self._execute_command( + commander, ssh_command, 120, + 'common', 0) + time.sleep(120) + ssh_command = "kubectl get pod | grep smf | awk '{print $1}'" + smf_pod_name = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3)[0].replace('\n', '') + ssh_command = "kubectl get pod {} | " \ + "grep 'Running'".format(smf_pod_name) + result = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 0) + if not result: + LOG.error('SMF restart failed. Please check' + ' you k8s-cluster environment.') + raise exceptions.MgmtDriverOtherError( + 'SMF restart failed. 
Please check you' + ' k8s-cluster environment.') + commander.close_session() + os.remove(local_upf_path) + os.remove(local_smf_path) + + @log.log + def terminate_start(self, context, vnf_instance, + terminate_vnf_request, grant, + grant_request, **kwargs): + pass + + @log.log + def terminate_end(self, context, vnf_instance, + terminate_vnf_request, grant, + grant_request, **kwargs): + pass + + @log.log + def scale_start(self, context, vnf_instance, + scale_vnf_request, grant, + grant_request, **kwargs): + pass + + @log.log + def scale_end(self, context, vnf_instance, + scale_vnf_request, grant, + grant_request, **kwargs): + additional_param = vnf_instance.instantiated_vnf_info.\ + additional_params.get('free5gc', {}) + ssh_master_node_ip = additional_param.get('ssh_master_node_ip') + master_node_username = additional_param.get('master_node_username') + master_node_password = additional_param.get('master_node_password') + if not additional_param.get('upf_config_file_path'): + upf_config_file_path = \ + '/go/src/free5gc/NFs/upf/build/config/upfcfg.yaml' + if not additional_param.get('smf_config_file_path'): + smf_config_file_path = '/go/src/free5gc/config/smfcfg.yaml' + + commander = cmd_executer.RemoteCommandExecutor( + user=master_node_username, password=master_node_password, + host=ssh_master_node_ip, + timeout=30) + ssh_command = "kubectl get pod | grep smf | awk '{print $1}'" + smf_pod_name = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3)[0].replace('\n', '') + # get upf2's ip from smfcfg.yaml + ssh_command = ("kubectl cp {smf_pod_name}:{smf_config_file_path}" + " /tmp/smfcfg.yaml -c myapp-container" + .format(smf_pod_name=smf_pod_name, + smf_config_file_path=smf_config_file_path)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + local_smf_path = '/tmp/smfcfg.yaml' + self._send_and_receive_file( + ssh_master_node_ip, master_node_username, master_node_password, + '/tmp/smfcfg.yaml', 
local_smf_path, 'receive') + upf_gtpu_list = [] + with open(local_smf_path) as f: + file_content = yaml.safe_load(f) + upf_pfcp_ip = file_content['configuration'][ + 'userplane_information']['up_nodes']['UPF2']['node_id'] + upf_gtpu_interface_list = file_content['configuration'][ + 'userplane_information']['up_nodes']['UPF2']['interfaces'] + for upf_gtpu_interface in upf_gtpu_interface_list: + upf_gtpu_list = (upf_gtpu_interface['endpoints'] + + upf_gtpu_list) + + # modify upf2's config file + ssh_command = "kubectl get pod | grep upf | awk '{print $5}'" + upf_pod_age_list = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + count = 1 + age_all = {} + age_day = 0 + age_h = 0 + age_m = 0 + age_s = 0 + for age in upf_pod_age_list: + if 'd' in age: + age_day = age.split('d')[0] + if 'h' in age: + age_h = age.split('d')[1].split('h')[0] + elif 'h' in age and not age_h: + age_h = age.split('h')[0] + if 'm' in age: + age_m = age.split('h')[1].split('m')[0] + elif 'm' in age and not age_m: + age_m = age.split('m')[0] + if 's' in age: + age_s = age.split('m')[1].split('s')[0] + elif 's' in age and not age_s: + age_s = age.split('s')[0] + age_all[count] = \ + int(age_day) * 24 * 60 * 60 + int(age_h) * 60 * 60 +\ + int(age_m) * 60 + int(age_s) * 60 + count = count + 1 + age1 = age_all[1] + age2 = age_all[2] + if age1 > age2: + scale_count = 1 + else: + scale_count = 0 + ssh_command = ("kubectl get pod | grep upf | grep Running | awk '{" + "print $1}'") + upf_pod_name_list = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + upf2_pod_name = upf_pod_name_list[scale_count].replace('\n', '') + upf_example_file_path = \ + '/go/src/free5gc/NFs/upf/build/config/upfcfg.yaml' + ssh_command = ("kubectl cp {upf_pod_name}:{upf_example_file_path}" + " /tmp/upfcfg.yaml -c myapp-container" + .format(upf_pod_name=upf2_pod_name, + upf_example_file_path=upf_example_file_path)) + self._execute_command( + commander, 
ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + local_upf_path = '/tmp/upfcfg.yaml' + self._send_and_receive_file( + ssh_master_node_ip, master_node_username, master_node_password, + '/tmp/upfcfg.yaml', local_upf_path, 'receive') + with open(local_upf_path, 'r') as f: + upf_content = yaml.safe_load(f) + upf_content['configuration']['pfcp'][0]['addr'] = upf_pfcp_ip + for index in range(len(upf_gtpu_list)): + upf_content['configuration']['gtpu'][index]['addr'] = \ + upf_gtpu_list[index] + with open(local_upf_path, 'w') as nf: + yaml.safe_dump(upf_content, nf, default_flow_style=False) + self._send_and_receive_file( + ssh_master_node_ip, master_node_username, master_node_password, + '/tmp/upfcfg.yaml', local_upf_path, 'send') + ssh_command = ("kubectl cp /tmp/upfcfg.yaml" + " {upf_pod_name}:{upf_config_file_path} " + "-c myapp-container" + .format(upf_pod_name=upf2_pod_name, + upf_config_file_path=upf_config_file_path)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + ssh_command = ("kubectl exec {upf2_pod_name} -c myapp-container -- " + "ifconfig eth1 {ip}/24" + .format(upf2_pod_name=upf2_pod_name, + ip=upf_gtpu_list[0])) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + ssh_command = ("kubectl exec {upf2_pod_name} -c myapp-container -- " + "ifconfig eth2 {ip}/23" + .format(upf2_pod_name=upf2_pod_name, ip=upf_pfcp_ip)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + ssh_command = ("kubectl exec {upf2_pod_name} -c myapp-container -- " + "ifconfig eth3 192.168.52.253/24" + .format(upf2_pod_name=upf2_pod_name)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + # start upf process + ssh_command = ('cat </dev/null\n' + 'kubectl exec' + ' {} -i -- sh' + '<< eof\n' + 'ip link delete upfgtp\neof\n' + 'kubectl exec' + ' {} -i -- sh' + '<< eof\n' + './NFs/upf/build/bin/free5gc-upfd -f {}\neof' + 
'\nEOF\n'.format(upf2_pod_name, upf2_pod_name, + upf_config_file_path)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + ssh_command = "sudo chmod 777 run_upf2.sh" + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + ssh_command = "nohup ./run_upf2.sh > upf2.txt 2>&1 &" + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + + # restrat smf process + ssh_command = "kubectl get pod {} -o yaml | " \ + "kubectl replace --force -f -".format(smf_pod_name) + self._execute_command( + commander, ssh_command, 120, + 'common', 0) + time.sleep(120) + ssh_command = "kubectl get pod | grep smf | awk '{print $1}'" + smf_pod_name = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3)[0].replace('\n', '') + ssh_command = "kubectl get pod {} | " \ + "grep 'Running'".format(smf_pod_name) + result = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 0) + if not result: + LOG.error('SMF restart failed. Please check' + ' you k8s-cluster environment.') + raise exceptions.MgmtDriverOtherError( + 'SMF restart failed. 
Please check you' + ' k8s-cluster environment.') + commander.close_session() + os.remove(local_smf_path) + os.remove(local_upf_path) + + # if pod-affinity rule exists, check the pod deployed on different + # worker + affinity_flag = False + artifact_files = vnf_instance.instantiated_vnf_info.\ + additional_params.get('lcm-kubernetes-def-files', {}) + vnf_package_path = vnflcm_utils._get_vnf_package_path( + context, vnf_instance.vnfd_id) + for artifact_file in artifact_files: + if 'upf' in artifact_file: + upf_file_path = os.path.join( + vnf_package_path, artifact_file) + LOG.debug('upf_path:{}'.format(upf_file_path)) + with open(upf_file_path) as f: + yaml_content_all = yaml.safe_load_all(f.read()) + for yaml_content in yaml_content_all: + if (yaml_content['spec']['template']['spec'] + .get('affinity')): + affinity_rule = (yaml_content['spec']['template'] + ['spec'].get('affinity')) + if affinity_rule.get('podAntiAffinity'): + affinity_flag = True + LOG.debug('affinity_flag:{}'.format(affinity_flag)) + if affinity_flag: + commander = cmd_executer.RemoteCommandExecutor( + user=master_node_username, password=master_node_password, + host=ssh_master_node_ip, + timeout=30) + ssh_command = ("kubectl get pod -o wide | grep 'upf' | awk '{" + "print $7}'") + result = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 0) + if result[0] == result[1]: + LOG.error('The pod-affinity rule doesn\'t worker.' + ' Please check your yaml file {}'.format( + upf_file_path)) + raise exceptions.MgmtDriverOtherError( + 'The pod-affinity rule doesn\'t worker.' 
+ ' Please check your yaml file {}'.format( + upf_file_path)) + else: + LOG.debug('The pod has deployed on different worker node.') + + @log.log + def heal_start(self, context, vnf_instance, + heal_vnf_request, grant, + grant_request, **kwargs): + pass + + @log.log + def heal_end(self, context, vnf_instance, + heal_vnf_request, grant, + grant_request, **kwargs): + additional_param = vnf_instance.instantiated_vnf_info. \ + additional_params.get('free5gc', {}) + ssh_master_node_ip = additional_param.get('ssh_master_node_ip') + master_node_username = additional_param.get('master_node_username') + master_node_password = additional_param.get('master_node_password') + if not additional_param.get('smf_config_file_path'): + smf_config_file_path = '/go/src/free5gc/config/smfcfg.yaml' + if not additional_param.get('upf_config_file_path'): + upf_config_file_path = \ + '/go/src/free5gc/NFs/upf/build/config/upfcfg.yaml' + commander = cmd_executer.RemoteCommandExecutor( + user=master_node_username, password=master_node_password, + host=ssh_master_node_ip, + timeout=30) + ssh_command = "kubectl get pod | grep upf | awk '{print $5}'" + upf_pod_age_list = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + if len(upf_pod_age_list) > 1: + # get upf ip + ssh_command = "kubectl get pod | grep smf | awk '{print $1}'" + smf_pod_name = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3)[0].replace('\n', '') + ssh_command = ("kubectl cp {smf_pod_name}:{smf_config_file_path}" + " /tmp/smfcfg.yaml -c myapp-container" + .format(smf_pod_name=smf_pod_name, + smf_config_file_path=smf_config_file_path)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + local_smf_path = '/tmp/smfcfg.yaml' + self._send_and_receive_file( + ssh_master_node_ip, master_node_username, master_node_password, + '/tmp/smfcfg.yaml', local_smf_path, 'receive') + with open(local_smf_path) as f: + file_content = 
yaml.safe_load(f) + upf1_pfcp_ip = file_content['configuration'][ + 'userplane_information']['up_nodes']['UPF']['node_id'] + upf2_pfcp_ip = file_content['configuration'][ + 'userplane_information']['up_nodes']['UPF2']['node_id'] + ssh_command = "kubectl get pod | grep upf | awk '{print $1}'" + upf_pod_name_list = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + for upf_pod in upf_pod_name_list: + upf_example_file_path = \ + '/go/src/free5gc/NFs/upf/build/config/upfcfg.yaml' + ssh_command = "kubectl exec {} -c myapp-container --" \ + " cat {}".format(upf_pod.replace('\n', ''), + upf_example_file_path) + results = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + for result in results: + if upf1_pfcp_ip in result: + flag = 'UPF2' + unhealed_upf_pod_name = upf_pod + break + if upf2_pfcp_ip in result: + flag = 'UPF' + unhealed_upf_pod_name = upf_pod + upf_pod_name_list.remove(unhealed_upf_pod_name) + upf_pod_name = upf_pod_name_list[0].replace('\n', '') + upf_gtpu_list = [] + with open(local_smf_path) as f: + file_content = yaml.safe_load(f) + if flag == 'UPF': + upf_pfcp_ip = file_content['configuration'][ + 'userplane_information']['up_nodes']['UPF']['node_id'] + upf_gtpu_interface_list = ( + file_content['configuration']['userplane_information'] + ['up_nodes']['UPF']['interfaces']) + for upf_gtpu_interface in upf_gtpu_interface_list: + upf_gtpu_list = (upf_gtpu_interface['endpoints'] + + upf_gtpu_list) + else: + upf_pfcp_ip = file_content['configuration'][ + 'userplane_information']['up_nodes']['UPF2']['node_id'] + upf_gtpu_interface_list = ( + file_content['configuration']['userplane_information'] + ['up_nodes']['UPF2']['interfaces']) + for upf_gtpu_interface in upf_gtpu_interface_list: + upf_gtpu_list = (upf_gtpu_interface['endpoints'] + + upf_gtpu_list) + # modify upf config file + upf_example_file_path = \ + '/go/src/free5gc/NFs/upf/build/config/upfcfg.yaml' + ssh_command = ("kubectl cp 
{upf_pod_name}:{upf_example_file_path}" + " /tmp/upfcfg.yaml -c myapp-container".format( + upf_pod_name=upf_pod_name, + upf_example_file_path=upf_example_file_path)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + local_upf_path = '/tmp/upfcfg.yaml' + self._send_and_receive_file( + ssh_master_node_ip, master_node_username, master_node_password, + '/tmp/upfcfg.yaml', local_upf_path, 'receive') + with open(local_upf_path, 'r') as f: + upf_content = yaml.safe_load(f) + upf_content['configuration']['pfcp'][0]['addr'] = upf_pfcp_ip + for index in range(len(upf_gtpu_list)): + upf_content['configuration']['gtpu'][index]['addr'] = \ + upf_gtpu_list[index] + with open(local_upf_path, 'w') as nf: + yaml.safe_dump(upf_content, nf, default_flow_style=False) + self._send_and_receive_file( + ssh_master_node_ip, master_node_username, master_node_password, + '/tmp/upfcfg.yaml', local_upf_path, 'send') + ssh_command = ("kubectl cp /tmp/upfcfg.yaml" + " {upf_pod_name}:{upf_config_file_path}" + " -c myapp-container" + .format(upf_pod_name=upf_pod_name, + upf_config_file_path=upf_config_file_path)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + + # start upf process + ssh_command = ('cat </dev/null\n' + 'kubectl exec' + ' {} -i -- sh' + '<< eof\n' + 'ip link delete upfgtp\neof\n' + 'kubectl exec' + ' {} -i -- sh' + '<< eof\n' + './NFs/upf/build/bin/free5gc-upfd -f {}\neof' + '\nEOF\n' + .format(upf_pod_name, upf_pod_name, + upf_config_file_path)) + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + ssh_command = "sudo chmod 777 run_upf.sh" + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + ssh_command = "nohup ./run_upf.sh > upf_heal.txt 2>&1 &" + self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3) + + # restart smf + ssh_command = "kubectl get pod {} -o yaml | " \ + "kubectl replace --force -f 
-".format(smf_pod_name) + self._execute_command( + commander, ssh_command, 120, + 'common', 0) + time.sleep(120) + ssh_command = "kubectl get pod | grep smf | awk '{print $1}'" + smf_pod_name = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 3)[0].replace('\n', '') + ssh_command = "kubectl get pod {} | " \ + "grep 'Running'".format(smf_pod_name) + result = self._execute_command( + commander, ssh_command, FREE5GC_CMD_TIMEOUT, + 'common', 0) + if not result: + LOG.error('SMF restart failed. Please check' + ' you k8s-cluster environment.') + raise exceptions.MgmtDriverOtherError( + 'SMF restart failed. Please check you' + ' k8s-cluster environment.') + commander.close_session() + os.remove(local_upf_path) + os.remove(local_smf_path) + else: + self.instantiate_end(context, vnf_instance, + vnf_instance.instantiated_vnf_info, grant, + grant_request, **kwargs) + + @log.log + def change_external_connectivity_start( + self, context, vnf_instance, + change_ext_conn_request, grant, + grant_request, **kwargs): + pass + + @log.log + def change_external_connectivity_end( + self, context, vnf_instance, + change_ext_conn_request, grant, + grant_request, **kwargs): + pass diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/TOSCA-Metadata/TOSCA.meta b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/TOSCA-Metadata/TOSCA.meta new file mode 100644 index 000000000..5701a1fe8 --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/TOSCA-Metadata/TOSCA.meta @@ -0,0 +1,74 @@ +TOSCA-Meta-File-Version: 1.0 +Created-by: dummy_user +CSAR-Version: 1.1 +Entry-Definitions: Definitions/free5gc_top.vnfd.yaml + +Name: Files/kubernetes/free5gc-amf.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: 9ff05bcde8a286273eb9ff6b24250cc9e57be9e7e032faf8293691e18cfa6aa0 + +Name: Files/kubernetes/free5gc-ausf.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: 
b47b97941ce0815dc0c80317e5323d51d29f1937fab37ef47d081652f0773100 + +Name: Files/kubernetes/free5gc-configmap.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: cdee7916e30fb98775b493c42a3cc8a40b2db5a0e478767609deeb0c0d3aec9d + +Name: Files/kubernetes/free5gc-mongodb.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: ec2ee0dc124c573ff0af795ce36ed83358ad026099c576769612e79cde3bf4d4 + +Name: Files/kubernetes/free5gc-nrf.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: e7042c937431aa3d65a01b5602adb23a98a280c8575dc54f02ab50e4174cf2dc + +Name: Files/kubernetes/free5gc-nssf.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: e871ac354e333a3c038b202f5014fcd56068ded2a2f7317e529d1585a3327d60 + +Name: Files/kubernetes/free5gc-pcf.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: 43e3307266eea8a95f90a27848d719820c2aa6c5d13e741e5e83cb94e36bec8f + +Name: Files/kubernetes/free5gc-smf.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: ce9e16e8418967d265430a7ed77736f85cba317098d9b526293f09babd24ed72 + +Name: Files/kubernetes/free5gc-udm.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: d5b451fcb0d66c38f8312e515f882f3fe7d4bae4cf17991bb6142670b70d2224 + +Name: Files/kubernetes/free5gc-udr.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: c44011cbf73ea59f021326620612142e0455116f7d1ec86234ab319e2c9db131 + +Name: Files/kubernetes/free5gc-upf.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: 2ea6b86e65f41a7327f2a4af410cc619be67edff00329ecf12bc842c0a007707 + +Name: Files/kubernetes/free5gc-webui.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: a2e33991eacd9235717e958e02aecfd59fa07b95844c699cb1d85df99a449125 + +Name: Files/kubernetes/unix-daemonset.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: cf3d6b578c568e730d9f392754fb644b6c10e1096a29cb47105f8e396f047dff + +Name: Scripts/free5gc_mgmt_cnf.py +Content-Type: text/x-python +Algorithm: 
SHA-256 +Hash: diff --git a/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/inst_param.yaml b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/inst_param.yaml new file mode 100644 index 000000000..7a5e27c0e --- /dev/null +++ b/samples/free5gc/cnf_on_vm/no_affinity/sample_free5gc_cnf_package/inst_param.yaml @@ -0,0 +1,32 @@ +{ + "flavourId": "simple", + "additionalParams": { + "free5gc": { + "ssh_master_node_ip": "10.10.0.166", + "master_node_username": "ubuntu", + "master_node_password": "ubuntu", + }, + "lcm-kubernetes-def-files": [ + "Files/kubernetes/unix-daemonset.yaml", + "Files/kubernetes/free5gc-configmap.yaml", + "Files/kubernetes/free5gc-mongodb.yaml", + "Files/kubernetes/free5gc-nrf.yaml", + "Files/kubernetes/free5gc-udr.yaml", + "Files/kubernetes/free5gc-upf.yaml", + "Files/kubernetes/free5gc-pcf.yaml", + "Files/kubernetes/free5gc-ausf.yaml", + "Files/kubernetes/free5gc-nssf.yaml", + "Files/kubernetes/free5gc-udm.yaml", + "Files/kubernetes/free5gc-amf.yaml", + "Files/kubernetes/free5gc-webui.yaml", + "Files/kubernetes/free5gc-smf.yaml", + ], + }, + "vimConnectionInfo": [ + { + "id": "kubernetes_vim", + "vimId": "9b723654-33cf-4c76-b3dd-904510f5499d", + "vimType": "kubernetes", + }, + ], +}