heat_template_version: queens

description: >
  OpenStack Nova Compute service configured with Puppet

parameters:
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  NovaRbdPoolName:
    default: vms
    type: string
    description: The pool name for RBD backend ephemeral storage.
    tags:
      - role_specific
  CephClusterName:
    type: string
    default: ceph
    description: The Ceph cluster name.
    constraints:
      - allowed_pattern: "[a-zA-Z0-9]+"
        description: >
          The Ceph cluster name must be at least 1 character and contain only
          letters and numbers.
  CephClientUserName:
    default: openstack
    type: string
  CephClientKey:
    description: The Ceph client key. Can be created with ceph-authtool --gen-print-key.
    type: string
    hidden: true
    constraints:
      - allowed_pattern: "^[a-zA-Z0-9+/]{38}==$"
  CephClusterFSID:
    type: string
    description: The Ceph cluster FSID. Must be a UUID.
  CinderEnableNfsBackend:
    default: false
    description: Whether or not to enable the NFS backend for Cinder
    type: boolean
  NovaNfsEnabled:
    default: false
    description: Whether or not to enable the NFS backend for Nova
    type: boolean
    tags:
      - role_specific
  NovaNfsShare:
    default: ''
    description: NFS share to mount for nova storage (when NovaNfsEnabled is true)
    type: string
    tags:
      - role_specific
  NovaNfsOptions:
    default: 'context=system_u:object_r:nfs_t:s0'
    description: NFS mount options for nova storage (when NovaNfsEnabled is true)
    type: string
    tags:
      - role_specific
  NovaNfsVersion:
    default: '4'
    description: >
      NFS version used for nova storage (when NovaNfsEnabled is true).
      Since NFSv3 does not support full locking, an NFSv4 version needs to
      be used. To avoid breaking current installations, the default is the
      previously hard-coded version 4.
    type: string
    constraints:
      - allowed_pattern: "^4.?[0-9]?"
    tags:
      - role_specific
  CinderEnableRbdBackend:
    default: false
    description: Whether or not to enable the RBD backend for Cinder
    type: boolean
  NovaEnableRbdBackend:
    default: false
    description: Whether or not to enable the RBD backend for Nova ephemeral storage.
    type: boolean
    tags:
      - role_specific
  NovaComputeLibvirtVifDriver:
    default: ''
    description: Libvirt VIF driver configuration for the network
    type: string
  NovaPCIPassthrough:
    description: >
      List of PCI Passthrough whitelist parameters.
      Example -
      NovaPCIPassthrough:
        - vendor_id: "8086"
          product_id: "154c"
          address: "0000:05:00.0"
          physical_network: "datacentre"
      For different formats, refer to the nova.conf documentation for
      pci_passthrough_whitelist configuration
    type: json
    default: ''
    tags:
      - role_specific
  NovaVcpuPinSet:
    description: >
      A list or range of physical CPU cores to reserve for virtual machine
      processes.
      Ex. NovaVcpuPinSet: ['4-12','^8'] will reserve cores from 4-12 excluding 8
    type: comma_delimited_list
    default: []
    tags:
      - role_specific
  NovaReservedHostMemory:
    description: >
      Reserved RAM for host processes.
    type: number
    default: 4096
    constraints:
      - range: { min: 512 }
    tags:
      - role_specific
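  # Illustrative example (not part of this template): role-specific
  # parameters such as NovaVcpuPinSet and NovaReservedHostMemory are
  # typically set per role from an environment file. The key below assumes
  # a role named "Compute"; the values are placeholders:
  #
  #   parameter_defaults:
  #     ComputeParameters:
  #       NovaVcpuPinSet: ['4-12','^8']
  #       NovaReservedHostMemory: 4096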
  NovaReservedHugePages:
    description: >
      A list of valid key=value pairs which reflect NUMA node ID, page size
      (default unit is KiB) and number of pages to be reserved.
      Example -
      NovaReservedHugePages: ["node:0,size:2048,count:64","node:1,size:1GB,count:1"]
      will reserve 64 pages of 2MiB on NUMA node 0 and 1 page of 1GiB on
      NUMA node 1.
    type: comma_delimited_list
    default: []
    tags:
      - role_specific
  KernelArgs:
    default: ""
    type: string
    description: Kernel Args to apply to the host
    tags:
      - role_specific
  OvsDpdkSocketMemory:
    default: ""
    description: >
      Sets the amount of hugepage memory to assign per NUMA node. It is
      recommended to use the socket closest to the PCIe slot used for the
      desired DPDK NIC. The format should be "<socket 0 mem>, <socket 1 mem>",
      where the value is specified in MB. For example: "1024,0".
    type: string
    tags:
      - role_specific
  MonitoringSubscriptionNovaCompute:
    default: 'overcloud-nova-compute'
    type: string
  NovaComputeLoggingSource:
    type: json
    default:
      tag: openstack.nova.compute
      path: /var/log/nova/nova-compute.log
  UpgradeLevelNovaCompute:
    type: string
    description: Nova Compute upgrade level
    default: ''
  MigrationSshKey:
    type: json
    description: >
      SSH key for migration.
      Expects a dictionary with keys 'public_key' and 'private_key'.
      Values should be identical to SSH public/private key files.
    default:
      public_key: ''
      private_key: ''
  MigrationSshPort:
    default: 2022
    description: Target port for migration over ssh
    type: number
  VerifyGlanceSignatures:
    default: false
    description: Whether to verify image signatures.
    type: boolean
  NovaLibvirtRxQueueSize:
    description: >
      virtio-net RX queue size. Valid values are 256, 512, 1024.
    default: 512
    type: number
    constraints:
      - allowed_values: [ 256, 512, 1024 ]
    tags:
      - role_specific
  NovaLibvirtTxQueueSize:
    description: >
      virtio-net TX queue size. Valid values are 256, 512, 1024.
    default: 512
    type: number
    constraints:
      - allowed_values: [ 256, 512, 1024 ]
    tags:
      - role_specific
  NovaLibvirtVolumeUseMultipath:
    default: false
    description: Whether or not to enable multipath connections for volumes.
    type: boolean
    tags:
      - role_specific
  CinderPassword:
    description: The password for the cinder service and db account.
    type: string
    hidden: true
  KeystoneRegion:
    type: string
    default: 'regionOne'
    description: Keystone region for endpoint
  NovaLibvirtMemStatsPeriodSeconds:
    description: >
      The memory usage statistics period in seconds; a zero or negative
      value disables memory usage statistics.
    default: 10
    type: number
    tags:
      - role_specific
  NovaResumeGuestsStateOnHostBoot:
    default: false
    description: Whether to restart running instances on compute host reboot
    type: boolean
    tags:
      - role_specific
  NovaCPUAllocationRatio:
    type: number
    description: Virtual CPU to physical CPU allocation ratio.
    default: 0.0
    tags:
      - role_specific
  NovaRAMAllocationRatio:
    type: number
    description: Virtual RAM to physical RAM allocation ratio.
    default: 1.0
    tags:
      - role_specific
  NovaDiskAllocationRatio:
    type: number
    description: Virtual disk to physical disk allocation ratio.
    default: 0.0
    tags:
      - role_specific
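# Illustrative example (not part of this template): KernelArgs and
# OvsDpdkSocketMemory might be combined for a two-socket DPDK role like
# this (the role key assumes a role named "ComputeOvsDpdk"; the values are
# placeholders, not recommendations):
#
#   parameter_defaults:
#     ComputeOvsDpdkParameters:
#       KernelArgs: "default_hugepagesz=1GB hugepagesz=1G hugepages=32"
#       OvsDpdkSocketMemory: "1024,1024"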
conditions:
  enable_live_migration_tunnelled:
    or:
      - and:
          - equals: [{get_param: NovaNfsEnabled}, true]
          - equals: [{get_param: [RoleParameters, NovaNfsEnabled]}, '']
      - equals: [{get_param: [RoleParameters, NovaNfsEnabled]}, true]
      - equals: [{get_param: [RoleParameters, NovaEnableRbdBackend]}, true]
      - and:
          - equals: [{get_param: [RoleParameters, NovaEnableRbdBackend]}, '']
          - equals: [{get_param: NovaEnableRbdBackend}, true]
  reserved_huge_pages_set:
    not:
      and:
        - equals: [{get_param: [RoleParameters, NovaReservedHugePages]}, ""]
        - equals: [{get_param: NovaReservedHugePages}, []]
  ovs_dpdk_socket_memory_not_set:
    and:
      - equals: [{get_param: [RoleParameters, OvsDpdkSocketMemory]}, ""]
      - equals: [{get_param: OvsDpdkSocketMemory}, ""]

resources:
  NovaBase:
    type: ./nova-base.yaml
    properties:
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

  # Merging role-specific parameters (RoleParameters) with the default parameters.
  # RoleParameters will have precedence over the default parameters.
  RoleParametersValue:
    type: OS::Heat::Value
    properties:
      type: json
      value:
        map_replace:
          - map_replace:
              - nova::compute::vcpu_pin_set: NovaVcpuPinSet
                nova::compute::reserved_host_memory: NovaReservedHostMemory
                nova::compute::reserved_huge_pages: NovaReservedHugePages
                nova::compute::libvirt::rx_queue_size: NovaLibvirtRxQueueSize
                nova::compute::libvirt::tx_queue_size: NovaLibvirtTxQueueSize
                nova::compute::libvirt::volume_use_multipath: NovaLibvirtVolumeUseMultipath
                nova::compute::rbd::libvirt_images_rbd_pool: NovaRbdPoolName
                tripleo::profile::base::nova::compute::nova_nfs_enabled: NovaNfsEnabled
                nfs_backend_enable: NovaNfsEnabled
                nfs_share: NovaNfsShare
                nfs_options: NovaNfsOptions
                nfs_vers: NovaNfsVersion
                nova::compute::libvirt::mem_stats_period_seconds: NovaLibvirtMemStatsPeriodSeconds
                nova::compute::resume_guests_state_on_host_boot: NovaResumeGuestsStateOnHostBoot
                resume_guests_state_on_host_boot: NovaResumeGuestsStateOnHostBoot
                nova::compute::rbd::ephemeral_storage: NovaEnableRbdBackend
                nova::cpu_allocation_ratio: NovaCPUAllocationRatio
                nova::ram_allocation_ratio: NovaRAMAllocationRatio
                nova::disk_allocation_ratio: NovaDiskAllocationRatio
              - values: {get_param: [RoleParameters]}
          - values:
              NovaVcpuPinSet: {get_param: NovaVcpuPinSet}
              NovaReservedHostMemory: {get_param: NovaReservedHostMemory}
              NovaReservedHugePages:
                # The "repeat" function is run for the case when OvsDpdkSocketMemory
                # is set and neither the global nor the role-based
                # NovaReservedHugePages is set.
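                # Worked example (illustrative): with KernelArgs containing
                # "hugepagesz=1G" and OvsDpdkSocketMemory set to "1024,1024",
                # the repeat below yields
                # ["node:0,size:1048576,count:1","node:1,size:1048576,count:1"]
                # (size is the page size in KiB; count is the socket memory in
                # MB * 1024 / page size in KiB).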
                if:
                  - reserved_huge_pages_set
                  - get_param: NovaReservedHugePages
                  - if:
                      - ovs_dpdk_socket_memory_not_set
                      - get_param: NovaReservedHugePages
                      - repeat:
                          for_each:
                            <%node%>:
                              yaql:
                                expression: range(0,len($.data.dpdk_p)).join(",").split(",")
                                data:
                                  dpdk_p:
                                    if:
                                      - {equals: [{get_param: [RoleParameters, OvsDpdkSocketMemory]}, ""]}
                                      - str_split: [',', {get_param: OvsDpdkSocketMemory}]
                                      - str_split: [',', {get_param: [RoleParameters, OvsDpdkSocketMemory]}]
                            <%size%>:
                              yaql:
                                expression: >-
                                  let(hzx => regex("([0-9]+[K|M|G])").search($.data.kern_p+$.data.kern_g)) ->
                                  let(hz => switch($hzx = "4K" => "4", $hzx = "2M" => "2048", $hzx = "1G" => "1048576", $hzx => "2048", $hzx = null => "2048")) ->
                                  [$hz]*len($.data.dpdk_p)
                                data:
                                  dpdk_p:
                                    if:
                                      - {equals: [{get_param: [RoleParameters, OvsDpdkSocketMemory]}, ""]}
                                      - str_split: [',', {get_param: OvsDpdkSocketMemory}]
                                      - str_split: [',', {get_param: [RoleParameters, OvsDpdkSocketMemory]}]
                                  kern_p: {get_param: [RoleParameters, KernelArgs]}
                                  kern_g: {get_param: KernelArgs}
                            <%count%>:
                              yaql:
                                expression: >-
                                  let(hzx => regex("([0-9]+[K|M|G])").search($.data.kern_p+$.data.kern_g)) ->
                                  let(hz => int(switch($hzx = "4K" => "4", $hzx = "2M" => "2048", $hzx = "1G" => "1048576", $hzx => "2048", $hzx = null => "2048"))) ->
                                  $.data.dpdk_p.select(int($)*1024/$hz).join(",").split(',')
                                data:
                                  dpdk_p:
                                    if:
                                      - {equals: [{get_param: [RoleParameters, OvsDpdkSocketMemory]}, ""]}
                                      - str_split: [',', {get_param: OvsDpdkSocketMemory}]
                                      - str_split: [',', {get_param: [RoleParameters, OvsDpdkSocketMemory]}]
                                  kern_p: {get_param: [RoleParameters, KernelArgs]}
                                  kern_g: {get_param: KernelArgs}
                          template: >-
                            node:<%node%>,size:<%size%>,count:<%count%>
                          permutations: false
              NovaLibvirtRxQueueSize: {get_param: NovaLibvirtRxQueueSize}
              NovaLibvirtTxQueueSize: {get_param: NovaLibvirtTxQueueSize}
              NovaLibvirtVolumeUseMultipath: {get_param: NovaLibvirtVolumeUseMultipath}
              NovaRbdPoolName: {get_param: NovaRbdPoolName}
              NovaNfsEnabled: {get_param: NovaNfsEnabled}
              NovaNfsShare: {get_param: NovaNfsShare}
              NovaNfsOptions: {get_param: NovaNfsOptions}
              NovaNfsVersion: {get_param: NovaNfsVersion}
              NovaLibvirtMemStatsPeriodSeconds: {get_param: NovaLibvirtMemStatsPeriodSeconds}
              NovaResumeGuestsStateOnHostBoot: {get_param: NovaResumeGuestsStateOnHostBoot}
              NovaEnableRbdBackend: {get_param: NovaEnableRbdBackend}
              NovaCPUAllocationRatio: {get_param: NovaCPUAllocationRatio}
              NovaRAMAllocationRatio: {get_param: NovaRAMAllocationRatio}
              NovaDiskAllocationRatio: {get_param: NovaDiskAllocationRatio}
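# Illustrative note (values are placeholders): with NovaNfsEnabled: true and
# NovaNfsShare: "192.168.24.1:/export/nova", RoleParametersValue resolves to
# entries such as
#   tripleo::profile::base::nova::compute::nova_nfs_enabled: true
#   nfs_share: 192.168.24.1:/export/nova
# which the host_prep_tasks below consume via get_attr.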
outputs:
  role_data:
    description: Role data for the Nova Compute service.
    value:
      service_name: nova_compute
      monitoring_subscription: {get_param: MonitoringSubscriptionNovaCompute}
      config_settings:
        map_merge:
          - get_attr: [NovaBase, role_data, config_settings]
          - get_attr: [RoleParametersValue, value]
          - nova::compute::libvirt::manage_libvirt_services: false
            nova::compute::pci::passthrough:
              str_replace:
                template: "JSON_PARAM"
                params:
                  map_replace:
                    - map_replace:
                        - JSON_PARAM: NovaPCIPassthrough
                        - values: {get_param: [RoleParameters]}
                    - values:
                        NovaPCIPassthrough: {get_param: NovaPCIPassthrough}
            # We manage migration in the nova common puppet profile.
            nova::compute::libvirt::migration_support: false
            tripleo::profile::base::nova::migration::client::nova_compute_enabled: true
            tripleo::profile::base::nova::migration::client::ssh_private_key: {get_param: [MigrationSshKey, private_key]}
            tripleo::profile::base::nova::migration::client::ssh_port: {get_param: MigrationSshPort}
            nova::compute::rbd::libvirt_images_rbd_ceph_conf:
              list_join:
                - ''
                - - '/etc/ceph/'
                  - {get_param: CephClusterName}
                  - '.conf'
            nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
            nova::compute::rbd::rbd_keyring:
              list_join:
                - '.'
                - - 'client'
                  - {get_param: CephClientUserName}
            tripleo::profile::base::nova::compute::cinder_nfs_backend: {get_param: CinderEnableNfsBackend}
            rbd_persistent_storage: {get_param: CinderEnableRbdBackend}
            nova::cinder::username: 'cinder'
            nova::cinder::auth_type: 'v3password'
            nova::cinder::project_name: 'service'
            nova::cinder::password: {get_param: CinderPassword}
            nova::cinder::auth_url: {get_param: [EndpointMap, KeystoneV3Internal, uri]}
            nova::cinder::region_name: {get_param: KeystoneRegion}
            nova::compute::rbd::libvirt_rbd_secret_key: {get_param: CephClientKey}
            nova::compute::rbd::libvirt_rbd_secret_uuid: {get_param: CephClusterFSID}
            nova::compute::instance_usage_audit: true
            nova::compute::instance_usage_audit_period: 'hour'
            # TUNNELLED mode provides a security improvement for migration, but
            # can't be used in combination with block migration. So we only
            # enable it when shared storage is available (Ceph RBD is currently
            # the only option).
            # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
            # In future versions of QEMU (2.6, mostly), danpb's native
            # encryption work will obsolete the need to use TUNNELLED transport
            # mode.
            nova::migration::live_migration_tunnelled:
              if:
                - enable_live_migration_tunnelled
                - true
                - false
            nova::compute::neutron::libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
            # NOTE: the bind IP is found in hiera by replacing the network name
            # with the local node IP for the given network; replacement examples
            # (eg. for internal_api):
            # internal_api -> IP
            # internal_api_uri -> [IP]
            # internal_api_subnet -> IP/CIDR
            nova::compute::vncserver_proxyclient_address:
              str_replace:
                template: "%{hiera('$NETWORK')}"
                params:
                  $NETWORK: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
            nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
            nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
            nova::compute::verify_glance_signatures: {get_param: VerifyGlanceSignatures}
      step_config: |
        # TODO(emilien): figure out how to deal with the libvirt profile.
        # We'll probably treat it like we do with Neutron plugins.
        # Until then, just include it in the default nova-compute role.
        include tripleo::profile::base::nova::compute::libvirt
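      # Illustrative note: if ServiceNetMap maps NovaVncProxyNetwork to
      # internal_api (an assumed example), the str_replace above renders
      # "%{hiera('internal_api')}", which hiera resolves to this node's
      # internal_api IP at deploy time.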
      service_config_settings:
        fluentd:
          tripleo_fluentd_groups_nova_compute:
            - nova
          tripleo_fluentd_sources_nova_compute:
            - {get_param: NovaComputeLoggingSource}
        collectd:
          tripleo.collectd.plugins.nova_compute:
            - virt
          collectd::plugin::virt::connection: 'qemu:///system'
      host_prep_tasks:
        - name: Mount Nova NFS Share
          vars:
            nfs_backend_enable: {get_attr: [RoleParametersValue, value, nfs_backend_enable]}
            nfs_share: {get_attr: [RoleParametersValue, value, nfs_share]}
            nfs_options: {get_attr: [RoleParametersValue, value, nfs_options]}
            nfs_vers: {get_attr: [RoleParametersValue, value, nfs_vers]}
          mount: name=/var/lib/nova/instances src="{{nfs_share}}" fstype=nfs4 opts="_netdev,bg,{{nfs_options}},vers={{nfs_vers}},nfsvers={{nfs_vers}}" state=mounted
          when: nfs_backend_enable|bool
        - name: is Nova Resume Guests State On Host Boot enabled
          set_fact:
            resume_guests_state_on_host_boot_enabled: {get_attr: [RoleParametersValue, value, resume_guests_state_on_host_boot]}
        - name: install libvirt-guests systemd unit file
          when: resume_guests_state_on_host_boot_enabled|bool
          block:
            - name: libvirt-guests unit to stop the nova_compute container before shutting down VMs
              copy:
                dest: /etc/systemd/system/libvirt-guests.service
                content: |
                  [Unit]
                  Description=Suspend/Resume Running libvirt Guests
                  Requires=virt-guest-shutdown.target
                  After=network.target
                  After=time-sync.target
                  After=virt-guest-shutdown.target
                  After=docker.service
                  After=paunch-container-shutdown.service
                  After=rhel-push-plugin.service
                  Documentation=man:libvirtd(8)
                  Documentation=https://libvirt.org

                  [Service]
                  EnvironmentFile=-/etc/sysconfig/libvirt-guests
                  # Hack just call traditional service until we factor
                  # out the code
                  ExecStart=/usr/libexec/libvirt-guests.sh start
                  ExecStop=/bin/docker stop nova_compute
                  ExecStop=/usr/libexec/libvirt-guests.sh stop
                  Type=oneshot
                  RemainAfterExit=yes
                  StandardOutput=journal+console
                  TimeoutStopSec=0

                  [Install]
                  WantedBy=multi-user.target
            - name: libvirt-guests enable VM shutdown on compute reboot/shutdown
              systemd:
                name: libvirt-guests
                enabled: yes
                state: started
                daemon_reload: yes
      upgrade_tasks:
        - name: Stop nova-compute service
          when: step|int == 1
          service: name=openstack-nova-compute state=stopped
        # If not already set by puppet (e.g. a pre-ocata version), set the
        # upgrade_level for compute to "auto"
        - name: Set compute upgrade level to auto
          when: step|int == 3
          ini_file:
            str_replace:
              template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
              params:
                LEVEL: {get_param: UpgradeLevelNovaCompute}
        - name: install openstack-nova-migration
          when: step|int == 3
          yum: name=openstack-nova-migration state=latest
        - name: Start nova-compute service
          when: step|int == 6
          service: name=openstack-nova-compute state=started
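# Illustrative note (the share address is a placeholder): with the default
# NovaNfsOptions and NovaNfsVersion, the "Mount Nova NFS Share" task above is
# roughly equivalent to
#   mount -t nfs4 -o _netdev,bg,context=system_u:object_r:nfs_t:s0,vers=4,nfsvers=4 \
#     192.168.24.1:/export/nova /var/lib/nova/instances
# and, because state=mounted, it also persists the mount in /etc/fstab.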