heat_template_version: rocky

description: >
  OpenStack containerized Nova Compute service

parameters:
  DockerNovaComputeImage:
    description: The container image to use for the Nova Compute service
    type: string
  DockerNovaLibvirtConfigImage:
    description: The container image to use for the nova_libvirt config_volume
    type: string
  DockerNovaComputeUlimit:
    default: ['nofile=131072']
    description: ulimit for Nova Compute Container
    type: comma_delimited_list
  NovaComputeLoggingSource:
    type: json
    default:
      tag: openstack.nova.compute
      path: /var/log/containers/nova/nova-compute.log
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry.  This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  CephClientUserName:
    default: openstack
    type: string
  CephClusterName:
    type: string
    default: ceph
    description: The Ceph cluster name.
    constraints:
    - allowed_pattern: "[a-zA-Z0-9]+"
      description: >
        The Ceph cluster name must be at least 1 character and contain only
        letters and numbers.
  NovaComputeOptVolumes:
    default: []
    description: List of optional volumes to be mounted into the Nova Compute container
    type: comma_delimited_list
  NovaComputeOptEnvVars:
    default: []
    description: List of optional environment variables to pass to the Nova Compute container (see the example below)
    type: comma_delimited_list
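  # A minimal usage sketch (hypothetical values) for the two optional
  # parameters above, wired through parameter_defaults:
  #
  #   parameter_defaults:
  #     NovaComputeOptVolumes:
  #       - /mnt/vendor-tools:/mnt/vendor-tools:ro
  #     NovaComputeOptEnvVars:
  #       - 'VENDOR_DEBUG=1'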
  EnableInstanceHA:
    default: false
    description: Whether to enable an Instance HA configuration or not.
                 This setup requires the Compute role to have the
                 PacemakerRemote service added to it.
    type: boolean
  NovaRbdPoolName:
    default: vms
    type: string
    description: The pool name for RBD backend ephemeral storage.
    tags:
      - role_specific
  CephClientKey:
    description: The Ceph client key. Can be created with ceph-authtool --gen-print-key.
    type: string
    hidden: true
  CephClusterFSID:
    type: string
    description: The Ceph cluster FSID. Must be a UUID.
  CinderEnableNfsBackend:
    default: false
    description: Whether or not to enable the NFS backend for Cinder
    type: boolean
  NovaNfsEnabled:
    default: false
    description: Whether or not to enable the NFS backend for Nova
    type: boolean
  NovaNfsShare:
    default: ''
    description: NFS share to mount for nova storage (when NovaNfsEnabled is true)
    type: string
  NovaNfsOptions:
    default: 'context=system_u:object_r:nfs_t:s0'
    description: NFS mount options for nova storage (when NovaNfsEnabled is true)
    type: string
  NovaNfsVersion:
    default: '4'
    description: >
      NFS version used for nova storage (when NovaNfsEnabled is true). Since
      NFSv3 does not support full locking, an NFSv4 version needs to be used.
      To avoid breaking current installations, the default is the previously
      hard-coded version 4. See the usage example below.
    type: string
    constraints:
      - allowed_pattern: "^4.?[0-9]?"
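  # A minimal sketch (hypothetical share and version values) of enabling the
  # Nova NFS backend with the parameters above:
  #
  #   parameter_defaults:
  #     NovaNfsEnabled: true
  #     NovaNfsShare: '192.168.122.1:/export/nova'
  #     NovaNfsVersion: '4.2'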
  CinderEnableRbdBackend:
    default: false
    description: Whether or not to enable the RBD backend for Cinder
    type: boolean
  NovaEnableRbdBackend:
    default: false
    description: Whether or not to enable the RBD backend for Nova
    type: boolean
  NovaComputeLibvirtVifDriver:
    default: ''
    description: Libvirt VIF driver configuration for the network
    type: string
  NovaPCIPassthrough:
    description: >
      List of PCI Passthrough whitelist parameters.
      Example -
      NovaPCIPassthrough:
        - vendor_id: "8086"
          product_id: "154c"
          address: "0000:05:00.0"
          physical_network: "datacentre"
      For different formats, refer to the nova.conf documentation for
      pci_passthrough_whitelist configuration
    type: json
    default: ''
  NovaVcpuPinSet:
    description: >
      A list or range of physical CPU cores to reserve for virtual machine
      processes.
      Ex. NovaVcpuPinSet: ['4-12','^8'] will reserve cores from 4-12 excluding 8
    type: comma_delimited_list
    default: []
    tags:
      - role_specific
  NovaComputeCpuSharedSet:
    description: >
      A list or range of physical CPU cores that will be used for best-effort guest
      vCPU resources (e.g. emulator threads in libvirt/QEMU).
      Ex. NovaComputeCpuSharedSet: [4-12,^8,15] will reserve cores from 4-12
      and 15, excluding 8.
    type: comma_delimited_list
    default: []
    tags:
      - role_specific
  NovaReservedHostMemory:
    description: >
      Reserved RAM for host processes, in MB.
    type: number
    default: 4096
    constraints:
      - range: { min: 512 }
    tags:
      - role_specific
  MonitoringSubscriptionNovaCompute:
    default: 'overcloud-nova-compute'
    type: string
  MigrationSshKey:
    type: json
    description: >
      SSH key for migration.
      Expects a dictionary with keys 'public_key' and 'private_key'.
      Values should be the contents of the SSH public/private key files.
      See the example below.
    default:
      public_key: ''
      private_key: ''
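  # A minimal sketch of the expected MigrationSshKey shape (placeholder key
  # material, not a real key pair):
  #
  #   parameter_defaults:
  #     MigrationSshKey:
  #       public_key: 'ssh-rsa AAAA... nova-migration'
  #       private_key: |
  #         -----BEGIN RSA PRIVATE KEY-----
  #         ...
  #         -----END RSA PRIVATE KEY-----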
  MigrationSshPort:
    default: 2022
    description: Target port for migration over ssh
    type: number
  VerifyGlanceSignatures:
    default: false
    description: Whether to verify image signatures.
    type: boolean
  NovaAutoDisabling:
    default: '10'
    description: Max number of consecutive build failures before nova-compute disables itself.
    type: string
  NeutronPhysnetNUMANodesMapping:
    description: |
      Map of physnet name as key and NUMA nodes as value.
      Ex. NeutronPhysnetNUMANodesMapping: {'foo': [0, 1], 'bar': [1]} where `foo` and `bar` are
      physnet names and corresponding values are list of associated numa_nodes.
    type: json
    default: {}
    tags:
      - role_specific
  NeutronTunnelNUMANodes:
    description: Used to configure NUMA affinity for all tunneled networks.
    type: comma_delimited_list
    default: []
    tags:
      - role_specific
  NovaResumeGuestsStateOnHostBoot:
    default: false
    description: Whether to start running instances on compute host reboot
    type: boolean
    tags:
      - role_specific
  NovaLibvirtRxQueueSize:
    description: >
      virtio-net RX queue size. Valid values are 256, 512, 1024
    default: 512
    type: number
    constraints:
      - allowed_values: [ 256, 512, 1024 ]
    tags:
      - role_specific
  NovaLibvirtTxQueueSize:
    description: >
      virtio-net TX queue size. Valid values are 256, 512, 1024
    default: 512
    type: number
    constraints:
      - allowed_values: [ 256, 512, 1024 ]
    tags:
      - role_specific
  NovaLibvirtFileBackedMemory:
    description: >
      Available capacity in MiB for file-backed memory.
    default: 0
    type: number
    tags:
      - role_specific
  NovaLibvirtVolumeUseMultipath:
    default: false
    description: Whether or not to enable multipath connections for volumes.
    type: boolean
    tags:
      - role_specific
  NovaHWMachineType:
    description: >
      To specify a default machine type per host architecture.
    default: 'x86_64=pc-i440fx-rhel7.6.0,aarch64=virt-rhel7.6.0,ppc64=pseries-rhel7.6.0,ppc64le=pseries-rhel7.6.0'
    type: string
    tags:
      - role_specific
  DeployIdentifier:
    default: ''
    type: string
    description: >
      Setting this to a unique value will re-run any deployment tasks which
      perform configuration on a Heat stack-update.
  NovaAdditionalCell:
    default: false
    description: Whether this is an additional cell to the default cell.
    type: boolean
  NovaComputeEnableKsm:
    default: false
    description: Whether or not to enable KSM on compute nodes. Especially
                 in NFV use cases one typically wants to keep it disabled.
    type: boolean
    tags:
      - role_specific

resources:

  ContainersCommon:
    type: ../containers-common.yaml

  MySQLClient:
    type: ../../deployment/database/mysql-client.yaml

  NovaComputeCommon:
    type: ./nova-compute-common-container-puppet.yaml
    properties:
      EndpointMap: {get_param: EndpointMap}
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

  NovaLogging:
    type: OS::TripleO::Services::Logging::NovaCommon
    properties:
      DockerNovaImage: {get_param: DockerNovaComputeImage}
      NovaServiceName: 'compute'

  NovaBase:
    type: ./nova-base-puppet.yaml
    properties:
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

  # Merging role-specific parameters (RoleParameters) with the default parameters.
  # RoleParameters take precedence over the default parameters.
  RoleParametersValue:
    type: OS::Heat::Value
    properties:
      type: json
      value:
        map_replace:
          - map_replace:
            - nova::compute::vcpu_pin_set: NovaVcpuPinSet
              nova::compute::cpu_shared_set: NovaComputeCpuSharedSet
              nova::compute::reserved_host_memory: NovaReservedHostMemory
              nova::compute::neutron_physnets_numa_nodes_mapping: NeutronPhysnetNUMANodesMapping
              nova::compute::neutron_tunnel_numa_nodes: NeutronTunnelNUMANodes
              nova::compute::resume_guests_state_on_host_boot: NovaResumeGuestsStateOnHostBoot
              nova::compute::libvirt::rx_queue_size: NovaLibvirtRxQueueSize
              nova::compute::libvirt::tx_queue_size: NovaLibvirtTxQueueSize
              nova::compute::libvirt::file_backed_memory: NovaLibvirtFileBackedMemory
              nova::compute::libvirt::volume_use_multipath: NovaLibvirtVolumeUseMultipath
              nova::compute::libvirt::libvirt_hw_machine_type: NovaHWMachineType
              compute_enable_ksm: NovaComputeEnableKsm
              nova::compute::rbd::libvirt_images_rbd_pool: NovaRbdPoolName
            - values: {get_param: [RoleParameters]}
          - values:
              NovaVcpuPinSet: {get_param: NovaVcpuPinSet}
              NovaComputeCpuSharedSet: {get_param: NovaComputeCpuSharedSet}
              NovaReservedHostMemory: {get_param: NovaReservedHostMemory}
              NeutronPhysnetNUMANodesMapping: {get_param: NeutronPhysnetNUMANodesMapping}
              NeutronTunnelNUMANodes: {get_param: NeutronTunnelNUMANodes}
              NovaResumeGuestsStateOnHostBoot: {get_param: NovaResumeGuestsStateOnHostBoot}
              NovaLibvirtRxQueueSize: {get_param: NovaLibvirtRxQueueSize}
              NovaLibvirtTxQueueSize: {get_param: NovaLibvirtTxQueueSize}
              NovaLibvirtFileBackedMemory: {get_param: NovaLibvirtFileBackedMemory}
              NovaLibvirtVolumeUseMultipath: {get_param: NovaLibvirtVolumeUseMultipath}
              NovaHWMachineType: {get_param: NovaHWMachineType}
              NovaComputeEnableKsm: {get_param: NovaComputeEnableKsm}
              NovaRbdPoolName: {get_param: NovaRbdPoolName}
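  # For example, a deployment that sets (hypothetical values):
  #
  #   parameter_defaults:
  #     ComputeParameters:
  #       NovaReservedHostMemory: 8192
  #
  # feeds {NovaReservedHostMemory: 8192} in via RoleParameters; the inner
  # map_replace rewrites the value of nova::compute::reserved_host_memory to
  # 8192, while keys absent from RoleParameters fall through to the outer
  # map_replace and pick up the global parameter values.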

conditions:
  enable_instance_ha: {equals: [{get_param: EnableInstanceHA}, true]}

  enable_live_migration_tunnelled:
    or:
      - equals: [{get_param: NovaNfsEnabled}, true]
      - equals: [{get_param: NovaEnableRbdBackend}, true]

  libvirt_file_backed_memory_enabled:
    not:
      or:
      - equals: [{get_param: NovaLibvirtFileBackedMemory}, '']
      - equals: [{get_param: [RoleParameters, NovaLibvirtFileBackedMemory]}, '']
      - equals: [{get_param: NovaLibvirtFileBackedMemory}, 0]
      - equals: [{get_param: [RoleParameters, NovaLibvirtFileBackedMemory]}, 0]

  is_not_additional_cell: {equals: [{get_param: NovaAdditionalCell}, false]}

outputs:
  role_data:
    description: Role data for the Nova Compute service.
    value:
      service_name: nova_compute
      monitoring_subscription: {get_param: MonitoringSubscriptionNovaCompute}
      config_settings:
        map_merge:
          - get_attr: [NovaLogging, config_settings]
          - get_attr: [NovaBase, role_data, config_settings]
          - get_attr: [RoleParametersValue, value]
          - nova::compute::libvirt::manage_libvirt_services: false
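            # Render NovaPCIPassthrough (the role-specific value if set, the
            # global default otherwise) as a JSON string: str_replace
            # serializes the value substituted for the JSON_PARAM placeholder.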
            nova::compute::pci::passthrough:
              str_replace:
                template: "JSON_PARAM"
                params:
                  map_replace:
                    - map_replace:
                      - JSON_PARAM: NovaPCIPassthrough
                      - values: {get_param: [RoleParameters]}
                    - values:
                        NovaPCIPassthrough: {get_param: NovaPCIPassthrough}
            # we manage migration in nova common puppet profile
            nova::compute::libvirt::migration_support: false
            tripleo::profile::base::nova::migration::client::nova_compute_enabled: true
            tripleo::profile::base::nova::migration::client::ssh_private_key: {get_param: [ MigrationSshKey, private_key ]}
            tripleo::profile::base::nova::migration::client::ssh_port: {get_param: MigrationSshPort}
            nova::compute::rbd::libvirt_images_rbd_ceph_conf:
              list_join:
              - ''
              - - '/etc/ceph/'
                - {get_param: CephClusterName}
                - '.conf'
            nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
            nova::compute::rbd::rbd_keyring:
              list_join:
              - '.'
              - - 'client'
                - {get_param: CephClientUserName}
            tripleo::profile::base::nova::compute::cinder_nfs_backend: {get_param: CinderEnableNfsBackend}
            tripleo::profile::base::nova::compute::nova_nfs_enabled: {get_param: NovaNfsEnabled}
            rbd_persistent_storage: {get_param: CinderEnableRbdBackend}
            nova::compute::rbd::libvirt_rbd_secret_key: {get_param: CephClientKey}
            nova::compute::rbd::libvirt_rbd_secret_uuid: {get_param: CephClusterFSID}
            nova::compute::instance_usage_audit: true
            nova::compute::instance_usage_audit_period: 'hour'
            nova::compute::consecutive_build_service_disable_threshold: {get_param: NovaAutoDisabling}
            nova::compute::rbd::ephemeral_storage: {get_param: NovaEnableRbdBackend}
            # TUNNELLED mode provides a security improvement for migration, but
            # can't be used in combination with block migration. So we only enable it
            # when shared storage is available (Ceph RBD is currently the only option).
            # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
            # In future versions of QEMU (2.6, mostly), danpb's native
            # encryption work will obsolete the need to use TUNNELLED transport
            # mode.
            nova::migration::live_migration_tunnelled:
              if:
              - enable_live_migration_tunnelled
              - true
              - false
            nova::compute::neutron::libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
            # NOTE: bind IP is found in hiera replacing the network name with the
            # local node IP for the given network; replacement examples
            # (e.g. for internal_api):
            # internal_api -> IP
            # internal_api_uri -> [IP]
            # internal_api_subnet -> IP/CIDR
            nova::compute::vncserver_proxyclient_address:
              str_replace:
                template:
                  "%{hiera('$NETWORK')}"
                params:
                  $NETWORK: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
            nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyCellPublic, protocol]}
            nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyCellPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyCellPublic, port]}
            nova::compute::verify_glance_signatures: {get_param: VerifyGlanceSignatures}
            # if libvirt_file_backed_memory_enabled we have to set ram_allocation_ratio to 1.0
            nova::ram_allocation_ratio:
              if:
              - libvirt_file_backed_memory_enabled
              - '1.0'
              - null
      service_config_settings:
        fluentd:
          tripleo_fluentd_groups_nova_compute:
            - nova
          tripleo_fluentd_sources_nova_compute:
            - {get_param: NovaComputeLoggingSource}
        collectd:
          tripleo.collectd.plugins.nova_compute:
            - virt
          collectd::plugin::virt::connection: 'qemu:///system'
      puppet_config:
        config_volume: nova_libvirt
        puppet_tags: nova_config,nova_paste_api_ini
        step_config:
          list_join:
            - "\n"
            - - # TODO(emilien): figure how to deal with libvirt profile.
                # We'll probably treat it like we do with Neutron plugins.
                # Until then, just include it in the default nova-compute role.
                include tripleo::profile::base::nova::compute::libvirt
              - {get_attr: [MySQLClient, role_data, step_config]}
        config_image: {get_param: DockerNovaLibvirtConfigImage}
      kolla_config:
        /var/lib/kolla/config_files/nova_compute.json:
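          # When Instance HA is enabled, nova-compute is wrapped by the
          # evacuation check script installed by host_prep_tasks below.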
          command:
            list_join:
            - ' '
            - - if:
                - enable_instance_ha
                - /var/lib/nova/instanceha/check-run-nova-compute
                - /usr/bin/nova-compute
              - get_attr: [NovaLogging, cmd_extra_args]
          config_files:
            - source: "/var/lib/kolla/config_files/src/*"
              dest: "/"
              merge: true
              preserve_properties: true
            - source: "/var/lib/kolla/config_files/src-iscsid/*"
              dest: "/etc/iscsi/"
              merge: true
              preserve_properties: true
            - source: "/var/lib/kolla/config_files/src-ceph/"
              dest: "/etc/ceph/"
              merge: true
              preserve_properties: true
          permissions:
            - path: /var/log/nova
              owner: nova:nova
              recurse: true
            - path:
                str_replace:
                  template: /etc/ceph/CLUSTER.client.USER.keyring
                  params:
                    CLUSTER: {get_param: CephClusterName}
                    USER: {get_param: CephClientUserName}
              owner: nova:nova
              perm: '0600'
      container_config_scripts:
        map_merge:
          - {get_attr: [ContainersCommon, container_config_scripts]}
          - {get_attr: [NovaComputeCommon, container_config_scripts]}
      docker_config:
        step_3:
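          # One-shot container that fixes up ownership of /var/lib/nova
          # before the nova_compute container starts.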
          nova_statedir_owner:
            image: &nova_compute_image {get_param: DockerNovaComputeImage}
            net: none
            user: root
            privileged: false
            detach: false
            volumes:
              - /var/lib/nova:/var/lib/nova:shared,z
              - /var/lib/container-config-scripts/:/container-config-scripts/:z
            command: "/container-config-scripts/pyshim.sh /container-config-scripts/nova_statedir_ownership.py"
            environment:
              # NOTE: this should force this container to re-run on each
              # update (scale-out, etc.)
              - list_join:
                  - ''
                  - - 'TRIPLEO_DEPLOY_IDENTIFIER='
                    - {get_param: DeployIdentifier}
        step_4:
          nova_wait_for_placement_service:
            start_order: 2
            image: *nova_compute_image
            user: root
            net: host
            privileged: false
            detach: false
            volumes:
              - /var/lib/container-config-scripts/:/container-config-scripts/:z
              - /var/lib/config-data/puppet-generated/nova_libvirt/etc/nova:/etc/nova:ro
            command: "/container-config-scripts/pyshim.sh /container-config-scripts/nova_wait_for_placement_service.py"
          nova_compute:
            start_order: 3
            image: *nova_compute_image
            ulimit: {get_param: DockerNovaComputeUlimit}
            ipc: host
            net: host
            privileged: true
            user: nova
            restart: always
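            # The healthcheck passes the RabbitMQ port (rendered as a string
            # by the yaql expression below) to the healthcheck script.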
            healthcheck:
              test:
                list_join:
                  - ' '
                  - - '/openstack/healthcheck'
                    - yaql:
                        expression: str($.data.port)
                        data:
                          port: {get_attr: [NovaBase, role_data, config_settings, 'nova::rabbit_port']}
            volumes:
              list_concat:
                - {get_attr: [ContainersCommon, volumes]}
                - {get_attr: [NovaLogging, volumes]}
                - {get_param: NovaComputeOptVolumes}
                -
                  - /var/lib/kolla/config_files/nova_compute.json:/var/lib/kolla/config_files/config.json:ro
                  - /var/lib/config-data/puppet-generated/nova_libvirt/:/var/lib/kolla/config_files/src:ro
                  - /etc/iscsi:/var/lib/kolla/config_files/src-iscsid:ro
                  - /etc/ceph:/var/lib/kolla/config_files/src-ceph:ro
                  - /dev:/dev
                  - /lib/modules:/lib/modules:ro
                  - /run:/run
                  - /var/lib/iscsi:/var/lib/iscsi:z
                  - /var/lib/nova:/var/lib/nova:shared,z
                  - /var/lib/libvirt:/var/lib/libvirt:shared,z
                  - /sys/class/net:/sys/class/net
                  - /sys/bus/pci:/sys/bus/pci
            environment:
              list_concat:
                - {get_param: NovaComputeOptEnvVars}
                -
                  - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
        step_5:
          if:
            - is_not_additional_cell
            - nova_cellv2_discover_hosts:
                start_order: 0
                image: *nova_compute_image
                net: host
                detach: false
                volumes:
                  list_concat:
                    - {get_attr: [ContainersCommon, volumes]}
                    -
                      - /var/lib/config-data/nova_libvirt/etc/my.cnf.d/:/etc/my.cnf.d/:ro
                      - /var/lib/config-data/nova_libvirt/etc/nova/:/etc/nova/:ro
                      - /var/log/containers/nova:/var/log/nova
                      - /var/lib/container-config-scripts/:/container-config-scripts/
                user: root
                command: "/container-config-scripts/pyshim.sh /container-config-scripts/nova_cell_v2_discover_host.py"
            - {}
      host_prep_tasks:
        list_concat:
        - {get_attr: [NovaLogging, host_prep_tasks]}
        - - name: Mount Nova NFS Share
            vars:
              nfs_backend_enable: {get_param: NovaNfsEnabled}
              nfs_share: {get_param: NovaNfsShare}
              nfs_options: {get_param: NovaNfsOptions}
              nfs_vers: {get_param: NovaNfsVersion}
            mount: name=/var/lib/nova/instances src="{{nfs_share}}" fstype=nfs4 opts="_netdev,bg,{{nfs_options}},vers={{nfs_vers}},nfsvers={{nfs_vers}}" state=mounted
            when: nfs_backend_enable|bool
          - name: is Nova Resume Guests State On Host Boot enabled
            set_fact:
              resume_guests_state_on_host_boot_enabled: {get_param: NovaResumeGuestsStateOnHostBoot}
          - name: install libvirt-guests systemd unit file
            when: resume_guests_state_on_host_boot_enabled|bool
            block:
              - name: libvirt-guests unit to stop nova_compute container before shutting down VMs
                copy:
                  dest: /etc/systemd/system/libvirt-guests.service
                  content: |
                    [Unit]
                    Description=Suspend/Resume Running libvirt Guests
                    Requires=virt-guest-shutdown.target
                    After=network.target
                    After=time-sync.target
                    After=virt-guest-shutdown.target
                    After=docker.service
                    After=paunch-container-shutdown.service
                    After=rhel-push-plugin.service
                    Documentation=man:libvirtd(8)
                    Documentation=https://libvirt.org

                    [Service]
                    EnvironmentFile=-/etc/sysconfig/libvirt-guests
                    # Hack just call traditional service until we factor
                    # out the code
                    ExecStart=/usr/libexec/libvirt-guests.sh start
                    ExecStop=/bin/{{container_cli}} stop nova_compute
                    ExecStop=/usr/libexec/libvirt-guests.sh stop
                    Type=oneshot
                    RemainAfterExit=yes
                    StandardOutput=journal+console
                    TimeoutStopSec=0

                    [Install]
                    WantedBy=multi-user.target
              - name: libvirt-guests enable VM shutdown on compute reboot/shutdown
                systemd:
                  name: libvirt-guests
                  enabled: yes
                  daemon_reload: yes
          - name: create persistent directories
            file:
              path: "{{ item.path }}"
              state: directory
              setype: "{{ item.setype }}"
            with_items:
              - { 'path': /var/lib/nova, 'setype': svirt_sandbox_file_t }
              - { 'path': /var/lib/nova/instances, 'setype': svirt_sandbox_file_t }
              - { 'path': /var/lib/libvirt, 'setype': svirt_sandbox_file_t }
          - name: ensure ceph configurations exist
            file:
              path: /etc/ceph
              state: directory
          - name: is Instance HA enabled
            set_fact:
              instance_ha_enabled: {get_param: EnableInstanceHA}
          - name: enable virt_sandbox_use_netlink for healthcheck
            seboolean:
              name: virt_sandbox_use_netlink
              persistent: yes
              state: yes
          - name: install Instance HA recovery script
            when: instance_ha_enabled|bool
            block:
            - name: prepare Instance HA script directory
              file:
                path: /var/lib/nova/instanceha
                state: directory
            - name: install Instance HA script that runs nova-compute
              copy:
                content: {get_file: ../../extraconfig/tasks/instanceha/check-run-nova-compute}
                dest: /var/lib/nova/instanceha/check-run-nova-compute
                mode: '0755'
            - name: Get list of instance HA compute nodes
              command: hiera -c /etc/puppet/hiera.yaml compute_instanceha_short_node_names
              register: iha_nodes
            - name: If instance HA is enabled on the node activate the evacuation completed check
              file: path=/var/lib/nova/instanceha/enabled state=touch
              when: iha_nodes.stdout|lower | search('"'+ansible_hostname|lower+'"')
          - name: is KSM enabled
            set_fact:
              compute_ksm_enabled: {get_attr: [RoleParametersValue, value, compute_enable_ksm]}
          - name: disable KSM on compute
            when: not compute_ksm_enabled|bool
            block:
            - name: Populate service facts (ksm)
              service_facts: # needed to make yaml happy
            - name: disable KSM services
              service:
                name: "{{ item }}"
                state: stopped
                enabled: no
              with_items:
                - ksm.service
                - ksmtuned.service
              when: "'ksm.service' in ansible_facts.services"
              register: ksmdisabled
            # When KSM is disabled, any memory pages that were shared prior to
            # deactivating KSM are still shared. To delete all of the PageKSM
            # in the system, we use:
            - name: delete PageKSM after disabling KSM on compute
              # shell (not command) is required for the redirection to work
              shell: echo 2 >/sys/kernel/mm/ksm/run
              when: ksmdisabled.changed
          - name: enable KSM on compute
            when: compute_ksm_enabled|bool
            block:
            - name: Populate service facts (ksm)
              service_facts: # needed to make yaml happy
            # mschuppert: we can remove the CentOS/RHEL split here when CentOS8/
            # RHEL8 is available and we have the same package name providing the
            # KSM services
            - name: make sure package providing ksmtuned is installed (CentOS)
              package:
                name: qemu-kvm-common-ev
                state: present
              when: ansible_distribution == 'CentOS'
            - name: make sure package providing ksmtuned is installed (RHEL)
              package:
                name: qemu-kvm-common-rhev
                state: present
              when: ansible_distribution == 'RedHat'
            - name: enable ksmtuned
              service:
                name: "{{ item }}"
                state: started
                enabled: yes
              with_items:
                - ksm.service
                - ksmtuned.service
      upgrade_tasks:
        - name: Remove openstack-nova-compute and python-nova packages during upgrade
          package:
            name:
              - openstack-nova-compute
              - python-nova
            state: removed
          ignore_errors: True
          when: step|int == 2
      update_tasks:
        - name: Remove openstack-nova-compute and python-nova packages during update
          package:
            name:
              - openstack-nova-compute
              - python-nova
            state: removed
          ignore_errors: True
          when: step|int == 2
      post_upgrade_tasks:
        - when: step|int == 1
          import_role:
            name: tripleo-docker-rm
          vars:
            containers_to_rm:
              - nova_compute
      fast_forward_upgrade_tasks:
        - when:
            - step|int == 0
            - release == 'ocata'
          block:
            - name: Check if nova-compute is deployed
              command: systemctl is-enabled --quiet openstack-nova-compute
              ignore_errors: True
              register: nova_compute_enabled_result
            - name: Set fact nova_compute_enabled
              set_fact:
                nova_compute_enabled: "{{ nova_compute_enabled_result.rc == 0 }}"
        - when:
            - step|int == 1
            - release == 'ocata'
          block:
            - name: Stop and disable nova-compute service
              service: name=openstack-nova-compute state=stopped
              when:
                - nova_compute_enabled|bool
            - name: Set upgrade marker in nova statedir
              file: path=/var/lib/nova/upgrade_marker state=touch owner=nova group=nova
              when:
                - nova_compute_enabled|bool