Merge pull request #73 from markgoddard/add-compute-group

Add support for virtualised compute

commit c3db775203

Files changed:

README.rst
ansible/compute-node-discovery.yml
ansible/compute-node-provide.yml
ansible/dell-compute-node-boot-mode.yml
ansible/dell-compute-node-discovery.yml
ansible/dell-compute-node-inventory.yml
ansible/group_vars/
ansible/kolla-openstack.yml
ansible/overcloud-introspection-rules-dell-lldp-workaround.yml
ansible/overcloud-introspection-rules.yml
ansible/overcloud-ipa-build.yml
ansible/overcloud-ipa-images.yml
ansible/provision-net.yml
ansible/roles/kolla-ansible/templates/
doc/source/
etc/kayobe/
README.rst
@@ -32,6 +32,8 @@ Features
 * Discovery, introspection and provisioning of bare metal compute hosts
   using `OpenStack ironic <https://docs.openstack.org/developer/ironic/>`_ and
   `ironic inspector <https://docs.openstack.org/developer/ironic-inspector/>`_
+* Virtualised compute using `OpenStack nova
+  <https://docs.openstack.org/developer/nova/>`_
 * Containerised workloads on bare metal using `OpenStack magnum
   <https://docs.openstack.org/developer/magnum/>`_
 * Big data on bare metal using `OpenStack sahara
@@ -41,5 +43,3 @@ In the near future we aim to add support for the following:

 * Control plane and workload monitoring and log aggregation using `OpenStack
   monasca <https://wiki.openstack.org/wiki/Monasca>`_
-* Virtualised compute using `OpenStack nova
-  <https://docs.openstack.org/developer/nova/>`_
ansible/compute-node-discovery.yml
@@ -1,8 +1,8 @@
 ---
 - include: dell-compute-node-inventory.yml

-- name: Ensure compute nodes are PXE booted
-  hosts: compute
+- name: Ensure baremetal compute nodes are PXE booted
+  hosts: baremetal-compute
   gather_facts: no
   vars:
     controller_host: "{{ groups['controllers'][0] }}"
@@ -19,7 +19,7 @@
         # be respected when using delegate_to.
         ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}"

-    - name: Ensure compute nodes are powered off
+    - name: Ensure baremetal compute nodes are powered off
       command: ipmitool -U {{ ipmi_username }} -P {{ ipmi_password }} -H {{ ipmi_address }} -I lanplus chassis power off
       delegate_to: "{{ controller_host }}"
      vars:
@@ -31,7 +31,7 @@
       pause:
         seconds: 5

-    - name: Ensure compute nodes are set to boot via PXE
+    - name: Ensure baremetal compute nodes are set to boot via PXE
       command: ipmitool -U {{ ipmi_username }} -P {{ ipmi_password }} -H {{ ipmi_address }} -I lanplus chassis bootdev pxe
       delegate_to: "{{ controller_host }}"
       vars:
@@ -43,7 +43,7 @@
       pause:
         seconds: 5

-    - name: Ensure compute nodes are powered on
+    - name: Ensure baremetal compute nodes are powered on
       command: ipmitool -U {{ ipmi_username }} -P {{ ipmi_password }} -H {{ ipmi_address }} -I lanplus chassis power on
       delegate_to: "{{ controller_host }}"
       vars:
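A note on the delegation pattern used above (an explanatory sketch, not part of the diff): the play targets the BMC hosts, but each ipmitool command is delegated to a controller, with ansible_host overridden from the controller's hostvars so the delegated connection reaches the controller's real address. A minimal self-contained example of the same technique:

---
# Sketch: iterate over baremetal-compute hosts while executing each
# command on the first controller.
- hosts: baremetal-compute
  gather_facts: no
  vars:
    controller_host: "{{ groups['controllers'][0] }}"
  tasks:
    - name: Run a command from the controller for each compute node
      command: echo {{ inventory_hostname }}
      delegate_to: "{{ controller_host }}"
      vars:
        # Ensure delegation connects to the controller's address, not the BMC's.
        ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}"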
ansible/compute-node-provide.yml
@@ -1,14 +1,14 @@
 ---
-# This playbook will ensure that all compute nodes in the overcloud ironic
-# inventory are available. Supported initial states include 'enroll' and
+# This playbook will ensure that all baremetal compute nodes in the overcloud
+# ironic inventory are available. Supported initial states include 'enroll' and
 # 'manageable'.

-- name: Ensure compute nodes are available in ironic
+- name: Ensure baremetal compute nodes are available in ironic
   hosts: controllers[0]
   vars:
     venv: "{{ virtualenv_path }}/shade"
-    # Set this to a colon-separated list of compute node hostnames to provide.
-    # If unset, all compute nodes will be provided.
+    # Set this to a colon-separated list of baremetal compute node hostnames to
+    # provide. If unset, all baremetal compute nodes will be provided.
     compute_node_limit: ""
     compute_node_limit_list: "{{ compute_node_limit.split(':') }}"
   roles:
@@ -74,7 +74,7 @@
     - name: Fail if any ironic nodes are not available
       fail:
         msg: >
-          Failed to make compute node {{ item['Name'] }} available in ironic.
+          Failed to make baremetal compute node {{ item['Name'] }} available in ironic.
           Provisioning state is {{ item['Provisioning State'] }}.
       with_items: "{{ ironic_nodes }}"
       when: item['Provisioning State'] != 'available'
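A usage note (an assumption about invocation, not part of the commit): the colon-separated limit could be supplied as an extra variable when running the playbook directly; the node names below are hypothetical.

ansible-playbook ansible/compute-node-provide.yml -e compute_node_limit=cn0:cn1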
ansible/dell-compute-node-boot-mode.yml
@@ -1,11 +1,11 @@
 ---
-# Set the boot mode (BIOS, UEFI) of Dell compute nodes.
+# Set the boot mode (BIOS, UEFI) of Dell baremetal compute nodes.

-# Add compute nodes to the Ansible inventory.
+# Add Dell baremetal compute nodes to the Ansible inventory.
 - include: dell-compute-node-inventory.yml

-- name: Ensure compute nodes boot mode is set
-  hosts: compute
+- name: Ensure Dell baremetal compute nodes boot mode is set
+  hosts: baremetal-compute
   gather_facts: no
   vars:
     # Set this to the required boot mode. One of 'bios' or 'uefi'.
ansible/dell-compute-node-discovery.yml
@@ -1,11 +1,11 @@
 ---
-# Configure the compute nodes to PXE boot.
+# Configure the Dell baremetal compute nodes to PXE boot.

-# Add compute nodes to the Ansible inventory.
+# Add Dell baremetal compute nodes to the Ansible inventory.
 - include: dell-compute-node-inventory.yml

-- name: Ensure compute nodes are PXE booted
-  hosts: compute
+- name: Ensure Dell baremetal compute nodes are PXE booted
+  hosts: baremetal-compute
   gather_facts: no
   vars:
     # Set this to the index of the interface on which to enable PXE.
ansible/dell-compute-node-inventory.yml
@@ -1,17 +1,18 @@
 ---
-- name: Ensure compute nodes are present in the Ansible inventory
+- name: Ensure Dell baremetal compute nodes are present in the Ansible inventory
   hosts: config-mgmt
   gather_facts: no
   vars:
-    # Set this to a colon-separated list of compute node hostnames on which to
-    # trigger discovery. If unset, all compute nodes will be triggered.
+    # Set this to a colon-separated list of baremetal compute node hostnames on
+    # which to trigger discovery. If unset, all compute nodes will be
+    # triggered.
     compute_node_limit: ""
     compute_node_limit_list: "{{ compute_node_limit.split(':') }}"
   tasks:
-    - name: Add hosts for the compute nodes
+    - name: Add hosts for the Dell baremetal compute nodes
       add_host:
         name: "{{ item.key }}"
-        groups: compute
+        groups: baremetal-compute
         # SSH configuration to access the BMC.
         ansible_host: "{{ item.value }}"
         ansible_user: "{{ ipmi_username }}"
@@ -24,8 +25,8 @@
         - not compute_node_limit or item.key | replace('-idrac', '') in compute_node_limit_list
       run_once: True

-- name: Ensure compute nodes are present in the Ansible inventory
-  hosts: compute
+- name: Ensure Dell baremetal compute nodes are present in the Ansible inventory
+  hosts: baremetal-compute
   gather_facts: no
   vars:
     compute_node_limit: ""
@@ -33,7 +34,9 @@
   tasks:
     - name: Set facts for the compute nodes for IPMI addresses
       set_fact:
        bmc_type: idrac
         ipmi_address: "{{ idrac_network_ips[inventory_hostname] }}"
       # Don't add hosts that already exist.
-      when: not compute_node_limit or inventory_hostname in compute_node_limit_list
+      when:
+        - not ipmi_address
+        - not compute_node_limit or inventory_hostname in compute_node_limit_list
       run_once: True
ansible/group_vars/all/compute (new file, 115 lines)
---
###############################################################################
# Compute node configuration.

# User with which to access the computes via SSH during bootstrap, in order
# to setup the Kayobe user account.
compute_bootstrap_user: "{{ lookup('env', 'USER') }}"

###############################################################################
# Compute network interface configuration.

# List of networks to which compute nodes are attached.
compute_network_interfaces: >
  {{ (compute_default_network_interfaces +
      compute_extra_network_interfaces) | unique | list }}

# List of default networks to which compute nodes are attached.
compute_default_network_interfaces: >
  {{ [provision_oc_net_name,
      internal_net_name,
      storage_net_name] | unique | list }}

# List of extra networks to which compute nodes are attached.
compute_extra_network_interfaces: []

###############################################################################
# Compute node BIOS configuration.

# Dict of compute BIOS options. Format is same as that used by stackhpc.drac
# role.
compute_bios_config: "{{ compute_bios_config_default | combine(compute_bios_config_extra) }}"

# Dict of default compute BIOS options. Format is same as that used by
# stackhpc.drac role.
compute_bios_config_default: {}

# Dict of additional compute BIOS options. Format is same as that used by
# stackhpc.drac role.
compute_bios_config_extra: {}

###############################################################################
# Compute node RAID configuration.

# List of compute RAID volumes. Format is same as that used by stackhpc.drac
# role.
compute_raid_config: "{{ compute_raid_config_default + compute_raid_config_extra }}"

# List of default compute RAID volumes. Format is same as that used by
# stackhpc.drac role.
compute_raid_config_default: []

# List of additional compute RAID volumes. Format is same as that used by
# stackhpc.drac role.
compute_raid_config_extra: []

###############################################################################
# Compute node LVM configuration.

# List of compute volume groups. See mrlesmithjr.manage-lvm role for
# format.
compute_lvm_groups: "{{ compute_lvm_groups_default + compute_lvm_groups_extra }}"

# Default list of compute volume groups. See mrlesmithjr.manage-lvm role for
# format.
compute_lvm_groups_default:
  - "{{ compute_lvm_group_data }}"

# Additional list of compute volume groups. See mrlesmithjr.manage-lvm role
# for format.
compute_lvm_groups_extra: []

# Compute LVM volume group for data. See mrlesmithjr.manage-lvm role for
# format.
compute_lvm_group_data:
  vgname: data
  disks: "{{ compute_lvm_group_data_disks | join(',') }}"
  create: True
  lvnames: "{{ compute_lvm_group_data_lvs }}"

# List of disks for use by compute LVM data volume group. Default to an
# invalid value to require configuration.
compute_lvm_group_data_disks:
  - changeme

# List of LVM logical volumes for the data volume group.
compute_lvm_group_data_lvs:
  - "{{ compute_lvm_group_data_lv_docker_volumes }}"

# Docker volumes LVM backing volume.
compute_lvm_group_data_lv_docker_volumes:
  lvname: docker-volumes
  size: "{{ compute_lvm_group_data_lv_docker_volumes_size }}"
  create: True
  filesystem: "{{ compute_lvm_group_data_lv_docker_volumes_fs }}"
  mount: True
  mntp: /var/lib/docker/volumes

# Size of docker volumes LVM backing volume.
compute_lvm_group_data_lv_docker_volumes_size: 75%VG

# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
compute_lvm_group_data_lv_docker_volumes_fs: ext4

###############################################################################
# Compute node sysctl configuration.

# Dict of sysctl parameters to set.
compute_sysctl_parameters: {}

###############################################################################
# Compute node user configuration.

# List of users to create. This should be in a format accepted by the
# singleplatform-eng.users role.
compute_users: "{{ users_default }}"
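A usage sketch (not part of the commit): compute_lvm_group_data_disks deliberately defaults to an invalid placeholder, so a deployment would override it, for example in ${KAYOBE_CONFIG_PATH}/compute.yml. The device name below is hypothetical.

# Hypothetical override: back the compute data volume group with a second disk.
compute_lvm_group_data_disks:
  - /dev/sdb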
@@ -323,7 +323,7 @@ inspector_dell_switch_lldp_workaround_group:
 # data which may be useful in environments without Swift.

 # Whether the inspection data store is enabled.
-inspector_store_enabled: "{{ not kolla_enable_swift | bool }}"
+inspector_store_enabled: "{{ kolla_enable_ironic | bool and not kolla_enable_swift | bool }}"

 # Port on which the inspection data store should listen.
 inspector_store_port: 8080
@@ -227,6 +227,9 @@ kolla_overcloud_inventory_top_level_group_map:
   network:
     groups:
       - network
+  compute:
+    groups:
+      - compute

 # List of names of top level kolla-ansible groups. Any of these groups which
 # have no hosts mapped to them will be provided with an empty group definition.
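For illustration (a usage assumption, not part of the diff): each entry maps a kayobe inventory group to one or more kolla-ansible top level groups, so further groups could be mapped in the same format. Note that overriding the variable replaces the whole map, so the existing entries would be kept alongside any addition; the 'storage' group below is hypothetical.

# Hypothetical additional entry, in the same format as the diff above.
kolla_overcloud_inventory_top_level_group_map:
  # ... existing entries (controllers, network, monitoring, compute) ...
  storage:
    groups:
      - storage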
ansible/group_vars/compute/ansible-user (new file, 7 lines)
---
# User with which to access the computes via SSH.
ansible_user: "{{ kayobe_ansible_user }}"

# User with which to access the computes before the kayobe_ansible_user
# account has been created.
bootstrap_user: "{{ compute_bootstrap_user }}"

ansible/group_vars/compute/bios (new file, 7 lines)
---
###############################################################################
# Compute node BIOS configuration.

# Dict of compute node BIOS options. Format is same as that used by
# stackhpc.drac role.
bios_config: "{{ compute_bios_config }}"

ansible/group_vars/compute/lvm (new file, 6 lines)
---
###############################################################################
# Compute node LVM configuration.

# List of LVM volume groups.
lvm_groups: "{{ compute_lvm_groups }}"

ansible/group_vars/compute/network (new file, 6 lines)
---
###############################################################################
# Network interface attachments.

# List of networks to which these nodes are attached.
network_interfaces: "{{ compute_network_interfaces | unique | list }}"

ansible/group_vars/compute/raid (new file, 7 lines)
---
###############################################################################
# Compute node RAID configuration.

# List of compute node RAID volumes. Format is same as that used by
# stackhpc.drac role.
raid_config: "{{ compute_raid_config }}"

ansible/group_vars/compute/sysctl (new file, 3 lines)
---
# Dict of sysctl parameters to set.
sysctl_parameters: "{{ compute_sysctl_parameters }}"

ansible/group_vars/compute/users (new file, 4 lines)
---
# List of users to create. This should be in a format accepted by the
# singleplatform-eng.users role.
users: "{{ compute_users }}"
ansible/kolla-openstack.yml
@@ -1,6 +1,14 @@
 ---
+- name: Check whether Ironic is enabled
+  hosts: controllers
+  tasks:
+    - name: Create controllers group with ironic enabled
+      group_by:
+        key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
 - name: Ensure locally built Ironic Python Agent images are copied
-  hosts: controllers[0]
+  hosts: controllers_with_ironic_enabled_True
   vars:
     # These are the filenames generated by overcloud-ipa-build.yml.
     ipa_image_name: "ipa"
@@ -139,17 +147,19 @@
         }}
       with_items: "{{ kolla_neutron_ml2_generic_switch_hosts }}"

-    - name: Set facts containing IPA kernel and ramdisk URLs
-      set_fact:
-        kolla_inspector_ipa_kernel_upstream_url: "{{ inspector_ipa_kernel_upstream_url }}"
-        kolla_inspector_ipa_ramdisk_upstream_url: "{{ inspector_ipa_ramdisk_upstream_url }}"
-      when: not ipa_build_images | bool
+    - block:
+        - name: Set facts containing IPA kernel and ramdisk URLs
+          set_fact:
+            kolla_inspector_ipa_kernel_upstream_url: "{{ inspector_ipa_kernel_upstream_url }}"
+            kolla_inspector_ipa_ramdisk_upstream_url: "{{ inspector_ipa_ramdisk_upstream_url }}"
+          when: not ipa_build_images | bool

-    - name: Set facts containing IPA kernel and ramdisk paths
-      set_fact:
-        kolla_inspector_ipa_kernel_path: "{{ image_cache_path }}/{{ ipa_image_name }}/{{ ipa_images_kernel_name }}"
-        kolla_inspector_ipa_ramdisk_path: "{{ image_cache_path }}/{{ ipa_image_name }}/{{ ipa_images_ramdisk_name }}"
-      when: ipa_build_images | bool
+        - name: Set facts containing IPA kernel and ramdisk paths
+          set_fact:
+            kolla_inspector_ipa_kernel_path: "{{ image_cache_path }}/{{ ipa_image_name }}/{{ ipa_images_kernel_name }}"
+            kolla_inspector_ipa_ramdisk_path: "{{ image_cache_path }}/{{ ipa_image_name }}/{{ ipa_images_ramdisk_name }}"
+          when: ipa_build_images | bool
+      when: kolla_enable_ironic | bool
   tags:
     - config
 roles:
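The pattern introduced here, and repeated in the playbooks below, uses Ansible's group_by module to build a dynamic group whose name encodes a variable's value, so that a later play can target hosts conditionally without a when clause on every task. A minimal self-contained sketch of the technique, with hypothetical group and variable names:

---
# Sketch: partition hosts by a boolean variable, then target only the
# hosts where it evaluates to True.
- hosts: all
  gather_facts: no
  tasks:
    - name: Group hosts by the value of my_feature_enabled
      group_by:
        key: "hosts_with_feature_{{ my_feature_enabled }}"

- hosts: hosts_with_feature_True
  gather_facts: no
  tasks:
    - name: Run only where the feature is enabled
      debug:
        msg: "feature enabled on {{ inventory_hostname }}"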
ansible/overcloud-introspection-rules-dell-lldp-workaround.yml
@@ -8,8 +8,16 @@
 # each ironic node that matches against the switch system and the relevant
 # interface name, then sets the node's name appropriately.

-- name: Group controller hosts in systems requiring the workaround
+- name: Check whether Ironic is enabled
   hosts: controllers
+  tasks:
+    - name: Create controllers group with ironic enabled
+      group_by:
+        key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
+- name: Group controller hosts in systems requiring the workaround
+  hosts: controllers_with_ironic_enabled_True
   gather_facts: False
   tasks:
     - name: Group controller hosts in systems requiring the Dell switch LLDP workaround
       group_by:
@@ -18,6 +26,7 @@
 - name: Ensure introspection rules for Dell switch LLDP workarounds are registered in Ironic Inspector
   # Only required to run on a single host.
   hosts: controllers_require_workaround_True[0]
   gather_facts: False
   vars:
     all_switch_interfaces: []
     ironic_inspector_rules: []
ansible/overcloud-introspection-rules.yml
@@ -1,7 +1,15 @@
 ---
+- name: Check whether Ironic is enabled
+  hosts: controllers
+  tasks:
+    - name: Create controllers group with ironic enabled
+      group_by:
+        key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
 - name: Ensure introspection rules are registered in Ironic Inspector
   # Only required to run on a single host.
-  hosts: controllers[0]
+  hosts: controllers_with_ironic_enabled_True[0]
   gather_facts: False
   vars:
     venv: "{{ virtualenv_path }}/shade"
   pre_tasks:
ansible/overcloud-ipa-build.yml
@@ -4,8 +4,16 @@
 #
 # The images will be stored in {{ image_cache_path }}/{{ ipa_image_name }}.

+- name: Check whether Ironic is enabled
+  hosts: controllers
+  tasks:
+    - name: Create controllers group with ironic enabled
+      group_by:
+        key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
 - name: Ensure Ironic Python Agent images are built and installed
-  hosts: controllers[0]
+  hosts: controllers_with_ironic_enabled_True[0]
   gather_facts: False
   vars:
     ipa_image_name: "ipa"
   tasks:
ansible/overcloud-ipa-images.yml
@@ -1,6 +1,14 @@
 ---
+- name: Check whether Ironic is enabled
+  hosts: controllers
+  tasks:
+    - name: Create controllers group with ironic enabled
+      group_by:
+        key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
 - name: Ensure Ironic Python Agent (IPA) images are downloaded and registered
-  hosts: controllers[0]
+  hosts: controllers_with_ironic_enabled_True[0]
   gather_facts: False
   vars:
     # These are the filenames generated by overcloud-ipa-build.yml.
     ipa_image_name: "ipa"
ansible/provision-net.yml
@@ -1,7 +1,15 @@
 ---
+- name: Check whether Ironic is enabled
+  hosts: controllers
+  tasks:
+    - name: Create controllers group with ironic enabled
+      group_by:
+        key: "controllers_with_ironic_enabled_{{ kolla_enable_ironic }}"
+
 - name: Ensure provisioning network and subnet are registered in neutron
   # Only required to run on a single host.
-  hosts: controllers[0]
+  hosts: controllers_with_ironic_enabled_True[0]
   gather_facts: False
   pre_tasks:
     - name: Validate OpenStack password authentication parameters
       fail:
ansible/roles/kolla-ansible/templates/ (overcloud inventory template)
@@ -15,7 +15,7 @@
 # Top level {{ group }} group.
 [{{ group }}]
 # These hostnames must be resolvable from your deployment host
-{% for host in groups[group] %}
+{% for host in groups.get(group, []) %}
 {% set host_hv=hostvars[host] %}
 {{ host }}{% for hv_name in kolla_overcloud_inventory_pass_through_host_vars %}{% if hv_name in host_hv %} {{ hv_name }}={{ host_hv[hv_name] }}{% endif %}{% endfor %}
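Why this template change helps (an explanatory note, not part of the commit): in Jinja2, groups[group] raises an undefined error when a mapped group is absent from the inventory, whereas groups.get(group, []) falls back to an empty list, so newly mapped groups such as compute render as empty inventory sections instead of breaking template generation. A minimal sketch with a hypothetical group name:

{# Sketch: renders an empty [storage] section when no storage hosts exist. #}
[storage]
{% for host in groups.get('storage', []) %}
{{ host }}
{% endfor %}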
doc/source/architecture.rst
@@ -41,6 +41,9 @@ Network
 Monitoring
     Monitoring hosts run the control plane and workload monitoring services.
     Currently, kayobe does not deploy any services onto monitoring hosts.
+Virtualised compute hypervisors
+    Virtualised compute hypervisors run the tenant Virtual Machines (VMs) and
+    associated OpenStack services for compute, networking and storage.

 Networks
 ========
doc/source/configuration/index.rst (new file, 10 lines)
===================
Configuration Guide
===================

.. toctree::
   :maxdepth: 2

   kayobe
   network
   kolla-ansible
doc/source/configuration/kayobe.rst
@@ -1,6 +1,6 @@
-=============
-Configuration
-=============
+====================
+Kayobe Configuration
+====================

 This section covers configuration of Kayobe. As an Ansible-based project,
 Kayobe is for the most part configured using YAML files.
@@ -594,6 +594,20 @@
 a list of names of additional networks to attach. Alternatively, the list may
 be completely overridden by setting ``monitoring_network_interfaces``. These
 variables are found in ``${KAYOBE_CONFIG_PATH}/monitoring.yml``.

+Virtualised Compute Hosts
+-------------------------
+
+By default, virtualised compute hosts are attached to the following networks:
+
+* overcloud provisioning network
+* internal network
+* storage network
+
+This list may be extended by setting ``compute_extra_network_interfaces`` to a
+list of names of additional networks to attach. Alternatively, the list may be
+completely overridden by setting ``compute_network_interfaces``. These
+variables are found in ``${KAYOBE_CONFIG_PATH}/compute.yml``.
+
 Other Hosts
 -----------
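As a usage sketch (not part of the commit), extending the attachments might look like this in ${KAYOBE_CONFIG_PATH}/compute.yml; the network name is hypothetical and would need to be defined elsewhere in the network configuration.

# Hypothetical: attach compute hosts to one additional named network.
compute_extra_network_interfaces:
  - monitoring_net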
doc/source/index.rst
@@ -23,9 +23,7 @@ Documentation
    architecture
    installation
    usage
-   configuration/kayobe
-   configuration/network
-   configuration/kolla-ansible
+   configuration/index
    deployment
    upgrading
    administration
@@ -24,6 +24,8 @@ Features
   which hosts run the nova compute service for ironic. This may be used to
   avoid the experimental HA nova compute service for ironic, by specifying a
   single host.
+* Adds support for deployment of virtualised compute hosts. These hosts should
+  be added to the ``[compute]`` group.

 Upgrade Notes
 -------------
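For illustration (hypothetical hostnames, not part of the diff), virtualised compute hosts would be listed in the kayobe inventory like this:

[compute]
# Hypothetical virtualised compute hosts.
compute0
compute1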
etc/kayobe/compute.yml (new file, 101 lines)
---
###############################################################################
# Compute node configuration.

# User with which to access the computes via SSH during bootstrap, in order
# to setup the Kayobe user account.
#compute_bootstrap_user:

###############################################################################
# Network interface attachments.

# List of networks to which compute nodes are attached.
#compute_network_interfaces:

# List of default networks to which compute nodes are attached.
#compute_default_network_interfaces:

# List of extra networks to which compute nodes are attached.
#compute_extra_network_interfaces:

###############################################################################
# Compute node BIOS configuration.

# Dict of compute BIOS options. Format is same as that used by stackhpc.drac
# role.
#compute_bios_config:

# Dict of default compute BIOS options. Format is same as that used by
# stackhpc.drac role.
#compute_bios_config_default:

# Dict of additional compute BIOS options. Format is same as that used by
# stackhpc.drac role.
#compute_bios_config_extra:

###############################################################################
# Compute node RAID configuration.

# List of compute RAID volumes. Format is same as that used by stackhpc.drac
# role.
#compute_raid_config:

# List of default compute RAID volumes. Format is same as that used by
# stackhpc.drac role.
#compute_raid_config_default:

# List of additional compute RAID volumes. Format is same as that used by
# stackhpc.drac role.
#compute_raid_config_extra:

###############################################################################
# Compute node LVM configuration.

# List of compute volume groups. See mrlesmithjr.manage-lvm role for
# format.
#compute_lvm_groups:

# Default list of compute volume groups. See mrlesmithjr.manage-lvm role for
# format.
#compute_lvm_groups_default:

# Additional list of compute volume groups. See mrlesmithjr.manage-lvm role
# for format.
#compute_lvm_groups_extra:

# Compute LVM volume group for data. See mrlesmithjr.manage-lvm role for
# format.
#compute_lvm_group_data:

# List of disks for use by compute LVM data volume group. Default to an
# invalid value to require configuration.
#compute_lvm_group_data_disks:

# List of LVM logical volumes for the data volume group.
#compute_lvm_group_data_lvs:

# Docker volumes LVM backing volume.
#compute_lvm_group_data_lv_docker_volumes:

# Size of docker volumes LVM backing volume.
#compute_lvm_group_data_lv_docker_volumes_size:

# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
#compute_lvm_group_data_lv_docker_volumes_fs:

###############################################################################
# Compute node sysctl configuration.

# Dict of sysctl parameters to set.
#compute_sysctl_parameters:

###############################################################################
# Compute node user configuration.

# List of users to create. This should be in a format accepted by the
# singleplatform-eng.users role.
#compute_users:

###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes
etc/kayobe/inventory/group_vars/compute/network-interfaces (new file, 27 lines)
---
###############################################################################
# Network interface definitions for the compute group.

# Overcloud provisioning network IP information.
# provision_oc_net_interface:
# provision_oc_net_bridge_ports:
# provision_oc_net_bond_slaves:

# Internal network IP information.
# internal_net_interface:
# internal_net_bridge_ports:
# internal_net_bond_slaves:

# External network IP information.
# external_net_interface:
# external_net_bridge_ports:
# external_net_bond_slaves:

# Storage network IP information.
# storage_net_interface:
# storage_net_bridge_ports:
# storage_net_bond_slaves:

###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes
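A sketch of how these might be filled in for a deployment (the device names are hypothetical):

# Hypothetical interface assignments for compute hosts.
provision_oc_net_interface: eth0
internal_net_interface: eth1
storage_net_interface: eth2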
etc/kayobe/inventory/groups
@@ -15,10 +15,14 @@ controllers
 [monitoring]
 # Empty group to provide declaration of monitoring group.

+[compute]
+# Empty group to provide declaration of compute group.
+
 [overcloud:children]
 controllers
 network
 monitoring
+compute

 [docker:children]
 # Hosts in this group will have Docker installed.
@@ -26,6 +30,13 @@ seed
 controllers
 network
 monitoring
+compute
+
+###############################################################################
+# Baremetal compute node groups.
+
+[baremetal-compute]
+# Empty group to provide declaration of baremetal-compute group.

 ###############################################################################
 # Networking groups.
etc/kayobe/inventory/hosts
@@ -18,6 +18,9 @@ localhost ansible_connection=local
 # Add controller nodes here if required. These hosts will provide the
 # OpenStack overcloud.

+[baremetal-compute]
+# Add baremetal compute nodes here if required.
+
 [mgmt-switches]
 # Add management network switches here if required.
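For illustration (hypothetical hostnames): once populated, the new group could look like this.

[baremetal-compute]
# Hypothetical baremetal compute nodes managed via ironic.
bm-compute0
bm-compute1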