# fuel-astute/examples/example_new.yaml

#Simple node declaration. Defines a YAML anchor that is referenced in the `nodes` section below
node_01: &node_01
# == role
# Specifies role of the node
# [primary-controller|controller|storage|swift-proxy|primary-swift-proxy]
# Default: unspecified
role: primary-controller
# == network_data
# Array of network interfaces hashes
# === name: scalar or array of one or more of [management|fixed|public|storage|admin(**deprecated**)|floating(**deprecated**)]
# === ip: IP address to be configured by puppet on this interface
# === dev: interface device name
# === netmask: network mask for the interface
# === vlan: vlan ID for the interface
# === gateway: IP address of gateway (**not used**)
network_data:
- name: public
ip: 10.20.0.94
dev: eth0
netmask: 255.255.255.0
gateway: 10.20.0.1
- name:
- management
- storage
ip: 10.20.1.94
netmask: 255.255.255.0
dev: eth1
- name: fixed
dev: eth2
# == public_br
# Name of the public bridge for Quantum-enabled configuration
public_br: br-ex
# == internal_br
# Name of the internal bridge for Quantum-enabled configuration
internal_br: br-mgmt
# == id ** TO BE DOCUMENTED. Suspected: node id in mcollective server.cfg.
id: 1
# == default_gateway
# Default gateway for the node
default_gateway: 10.20.0.1
# == uid ** TO BE DOCUMENTED
uid: 1
# == mac
# MAC address of the interface being used for network boot.
mac: 64:43:7B:CA:56:DD
# == name
# name of the system in cobbler
name: controller-01
# == ip
# IP issued by cobbler DHCP server to this node during network boot.
ip: 10.20.0.94
# == profile
# Cobbler profile for the node.
# Default: centos-x86_64
# [centos-x86_64|rhel-x86_64]
# CAUTION:
# rhel-x86_64 is created only after the rpmcache class has been run on the master node
profile: centos-x86_64
# == fqdn
# Fully-qualified domain name of the node
fqdn: controller-01.domain.tld
# == power_type
# Cobbler power-type. Consult cobbler documentation for available options.
# Default: ssh
power_type: ssh
# == power_user
# Username for cobbler to manage power of this machine
# Default: unset
power_user: root
# == power_pass
# Password/credentials for cobbler to manage power of this machine
# Default: unset
power_pass: /root/.ssh/bootstrap.rsa
# == power_address
# IP address of the device managing the node power state.
# Default: unset
power_address: 10.20.0.94
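# As a hedged illustration only, a node with IPMI-based power management could
# use cobbler's 'ipmitool' power type instead of ssh; the username, password,
# and BMC address below are assumptions, not values from this deployment:
# power_type: ipmitool
# power_user: ADMIN
# power_pass: ADMIN
# power_address: 192.168.0.10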
# == netboot_enabled
# Disable/enable netboot for this node.
netboot_enabled: '1'
# == name_servers
# DNS name servers for this node during provisioning phase.
name_servers: ! '"10.20.0.2"'
# == puppet_master
# Hostname or IP address of puppet master node
puppet_master: fuel.domain.tld
# == ks_meta
# Kickstart metadata used during provisioning
ks_meta:
# == ks_spaces
# Kickstart data for disk partitioning
# The simplest way to calculate it is to make a REST call to the Nailgun API,
# convert the disk sizes into MiB, and dump the following config. The workflow is as follows:
# Send a GET request to http://<fuel-master-node>:8000/api/nodes
# Parse the JSON and derive disk data from meta['disks']. Set explicitly which disk is the system disk and which is for cinder.
# $system_disk_size=floor($system_disk_meta['disks']['size']/1048576)
# $system_disk_path=$system_disk_meta['disks']['disk']
# $cinder_disk_size=floor($cinder_disk_meta['disks']['size']/1048576)
#
# $cinder_disk_path=$cinder_disk_meta['disks']['disk']
#
# All further calculations are made in MiB
# Calculation of system partitions
#
# For each node:
# calculate size of physical volume for operating system:
# $pv_size = $system_disk_size - 200 - 1
# declare $swap_size
# calculate size of root partition:
# $free_vg_size = $pv_size - $swap_size
# $free_extents = floor($free_vg_size/32)
# $root_size = 32 * $free_extents
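# As an illustration only (assuming a 20 GiB = 20480 MiB system disk and a
# 2048 MiB swap; the actual ks_spaces example below uses rounded values rather
# than the exact results of these formulas):
# $pv_size      = 20480 - 200 - 1  = 20279
# $free_vg_size = 20279 - 2048     = 18231
# $free_extents = floor(18231/32)  = 569
# $root_size    = 32 * 569         = 18208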
# ks_spaces: '"[
#{\"type\": \"disk\", \"id\": \"$system_disk_path\",
#\"volumes\":
#[
# {\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200},
# {\"type\": \"mbr\"},
# {\"size\": $pv_size, \"type\": \"pv\", \"vg\": \"os\"}
#],
#\"size\": $system_disk_size
#},
#{\"type\": \"vg\", \"id\": \"os\", \"volumes\":
#[
# {\"mount\": \"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": $system_disk_size },
# {\"mount\": \"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": $swap_size}
#]
#},
#{\"type\": \"disk\", \"id\": \"$path_to_cinder_disk\",
#\"volumes\":
#[
# {\"type\": \"mbr\"},
# {\"size\": $cinder_disk_size, \"type\": \"pv\", \"vg\": \"cinder-volumes\"}
#],
#\"size\": $cinder_disk_size
#}
#]"'
ks_spaces: '"[{\"type\": \"disk\", \"id\": \"disk/by-path/pci-0000:00:06.0-virtio-pci-virtio3\",
\"volumes\": [{\"mount\": \"/boot\", \"type\": \"partition\", \"size\": 200},
{\"type\": \"mbr\"}, {\"size\": 20000, \"type\": \"pv\", \"vg\": \"os\"}],
\"size\": 20480}, {\"type\": \"vg\", \"id\": \"os\", \"volumes\": [{\"mount\":
\"/\", \"type\": \"lv\", \"name\": \"root\", \"size\": 10240 }, {\"mount\":
\"swap\", \"type\": \"lv\", \"name\": \"swap\", \"size\": 2048}]}]"'
# == mco_enable
# Whether mcollective should be installed and enabled on the node
mco_enable: 1
# == mco_vhost
# Mcollective AMQP virtual host
mco_vhost: mcollective
# == mco_pskey
# **NOT USED**
mco_pskey: unset
# == mco_user
# Mcollective AMQP user
mco_user: mcollective
# == puppet_enable
# Whether the puppet agent should start on boot
# Default: 0
puppet_enable: 0
# == install_log_2_syslog
# Enable/disable remote (syslog) logging of the installation
# Default: 1
install_log_2_syslog: 1
# == mco_password
# Mcollective AMQP password
mco_password: marionette
# == puppet_auto_setup
# Whether to install puppet during provisioning
# Default: 1
puppet_auto_setup: 1
# == puppet_master
# hostname or IP of puppet master server
puppet_master: fuel.domain.tld
# == mco_auto_setup
# Whether to install mcollective during provisioning
# Default: 1
mco_auto_setup: 1
# == auth_key
# Public RSA key to be added to cobbler authorized keys
auth_key: ! '""'
# == puppet_version
# Which puppet version to install on the node
puppet_version: 2.7.19
# == mco_connector
# Mcollective AMQP driver.
# Default: rabbitmq
mco_connector: rabbitmq
# == mco_host
# AMQP host to which Mcollective agent should connect
mco_host: 10.20.0.2
# == interfaces
# Hash of interfaces configured during the provisioning stage
interfaces:
eth0:
ip_address: 10.20.0.94
netmask: 255.255.255.0
dns_name: controller-01.domain.tld
static: '1'
mac_address: 64:43:7B:CA:56:DD
# == interfaces_extra
# Extra interface information
interfaces_extra:
eth2:
onboot: 'no'
peerdns: 'no'
eth1:
onboot: 'no'
peerdns: 'no'
eth0:
onboot: 'yes'
peerdns: 'no'
# == meta
# Legacy hardware metadata needed for log parsing during astute jobs.
meta:
memory:
total: 778694656
interfaces:
- mac: 64:D8:E1:F6:66:43
max_speed: 100
name: eth2
ip: 10.22.0.94
netmask: 255.255.255.0
current_speed: 100
- mac: 64:C8:E2:3B:FD:6E
max_speed: 100
name: eth1
ip: 10.21.0.94
netmask: 255.255.255.0
current_speed: 100
- name: eth0
ip: 10.20.0.94
netmask: 255.255.255.0
mac: 64:43:7B:CA:56:DD
max_speed: 100
current_speed: 100
disks:
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-2:0:0:0
name: sdc
size: 2411724800000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-1:0:0:0
name: sdb
size: 536870912000
- model: VBOX HARDDISK
disk: disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0
name: sda
size: 17179869184
system:
serial: '0'
version: '1.2'
fqdn: bootstrap
family: Virtual Machine
manufacturer: VirtualBox
cpu:
real: 0
total: 1
spec:
- model: Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz
frequency: 2397
error_type:
#Nodes array. Includes references to corresponding nodes' sections.
nodes:
- <<: *node_01
#OpenStack cluster attributes used during deployment.
attributes:
# == master_ip
# IP of puppet master.
master_ip: 10.20.0.2
# == use_cow_images
# Whether to use copy-on-write (COW) images for VM instances
use_cow_images: true
# == libvirt_type
# Nova libvirt hypervisor type
# Values: qemu|kvm
# Default: kvm
libvirt_type: qemu
# == dns_nameservers
# Array of DNS servers configured during the deployment phase.
dns_nameservers:
- 10.20.0.1
# These parameters specify the verbosity level of log messages
# in the OpenStack components' configuration.
# debug sets the DEBUG level and overrides the verbose setting, if any.
# verbose sets the INFO level.
# If neither debug nor verbose is set, the default level, WARNING, is used.
# Note: if syslog is enabled, this default level may be overridden (for syslog) with the syslog_log_level option.
# == verbose
# Whether to enable verbose (INFO-level) logging
# Default: true
verbose: true
# == debug
# Whether to enable debug (DEBUG-level) logging
# Default: false
debug: true
# == auto_assign_floating_ip
# Whether to assign floating IPs automatically
auto_assign_floating_ip: true
# == start_guests_on_host_boot
# Whether to start guest VMs automatically when the compute host boots
# Default: true
start_guests_on_host_boot: true
# == create_networks
# Whether to create the fixed and floating networks during deployment
create_networks: true
# == compute_scheduler_driver
# Nova scheduler driver class
compute_scheduler_driver: nova.scheduler.multi.MultiScheduler
# == quantum
# Whether quantum is enabled
# Default: true
quantum: true
# == master_hostname
# Which controller node to treat as the master node. Used only for certainty during deployment.
master_hostname: controller-01
# == nagios
# Whether to enable nagios clients on the nodes
nagios: false
# == proj_name
# Name of the nagios project
proj_name: test
# == nagios_master
# nagios master server name
nagios_master: fuelweb.domain.tld
# == management_vip
# Virtual IP address for internal services (MySQL, AMQP, internal OpenStack endpoints)
management_vip: 10.20.1.200
# == public_vip
# Virtual IP address for public services (Horizon, public OpenStack endpoints)
public_vip: 10.20.0.200
#Nova-network part, gets ignored if $quantum = `true`
novanetwork_parameters:
# == vlan_start
# First VLAN ID used for fixed network segmentation
vlan_start: <1-1024>
# == network_manager
# Which nova-network manager to use
network_manager: String
# == network_size
# Which network size to use when segmenting the fixed network range
network_size: <Integer>
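# A hedged illustration of filled-in values (the VlanManager class and the
# numbers below are assumptions, not values from this deployment):
# novanetwork_parameters:
#   vlan_start: 300
#   network_manager: nova.network.manager.VlanManager
#   network_size: 256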
#Quantum part, used only if quantum='true'
quantum_parameters:
# == tenant_network_type
# Which type of network segmentation to use.
# Values: gre|vlan
tenant_network_type: gre
# == segment_range
# Range of IDs for network segmentation. Consult the Quantum documentation.
segment_range: ! '300:500'
# == metadata_proxy_shared_secret
# Shared secret for the metadata proxy service
metadata_proxy_shared_secret: quantum
# Below are the credentials and access parameters for the main OpenStack components
mysql:
root_password: root
glance:
db_password: glance
user_password: glance
swift:
user_password: swift_pass
nova:
db_password: nova
user_password: nova
access:
password: admin
user: admin
tenant: admin
email: admin@example.org
keystone:
db_password: keystone
admin_token: nova
quantum_access:
user_password: quantum
db_password: quantum
rabbit:
password: nova
user: nova
cinder:
password: cinder
user: cinder
# == floating_network_range
# CIDR (for quantum == true) or an array of IPs (for quantum == false)
# Used for creation of floating networks/IPs during deployment
floating_network_range: 10.20.0.150/26
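# A hedged illustration of the array form used when quantum == false (the
# addresses below are assumptions, not values from this deployment):
# floating_network_range:
#   - 10.20.0.150
#   - 10.20.0.151
#   - 10.20.0.152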
# == fixed_network_range
# CIDR for fixed network created during deployment.
fixed_network_range: 10.20.2.0/24
# == base_syslog
# Main syslog server configuration.
base_syslog:
syslog_port: '514'
syslog_server: 10.20.0.2
# == syslog
# Additional syslog servers configuration.
syslog:
syslog_port: '514'
syslog_transport: udp
syslog_server: ''
# == use_unicast_corosync
# Which communication protocol to use for corosync
use_unicast_corosync: false
# == horizon_use_ssl
# Dashboard (horizon) HTTPS/SSL mode
# false: normal mode with no encryption
# 'default': uses the keys supplied with the ssl module package
# 'exist': assumes that the keys (a domain-name-based certificate) have been provisioned in advance
# 'custom': requires a fileserver static mount point [ssl_certs] and a hostname-based certificate to exist
horizon_use_ssl: false
# == cinder_nodes
# Which nodes to use as cinder-volume backends
# Array of values 'all'|<hostname>|<internal IP address of node>|'controller'|<node_role>
cinder_nodes:
- controller
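# Other accepted forms, shown as a hedged illustration (the hostname and
# management IP of the node defined above are reused here purely as examples):
# cinder_nodes:
#   - all
# or
# cinder_nodes:
#   - controller-01
#   - 10.20.1.94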
# == ntp_servers
# List of NTP servers
ntp_servers:
- pool.ntp.org
# == deployment_id
# ID of the deployment; used to differentiate environments
deployment_id: 1
# == deployment_mode
# [ha|ha_full|multinode|single|ha_minimal]
deployment_mode: ha
# == deployment_source
# [web|cli]
deployment_source: cli
# == deployment_engine
# [simplepuppet(**deprecated**)|nailyfact]
# Default: nailyfact
deployment_engine: nailyfact
#Cobbler engine parameters
engine:
url: http://localhost/cobbler_api
username: cobbler
password: cobbler