
Synchronise configuration with kayobe 3.1.0

Mark Goddard, 1 year ago
parent commit d3c1c01b6f

etc/kayobe/cadvisor.yml  +10 -0

@@ -0,0 +1,10 @@
+---
+###############################################################################
+# cAdvisor configuration.
+
+# Whether cAdvisor is enabled.
+#cadvisor_enabled:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
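
As a hedged illustration only (not a default introduced by this commit), enabling cAdvisor in etc/kayobe/cadvisor.yml would mean uncommenting and setting the flag:

# Whether cAdvisor is enabled.
cadvisor_enabled: true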

etc/kayobe/compute.yml  +10 -0

@@ -83,6 +83,16 @@
 # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
 #compute_lvm_group_data_lv_docker_volumes_fs:
 
+###############################################################################
+# Compute node Ceph configuration.
+
+# List of Ceph disks.
+# The format is a list of dicts like:
+# - { osd: "/dev/sdb", journal: "/dev/sdc" }
+# - { osd: "/dev/sdd" }
+# The journal variable is not mandatory.
+#compute_ceph_disks:
+
 ###############################################################################
 # Compute node sysctl configuration.
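
For illustration, a compute_ceph_disks value following the documented format might look like this; the device paths are hypothetical examples, not values from this commit:

compute_ceph_disks:
  # OSD with a dedicated journal device.
  - { osd: "/dev/sdb", journal: "/dev/sdc" }
  # OSD with the journal colocated on the same device.
  - { osd: "/dev/sdd" }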

etc/kayobe/controllers.yml  +10 -0

@@ -86,6 +86,16 @@
 # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
 #controller_lvm_group_data_lv_docker_volumes_fs:
 
+###############################################################################
+# Controller node Ceph configuration.
+
+# List of Ceph disks.
+# The format is a list of dicts like:
+# - { osd: "/dev/sdb", journal: "/dev/sdc" }
+# - { osd: "/dev/sdd" }
+# The journal variable is not mandatory.
+#controller_ceph_disks:
+
 ###############################################################################
 # Controller node sysctl configuration.
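
The controller variant follows the same shape as the compute example above; a minimal hypothetical entry:

controller_ceph_disks:
  - { osd: "/dev/sdb", journal: "/dev/sdc" }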

etc/kayobe/grafana.yml  +47 -0

@@ -0,0 +1,47 @@
+---
+###############################################################################
+# Grafana configuration.
+
+# Grafana local admin user name. If you are deploying Monasca Grafana this
+# should not conflict with an OpenStack user name.
+#grafana_local_admin_user_name:
+
+# Path to git repo containing Grafana dashboards. Eg.
+# https://github.com/stackhpc/grafana-reference-dashboards.git
+#grafana_monitoring_node_dashboard_repo:
+
+# Dashboard repo version. Optional, defaults to 'HEAD'.
+#grafana_monitoring_node_dashboard_repo_version:
+
+# Path to which Grafana dashboards will be cloned on a monitoring node.
+#grafana_monitoring_node_dashboard_repo_checkout_path:
+
+# The path, relative to grafana_monitoring_node_dashboard_repo_checkout_path,
+# containing the dashboards. Eg. /prometheus/control_plane
+#grafana_monitoring_node_dashboard_repo_path:
+
+# The Grafana organisation for the control plane. Note that for Monasca
+# Grafana with domain support the format is:
+# organisation_name@openstack_domain
+#grafana_control_plane_organisation:
+
+# A dict of datasources to configure. See the stackhpc.grafana-conf role
+# for all supported datasources. Example:
+#
+# grafana_datasources:
+#   monasca_api:
+#     port: 8082
+#     host: monasca-api
+#   monasca_log_api:
+#     port: 5607
+#     host: monasca-log-api
+#   elasticsearch:
+#     port: 9200
+#     host: monasca-elasticsearch
+#     project_id: "some_id"
+#
+#grafana_datasources:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
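
A sketch of how the dashboard-related variables might be populated, reusing the example repository and dashboard path from the comments above; the admin user name, checkout path and organisation are placeholders, not defaults from this commit:

grafana_local_admin_user_name: grafana_local_admin
grafana_monitoring_node_dashboard_repo: https://github.com/stackhpc/grafana-reference-dashboards.git
grafana_monitoring_node_dashboard_repo_version: HEAD
grafana_monitoring_node_dashboard_repo_checkout_path: /opt/grafana-dashboards
grafana_monitoring_node_dashboard_repo_path: /prometheus/control_plane
grafana_control_plane_organisation: control_plane@default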

etc/kayobe/inventory/hosts.example  +0 -1

@@ -1,7 +1,6 @@
 # Kayobe hosts inventory file. This file should be modified to define the hosts
 # and their top-level group membership.
 
-[config-mgmt]
 # This host acts as the configuration management control host. This must be
 # localhost.
 localhost ansible_connection=local

etc/kayobe/ironic.yml  +7 -0

@@ -65,6 +65,13 @@
 # raid_interface field set.
 #kolla_ironic_default_raid_interface:
 
+# Specify the list of rescue interfaces to load during service initialization.
+#kolla_ironic_enabled_rescue_interfaces:
+
+# Default rescue interface to be used for nodes that do not have
+# rescue_interface field set.
+#kolla_ironic_default_rescue_interface:
+
 # Specify the list of storage interfaces to load during
 # service initialization.
 #kolla_ironic_enabled_storage_interfaces:
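
A hedged example of the new rescue options; 'agent' and 'no-rescue' are interface names used by Ironic's rescue support, but treat this particular selection as an assumption rather than a value implied by this commit:

kolla_ironic_enabled_rescue_interfaces:
  - agent
  - no-rescue
kolla_ironic_default_rescue_interface: agent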

etc/kayobe/kolla.yml  +5 -0

@@ -131,6 +131,10 @@
 ###############################################################################
 # Kolla-ansible configuration.
 
+# Virtualenv directory where Kolla-ansible's ansible modules will execute
+# remotely on the target nodes. If None, no virtualenv will be used.
+#kolla_ansible_target_venv:
+
 # Whether TLS is enabled for the external API endpoints.
 #kolla_enable_tls_external:
 
@@ -176,6 +180,7 @@
 #kolla_enable_manila_backend_generic:
 #kolla_enable_manila_backend_hnas:
 #kolla_enable_mistral:
+#kolla_enable_monasca:
 #kolla_enable_mongodb:
 #kolla_enable_multipathd:
 #kolla_enable_murano:
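
For illustration, the two new kolla.yml options might be set as follows; the virtualenv path is a placeholder:

# Run kolla-ansible's ansible modules on the target hosts inside this virtualenv.
kolla_ansible_target_venv: /opt/kayobe/venvs/kolla-ansible
# Enable the Monasca monitoring service in kolla-ansible.
kolla_enable_monasca: true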

etc/kayobe/overcloud.yml  +15 -0

@@ -13,6 +13,21 @@
 # should not be added to the inventory.
 #overcloud_group_hosts_map:
 
+# Whether to disable cloud-init, which can help to prevent some network issues.
+#disable_cloud_init:
+
+###############################################################################
+# Overcloud host image configuration.
+
+# The CentOS cloud images from 7.2 (1511) onwards have a bogus name server
+# entry in /etc/resolv.conf, 10.0.2.3. Cloud-init only appends name server
+# entries to this file, and will not remove this bogus entry. Typically this
+# leads to a delay of around 30 seconds when connecting via SSH, due to a
+# timeout in NSS. The workaround employed here is to remove this bogus entry
+# from the image using virt-customize, if it exists. See
+# https://bugs.centos.org/view.php?id=14369.
+#overcloud_host_image_workaround_resolv_enabled:
+
 ###############################################################################
 # Dummy variable to allow Ansible to accept this file.
 workaround_ansible_issue_8743: yes
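
A minimal sketch of the new overcloud options; both are shown enabled purely as an example, not as recommended values from this commit:

disable_cloud_init: true
overcloud_host_image_workaround_resolv_enabled: true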

etc/kayobe/seed-hypervisor.yml  +7 -0

@@ -1,4 +1,11 @@
 ---
+###############################################################################
+# Seed hypervisor node configuration.
+
+# User with which to access the seed hypervisor via SSH during bootstrap, in
+# order to set up the Kayobe user account.
+#seed_hypervisor_bootstrap_user:
+
 ###############################################################################
 # Seed hypervisor network interface configuration.
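
A hedged example of the new bootstrap user variable; 'centos' is shown only as a typical cloud image user and is a placeholder here:

seed_hypervisor_bootstrap_user: centos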

etc/kayobe/storage.yml  +111 -0

@@ -0,0 +1,111 @@
+---
+###############################################################################
+# Storage node configuration.
+
+# User with which to access the storage nodes via SSH during bootstrap, in
+# order to set up the Kayobe user account.
+#storage_bootstrap_user:
+
+###############################################################################
+# Network interface attachments.
+
+# List of networks to which storage nodes are attached.
+#storage_network_interfaces:
+
+# List of default networks to which storage nodes are attached.
+#storage_default_network_interfaces:
+
+# List of extra networks to which storage nodes are attached.
+#storage_extra_network_interfaces:
+
+###############################################################################
+# Storage node BIOS configuration.
+
+# Dict of storage BIOS options. Format is same as that used by stackhpc.drac
+# role.
+#storage_bios_config:
+
+# Dict of default storage BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#storage_bios_config_default:
+
+# Dict of additional storage BIOS options. Format is same as that used by
+# stackhpc.drac role.
+#storage_bios_config_extra:
+
+###############################################################################
+# Storage node RAID configuration.
+
+# List of storage RAID volumes. Format is same as that used by stackhpc.drac
+# role.
+#storage_raid_config:
+
+# List of default storage RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#storage_raid_config_default:
+
+# List of additional storage RAID volumes. Format is same as that used by
+# stackhpc.drac role.
+#storage_raid_config_extra:
+
+###############################################################################
+# Storage node LVM configuration.
+
+# List of storage volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+#storage_lvm_groups:
+
+# Default list of storage volume groups. See mrlesmithjr.manage-lvm role for
+# format.
+#storage_lvm_groups_default:
+
+# Additional list of storage volume groups. See mrlesmithjr.manage-lvm role
+# for format.
+#storage_lvm_groups_extra:
+
+# Storage LVM volume group for data. See mrlesmithjr.manage-lvm role for
+# format.
+#storage_lvm_group_data:
+
+# List of disks for use by the storage LVM data volume group. Defaults to an
+# invalid value to require configuration.
+#storage_lvm_group_data_disks:
+
+# List of LVM logical volumes for the data volume group.
+#storage_lvm_group_data_lvs:
+
+# Docker volumes LVM backing volume.
+#storage_lvm_group_data_lv_docker_volumes:
+
+# Size of docker volumes LVM backing volume.
+#storage_lvm_group_data_lv_docker_volumes_size:
+
+# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
+#storage_lvm_group_data_lv_docker_volumes_fs:
+
+###############################################################################
+# Storage node Ceph configuration.
+
+# List of Ceph disks.
+# The format is a list of dicts like:
+# - { osd: "/dev/sdb", journal: "/dev/sdc" }
+# - { osd: "/dev/sdd" }
+# The journal variable is not mandatory.
+#storage_ceph_disks:
+
+###############################################################################
+# Storage node sysctl configuration.
+
+# Dict of sysctl parameters to set.
+#storage_sysctl_parameters:
+
+###############################################################################
+# Storage node user configuration.
+
+# List of users to create. This should be in a format accepted by the
+# singleplatform-eng.users role.
+#storage_users:
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
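
A minimal sketch of a storage node configuration exercising a few of the new variables; the user, network and device names below are hypothetical placeholders:

storage_bootstrap_user: cloud-user
storage_network_interfaces:
  - storage_net
storage_lvm_group_data_disks:
  - /dev/sdb
storage_ceph_disks:
  # OSD with a dedicated journal device, and one without.
  - { osd: "/dev/sdc", journal: "/dev/sdd" }
  - { osd: "/dev/sde" }
storage_sysctl_parameters:
  vm.swappiness: 10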
