Make AIO networks configurable
We carry the *_range variables without any real use as of today. At the same time, it sometimes makes sense to override an AIO subnet range when it intersects with a subnet that already exists on the AIO VM. This should also be useful and more flexible for future patches that may add situational complexity.

Change-Id: Ibe46a54a4eca981a73a9b16e0945ec2944c1db87
Signed-off-by: Dmitriy Rabotyagov <dmitriy.rabotyagov@cleura.com>
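For illustration, a minimal sketch of such an override (hypothetical values; the exact override mechanism, e.g. extra vars or a user variables file consumed by the bootstrap role, is up to the deployer):

    # Hypothetical overrides of the new *_network defaults introduced below
    mgmt_network: "10.1.236.0/22"
    storage_network: "10.1.244.0/22"

    # With mgmt_network set as above, the templates in this change resolve to:
    #   {{ mgmt_network | ansible.utils.nthhost('100') }}     -> 10.1.236.100
    #   {{ mgmt_network | ansible.utils.nthhost('-1') }}      -> 10.1.239.255
    #   {{ mgmt_network | ansible.utils.ipaddr('netmask') }}  -> 255.255.252.0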
@@ -1,3 +1,3 @@
 registration_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,3 +1,3 @@
 metering-alarm_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,4 +1,4 @@
 ---
 key-manager_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,4 +1,4 @@
 ---
 reservation_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,9 +1,9 @@
 # The compute host that the ceilometer compute agent will be running on.
 metering-compute_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 
 # The infra nodes that the central agents will be running on
 metering-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,26 +1,26 @@
 # The infra nodes where the Ceph mon services will run
 ceph-mon_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 
 # The nodes that the Ceph OSD disks will be running on
 ceph-osd_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 
 {% if 'manila' not in bootstrap_host_scenarios_expanded %}
 # The nodes that the Ceph RadosGW object gateways will be running on
 ceph-rgw_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 {% endif %}
 
 {% if 'manila' in bootstrap_host_scenarios_expanded %}
 ceph-mds_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 
 ceph-nfs_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 {% endif %}

@@ -1,7 +1,7 @@
 ---
 storage-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
     container_vars:
       cinder_qos_specs:
         - name: low-iops
@@ -28,4 +28,4 @@ storage-infra_hosts:
 
 storage_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,4 +1,4 @@
 # The controller host that will be running the cloudkitty services
 rating_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,4 +1,4 @@
 ---
 dnsaas_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,4 +1,4 @@
 ---
 image_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -15,5 +15,4 @@
 
 metrics_hosts:
   aio1:
-    ip: 172.29.236.100
-
+    ip: {{ bootstrap_host_management_address }}

@@ -1,4 +1,4 @@
 ---
 load_balancer_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,4 +1,4 @@
 ---
 orchestration_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,4 +1,4 @@
 ---
 dashboard_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,14 +1,14 @@
 ---
 ironic-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 
 # Ironic compute hosts. These compute hosts will be used to
 # facilitate ironic's interactions through nova.
 ironic-compute_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 
 ironic-inspector_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,4 +1,4 @@
 ---
 identity_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,3 +1,3 @@
 magnum-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,7 +1,7 @@
 manila-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 
 manila-data_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,8 +1,8 @@
 ---
 masakari-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 
 masakari-monitor_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,3 +1,3 @@
 mistral-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -3,14 +3,14 @@
 # neutron-server, neutron-agents
 network_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 
 {% if _neutron_plugin_driver == 'ml2.ovn' %}
 network-gateway_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 
 network-northd_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 {% endif %}

@@ -1,8 +1,8 @@
 ---
 compute-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 
 compute_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,4 +1,4 @@
 # The controller host that the octavia control plane will be run on
 octavia-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,3 +1,3 @@
 placement-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,3 +1,3 @@
 skyline_dashboard_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -16,7 +16,7 @@ global_overrides:
       default: True
 swift-proxy_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
     container_vars:
       swift_proxy_vars:
         limit_container_types: swift_proxy
@@ -25,7 +25,7 @@ swift-proxy_hosts:
         write_affinity_node_count: "1 * replicas"
 swift_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
     container_vars:
       swift_vars:
         limit_container_types: swift

@@ -1,4 +1,4 @@
 ---
 mano_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,3 +1,3 @@
 trove-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,3 +1,3 @@
 unbound_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,7 +1,7 @@
 ---
 zun-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 zun-compute_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}

@@ -1,44 +1,44 @@
 ---
 cidr_networks:
 {% if 'ironic' in bootstrap_host_scenarios_expanded %}
-  bmaas: 172.29.228.0/22
+  bmaas: {{ bmaas_network }}
 {% endif %}
 {% if 'trove' in bootstrap_host_scenarios_expanded %}
-  dbaas: 172.29.252.0/22
+  dbaas: {{ dbaas_network }}
 {% endif %}
 {% if 'octavia' in bootstrap_host_scenarios_expanded %}
-  lbaas: 172.29.232.0/22
+  lbaas: {{ lbaas_network }}
 {% endif %}
-  management: 172.29.236.0/22
-  tunnel: 172.29.240.0/22
-  storage: 172.29.244.0/22
+  management: {{ mgmt_network }}
+  tunnel: {{ vxlan_network }}
+  storage: {{ storage_network }}
 
 used_ips:
 {% if 'ironic' in bootstrap_host_scenarios_expanded %}
-  - "172.29.228.1,172.29.228.10"
-  - "172.29.229.50,172.29.231.255"
-  - "172.29.228.100"
+  - "{{ bmaas_network | ansible.utils.nthhost('1') }},{{ bmaas_network | ansible.utils.nthhost('10') }}"
+  - "{{ bmaas_network | ansible.utils.nthhost('110') }},{{ bmaas_network | ansible.utils.nthhost('-10') }}"
+  - "{{ bmaas_network | ansible.utils.nthhost('100') }}"
 {% endif %}
 {% if 'trove' in bootstrap_host_scenarios_expanded %}
-  - "172.29.252.1,172.29.252.10"
-  - "172.29.252.50,172.29.255.255"
+  - "{{ dbaas_network | ansible.utils.nthhost('1') }},{{ dbaas_network | ansible.utils.nthhost('10') }}"
+  - "{{ dbaas_network | ansible.utils.nthhost('50') }},{{ dbaas_network | ansible.utils.nthhost('-1') }}"
 {% endif %}
 {% if 'octavia' in bootstrap_host_scenarios_expanded %}
-  - "172.29.232.1,172.29.232.10"
-  - "172.29.232.50,172.29.235.255"
+  - "{{ lbaas_network | ansible.utils.nthhost('1') }},{{ lbaas_network | ansible.utils.nthhost('10') }}"
+  - "{{ lbaas_network | ansible.utils.nthhost('50') }},{{ lbaas_network | ansible.utils.nthhost('-1') }}"
 {% endif %}
-  - "172.29.236.1,172.29.236.50"
-  - "172.29.236.100"
-  - "172.29.236.101"
-  - "172.29.240.1,172.29.240.50"
-  - "172.29.240.100"
-  - "172.29.244.1,172.29.244.50"
-  - "172.29.244.100"
-  - "172.29.248.1,172.29.248.50"
-  - "172.29.248.100"
+  - "{{ mgmt_network | ansible.utils.nthhost('1') }},{{ mgmt_network | ansible.utils.nthhost('50') }}"
+  - "{{ mgmt_network | ansible.utils.nthhost('100') }}"
+  - "{{ mgmt_network | ansible.utils.nthhost('101') }}"
+  - "{{ vxlan_network | ansible.utils.nthhost('1') }},{{ vxlan_network | ansible.utils.nthhost('50') }}"
+  - "{{ vxlan_network | ansible.utils.nthhost('100') }}"
+  - "{{ storage_network | ansible.utils.nthhost('1') }},{{ storage_network | ansible.utils.nthhost('50') }}"
+  - "{{ storage_network | ansible.utils.nthhost('100') }}"
+  - "{{ vlan_network | ansible.utils.nthhost('1') }},{{ vlan_network | ansible.utils.nthhost('50') }}"
+  - "{{ vlan_network | ansible.utils.nthhost('100') }}"
 
 global_overrides:
-  internal_lb_vip_address: 172.29.236.101
+  internal_lb_vip_address: {{ bootstrap_host_internal_address }}
   # The external IP is quoted simply to ensure that the .aio file can be used as input
   # dynamic inventory testing.
   external_lb_vip_address: "{{ ('stepca' in bootstrap_host_scenarios) | ternary('external.openstack.local', bootstrap_host_public_address) }}"

@@ -60,8 +60,8 @@ global_overrides:
   # floating ips using the br-mgmt interface as a gateway
   static_routes:
     # neutron public addresses, LXC
-    - cidr: 172.29.248.0/22
-      gateway: 172.29.236.100
+    - cidr: {{ vlan_network }}
+      gateway: {{ bootstrap_host_management_address }}
 {% if 'ovs' in bootstrap_host_scenarios_expanded %}
     - network:
         container_bridge: "br-vxlan"

@@ -180,7 +180,7 @@ global_overrides:
 # keystone
 identity_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 {% if 'keystone' in bootstrap_host_scenarios or 'infra' in bootstrap_host_scenarios %}
     # NOTE (jrosser) this ensures that we deploy 3 keystone containers
     # during the os_keystone role test to validate ssh keys and fernet key sync

@@ -193,7 +193,7 @@ identity_hosts:
 # galera, memcache, rabbitmq, utility
 shared-infra_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 {% if 'infra' in bootstrap_host_scenarios_expanded %}
     affinity:
       galera_container: 3

@@ -211,12 +211,12 @@ repo-infra_hosts:
     affinity:
       repo_container: 3
 {% endif %}
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 
 {% if 'zookeeper' in bootstrap_host_scenarios_expanded %}
 coordination_hosts:
   aio1:
-    ip: 172.29.236.100
+    ip: {{ bootstrap_host_management_address }}
 {% if 'infra' in bootstrap_host_scenarios_expanded %}
     affinity:
       zookeeper_container: 3

@@ -31,10 +31,13 @@ def make_example_config(aio_config_file, configs_dir):
     j2env = jinja2.Environment(loader=jinja2.BaseLoader,
                                autoescape=jinja2.select_autoescape())
     files = glob.glob(os.path.join(configs_dir, '*.aio'))
+    templated_variables = {
+        'bootstrap_host_management_address': '172.29.236.100'
+    }
     for file_name in files:
         with open(file_name, 'r') as f:
             template = j2env.from_string(f.read())
-            jinja_data = template.render()
+            jinja_data = template.render(**templated_variables)
             config.update(yaml.safe_load(jinja_data))
 
     with open(aio_config_file, 'r') as f:

@@ -106,11 +106,14 @@ ceph_osd_images:
 
 ## Network configuration
 # Default network IP ranges
-mgmt_range: "172.29.236"
-vxlan_range: "172.29.240"
-storage_range: "172.29.244"
-vlan_range: "172.29.248"
-netmask: "255.255.252.0"
+mgmt_network: "172.29.236.0/22"
+vxlan_network: "172.29.240.0/22"
+storage_network: "172.29.244.0/22"
+vlan_network: "172.29.248.0/22"
+bmaas_network: "172.29.228.0/22"
+dbaas_network: "172.29.252.0/22"
+lbaas_network: "172.29.232.0/22"
 
 #
 # NICs
 bootstrap_host_public_interface: "{{ ansible_facts['default_ipv4']['interface'] }}"

@@ -118,7 +121,8 @@ bootstrap_host_public_interface: "{{ ansible_facts['default_ipv4']['interface']
 # By default the address will be set to the ipv4 address of the
 # host's network interface that has the default route on it.
 bootstrap_host_public_address: "{{ ansible_facts[bootstrap_host_public_interface | replace('-', '_')]['ipv4']['address'] }}"
-
+bootstrap_host_management_address: "{{ mgmt_network | ansible.utils.nthhost('100') }}"
+bootstrap_host_internal_address: "{{ mgmt_network | ansible.utils.nthhost('101') }}"
 #
 # Utility paths
 bootstrap_host_network_utils:

@@ -125,23 +125,24 @@
   - interface: "br-mgmt"
     config_overrides:
       Network:
-        Address:
-          ? "172.29.236.100/22"
-          ? "172.29.236.101/22"
+        Address: "{{ {
+            (bootstrap_host_management_address ~ '/' ~ mgmt_network | ansible.utils.ipaddr('netmask')) | ansible.utils.ipaddr('host/prefix'): None,
+            (bootstrap_host_internal_address ~ '/' ~ mgmt_network | ansible.utils.ipaddr('netmask')) | ansible.utils.ipaddr('host/prefix'): None
+          } }}"
 
   - interface: "dummy-storage"
     bridge: "br-storage"
     mtu: 9000
   - interface: "br-storage"
-    address: "172.29.244.100"
-    netmask: "255.255.252.0"
+    address: "{{ storage_network | ansible.utils.nthhost('100') }}"
+    netmask: "{{ storage_network | ansible.utils.ipaddr('netmask') }}"
 
   - interface: "dummy-dbaas"
     bridge: "br-dbaas"
     mtu: 9000
   - interface: "br-dbaas"
-    address: "172.29.252.100"
-    netmask: "255.255.252.0"
+    address: "{{ dbaas_network | ansible.utils.nthhost('100') }}"
+    netmask: "{{ dbaas_network | ansible.utils.ipaddr('netmask') }}"
   - interface: "br-dbaas-veth"
     bridge: "br-dbaas"
     mtu: 9000

@@ -150,8 +151,8 @@
     bridge: "br-lbaas"
     mtu: 9000
   - interface: "br-lbaas"
-    address: "172.29.232.100"
-    netmask: "255.255.252.0"
+    address: "{{ lbaas_network | ansible.utils.nthhost('100') }}"
+    netmask: "{{ lbaas_network | ansible.utils.ipaddr('netmask') }}"
   - interface: "br-lbaas-veth"
     bridge: "br-lbaas"
     mtu: 9000

@@ -160,8 +161,8 @@
     bridge: "br-bmaas"
     mtu: 1500
   - interface: "br-bmaas"
-    address: "172.29.228.100"
-    netmask: "255.255.252.0"
+    address: "{{ bmaas_network | ansible.utils.nthhost('100') }}"
+    netmask: "{{ bmaas_network | ansible.utils.ipaddr('netmask') }}"
   - interface: "br-bmaas-veth"
     bridge: "br-bmaas"
     mtu: 1500

@@ -170,8 +171,8 @@
     bridge: "br-vxlan"
     mtu: 9000
   - interface: "br-vxlan"
-    address: "172.29.240.100"
-    netmask: "255.255.252.0"
+    address: "{{ vxlan_network | ansible.utils.nthhost('100') }}"
+    netmask: "{{ vxlan_network | ansible.utils.ipaddr('netmask') }}"
 
   - interface: "dummy-vlan"
     bridge: "br-vlan"

@@ -179,9 +180,10 @@
   - interface: "br-vlan"
     config_overrides:
       Network:
-        Address:
-          ? "172.29.248.100/22"
-          ? "172.29.248.1/22"
+        Address: "{{ {
+            vlan_network | ansible.utils.ipaddr('100'): None,
+            vlan_network | ansible.utils.ipaddr('1'): None
+          } }}"
   - interface: "br-vlan-veth"
     bridge: "br-vlan"
     mtu: 9000

@@ -194,22 +196,6 @@
 - name: Force systemd_networkd hander to run
   ansible.builtin.meta: flush_handlers
 
-# NOTE(jrosser) The intention here is not to proceed further until the network bridges are up
-# This ensures there will be no race between the bridges coming up and subsequent tasks which
-# require functional network interfaces
-- name: Check that network bridges are up
-  ansible.builtin.wait_for:
-    port: 22
-    timeout: 30
-    host: "{{ item }}"
-  with_items:
-    - 172.29.236.100 # br-mgmt
-    - 172.29.244.100 # br-storage
-    - 172.29.252.100 # br-dbaas
-    - 172.29.232.100 # br-lbaas
-    - 172.29.240.100 # br-vxlan
-    - 172.29.228.100 # br-bmaas
-
 - name: Run the systemd service role
   ansible.builtin.include_role:
     name: systemd_service

@@ -79,7 +79,7 @@
 - name: Create exports file
   ansible.builtin.lineinfile:
     path: /etc/exports
-    line: '{{ item }} {{ storage_range }}.0/{{ netmask }}(rw,sync,no_subtree_check,insecure,all_squash,anonuid=10000,anongid=10000)'
+    line: '{{ item }} {{ storage_network }}(rw,sync,no_subtree_check,insecure,all_squash,anonuid=10000,anongid=10000)'
     owner: root
     group: root
     mode: "0644"

@@ -23,7 +23,7 @@
     - install-packages
 
 - name: Install squid config
-  ansible.builtin.copy:
-    src: "squid.conf"
+  ansible.builtin.template:
+    src: "squid.conf.j2"
     dest: "/etc/squid/squid.conf"
     mode: "0644"

@@ -1,5 +1,5 @@
 acl CONNECT method CONNECT
-acl lan src 172.29.236.0/22
+acl lan src {{ mgmt_network }}
 
 http_access allow localhost manager
 http_access allow lan

@@ -25,8 +25,8 @@ ceph_pkg_source: distro
 {% endif %}
 
 ## Tempest settings
-tempest_public_subnet_cidr: "172.29.248.0/22"
-tempest_public_subnet_allocation_pools: "172.29.249.110-172.29.249.200"
+tempest_public_subnet_cidr: "{{ vlan_network }}"
+tempest_public_subnet_allocation_pools: "{{ vlan_network | ansible.utils.nthhost('110') }}-{{ vlan_network | ansible.utils.nthhost('-10') }}"
 
 {% if _neutron_plugin_driver == 'ml2.ovn' %}
 tempest_private_net_provider_type: geneve

@@ -157,8 +157,6 @@ tempest_tempest_conf_overrides_neutron:
 octavia_wsgi_threads: 1
 octavia_wsgi_processes: 1
 octavia_wsgi_buffer_size: 16384
-octavia_management_net_subnet_cidr: 172.29.232.0/22
-octavia_management_net_subnet_allocation_pools: "172.29.232.50-172.29.235.254"
 
 ## Heat
 heat_api_workers: 1

@@ -302,14 +300,17 @@ openstack_hosts_package_state: latest
 octavia_v2: True
 # Disable Octavia V1 API
 octavia_v1: False
-octavia_management_net_subnet_cidr: '172.29.232.0/22'
+
+octavia_management_net_subnet_cidr: {{ lbaas_network }}
+octavia_management_net_subnet_allocation_pools: {{ lbaas_network | ansible.utils.nthhost('110') }}-{{ lbaas_network | ansible.utils.nthhost('-10') }}
 
 {% if 'metal' in bootstrap_host_scenarios %}
 # TODO(mnaser): The Octavia role relies on gathering IPs of hosts in the
 #               LBaaS network and using those in the health manager pool
 #               IPs. We don't store those IPs when running metal so we
 #               have to override it manually. We should remove this and
 #               fix the role (or the inventory tool) eventually.
-octavia_hm_hosts: 172.29.232.100 # br-lbaas IP
+octavia_hm_hosts: {{ lbaas_network | ansible.utils.nthhost('100') }} # br-lbaas IP
 {% endif %}
 {% endif %}

@@ -318,9 +319,9 @@ octavia_hm_hosts: 172.29.232.100 # br-lbaas IP
 # to point to the local squid
 # Playbooks will set a runtime proxy to the AIO host squid
 deployment_environment_variables:
-  http_proxy: http://172.29.236.100:3128/
-  https_proxy: http://172.29.236.100:3128/
-  no_proxy: "localhost,127.0.0.1,172.29.236.100,172.29.236.101,{{ bootstrap_host_public_address }}"
+  http_proxy: http://{{ bootstrap_host_management_address }}:3128/
+  https_proxy: http://{{ bootstrap_host_management_address }}:3128/
+  no_proxy: "localhost,127.0.0.1,{{ bootstrap_host_management_address }},{{ bootstrap_host_internal_address }},{{ bootstrap_host_public_address }}"
 
 # Remove eth0 from all container so there is no default route and everything
 # must go via the http proxy

@@ -333,7 +334,7 @@ cinder_backends:
     volume_group: cinder-volumes
     volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_backend_name: LVM_iSCSI
-    iscsi_ip_address: "172.29.236.100"
+    iscsi_ip_address: "{{ bootstrap_host_management_address }}"
     lvm_type: "thin"
     extra_volume_types:
       - low-iops

@@ -16,7 +16,7 @@
 ## ceph-ansible AIO settings
 is_hci: true
 monitor_interface: "{{ ('metal' in bootstrap_host_scenarios_expanded) | ternary('br-storage', 'eth2') }}" # Storage network in the AIO
-public_network: "{{ (storage_range ~ '.0/' ~ netmask) | ansible.utils.ipaddr('net') }}"
+public_network: "{{ storage_network }}"
 journal_size: 100
 osd_scenario: collocated
 ceph_conf_overrides_custom:

@@ -23,8 +23,8 @@ tempest_service_available_ironic: true
 tempest_service_available_neutron: true
 
 # (jamesdenton) Set defaults for AIO
-ironic_inspector_dhcp_pool_range: 172.29.229.50 172.29.229.255
-ironic_inspector_dhcp_subnet: 172.29.228.0/22
-ironic_inspector_dhcp_subnet_mask: 255.255.252.0
-ironic_inspector_dhcp_gateway: 172.29.228.1
-ironic_inspector_dhcp_nameservers: 172.29.228.1
+ironic_inspector_dhcp_pool_range: {{ bmaas_network | ansible.utils.nthhost('110') }} {{ bmaas_network | ansible.utils.nthhost('-10') }}
+ironic_inspector_dhcp_subnet: {{ bmaas_network }}
+ironic_inspector_dhcp_subnet_mask: {{ bmaas_network | ansible.utils.ipaddr('netmask') }}
+ironic_inspector_dhcp_gateway: {{ bmaas_network | ansible.utils.nthhost('1') }}
+ironic_inspector_dhcp_nameservers: {{ bmaas_network | ansible.utils.nthhost('1') }}

@@ -73,7 +73,7 @@ manila_backends:
     cephfs_auth_id: manila
     cephfs_cluster_name: ceph
     cephfs_conf_path: /etc/ceph/ceph.conf
-    cephfs_ganesha_server_ip: 172.29.236.100
+    cephfs_ganesha_server_ip: {{ bootstrap_host_management_address }}
     cephfs_ganesha_server_is_remote: False
     cephfs_protocol_helper_type: NFS
     driver_handles_share_servers: False

@@ -18,6 +18,6 @@ trove_provider_net_name: dbaas-mgmt
 trove_service_net_phys_net: dbaas-mgmt
 trove_service_net_setup: True
 
-trove_service_net_subnet_cidr: "172.29.252.0/22"
-trove_service_net_allocation_pool_start: "172.29.252.50"
-trove_service_net_allocation_pool_end: "172.29.255.254"
+trove_service_net_subnet_cidr: "{{ dbaas_network }}"
+trove_service_net_allocation_pool_start: "{{ dbaas_network | ansible.utils.nthhost('110') }}"
+trove_service_net_allocation_pool_end: "{{ dbaas_network | ansible.utils.nthhost('-10') }}"