From 983fbbe8c073d9acb8ba4d101d2057e5f905d4ab Mon Sep 17 00:00:00 2001 From: Sergey Kolekonov Date: Thu, 5 Nov 2015 16:17:26 +0300 Subject: [PATCH] Add support for Neutron L3 HA feature Neutron L3 HA feature adds an ability to schedule a virtual router to at least two L3 agents and an opportunity to establish connection faster after L3 agent failover than router rescheduling. Blueprint: neutron-vrrp-deployment Change-Id: If26ee7d7a56fccd3f0dc67792fd6c9807f07a8c2 --- .../modular/openstack-network/routers.pp | 38 +- .../openstack-network/server-config.pp | 9 +- .../modular/openstack-network/tasks.yaml | 15 + ...=> neut_vlan_l3ha.ceph.ceil-ceph-osd.yaml} | 1 + ... => neut_vlan_l3ha.ceph.ceil-compute.yaml} | 1 + .../neut_vlan_l3ha.ceph.ceil-controller.yaml | 1107 +++++++++++++++++ ...an_l3ha.ceph.ceil-primary-controller.yaml} | 54 + ...ut_vlan_l3ha.ceph.ceil-primary-mongo.yaml} | 1 + .../hosts/openstack-network/routers_spec.rb | 71 +- .../openstack-network/server-config_spec.rb | 24 +- 10 files changed, 1276 insertions(+), 45 deletions(-) rename tests/noop/astute.yaml/{neut_vlan.ceph.ceil-ceph-osd.yaml => neut_vlan_l3ha.ceph.ceil-ceph-osd.yaml} (99%) rename tests/noop/astute.yaml/{neut_vlan.ceph.ceil-compute.yaml => neut_vlan_l3ha.ceph.ceil-compute.yaml} (99%) create mode 100644 tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-controller.yaml rename tests/noop/astute.yaml/{neut_vlan.ceph.ceil-primary-controller.yaml => neut_vlan_l3ha.ceph.ceil-primary-controller.yaml} (94%) rename tests/noop/astute.yaml/{neut_vlan.ceph.ceil-primary-mongo.yaml => neut_vlan_l3ha.ceph.ceil-primary-mongo.yaml} (99%) diff --git a/deployment/puppet/osnailyfacter/modular/openstack-network/routers.pp b/deployment/puppet/osnailyfacter/modular/openstack-network/routers.pp index 7d2eb7cb02..b6d8f5c1fe 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-network/routers.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-network/routers.pp @@ -1,6 +1,12 @@ notice('MODULAR: 
openstack-network/routers.pp') -$use_neutron = hiera('use_neutron', false) +$use_neutron = hiera('use_neutron', false) +$neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) +# In case of L3 HA enabled this task must be executed on a post-deployment stage, +# as HA routers can't be created when less than 2 L3 agents are available (Neutron limitation) +$l3_ha = pick($neutron_advanced_config['neutron_l3_ha'], false) +$neutron_controller_roles = hiera('neutron_controller_roles', ['controller', 'primary-controller']) +$controllers_num = size(get_nodes_hash_by_roles(hiera('network_metadata'), $neutron_controller_roles)) if $use_neutron { @@ -12,21 +18,25 @@ if $use_neutron { $default_router = try_get_value($neutron_config, 'default_router', 'router04') $nets = $neutron_config['predefined_networks'] - neutron_router { $default_router: - ensure => 'present', - gateway_network_name => $floating_net, - name => $default_router, - tenant_name => $keystone_admin_tenant, - } -> + if ($l3_ha) and ($controllers_num < 2) { + warning("Not enough controllers to create an HA router") + } else { + neutron_router { $default_router: + ensure => 'present', + gateway_network_name => $floating_net, + name => $default_router, + tenant_name => $keystone_admin_tenant, + } -> - neutron_router_interface { "${default_router}:${private_net}__subnet": - ensure => 'present', - } + neutron_router_interface { "${default_router}:${private_net}__subnet": + ensure => 'present', + } - if has_key($nets, 'baremetal') { - neutron_router_interface { "${default_router}:baremetal__subnet": - ensure => 'present', - require => Neutron_router[$default_router] + if has_key($nets, 'baremetal') { + neutron_router_interface { "${default_router}:baremetal__subnet": + ensure => 'present', + require => Neutron_router[$default_router] + } } } } diff --git a/deployment/puppet/osnailyfacter/modular/openstack-network/server-config.pp 
b/deployment/puppet/osnailyfacter/modular/openstack-network/server-config.pp index 8f3d39f104..0767021873 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-network/server-config.pp +++ b/deployment/puppet/osnailyfacter/modular/openstack-network/server-config.pp @@ -38,7 +38,9 @@ if $use_neutron { $service_workers = pick($neutron_config['workers'], min(max($::processorcount, 2), 16)) $neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) - $dvr = pick($neutron_advanced_config['neutron_dvr'], false) + $dvr = pick($neutron_advanced_config['neutron_dvr'], false) + $l3_ha = pick($neutron_advanced_config['neutron_l3_ha'], false) + $l3agent_failover = $l3_ha ? { true => false, default => true} $nova_auth_user = pick($nova_hash['user'], 'nova') $nova_auth_password = $nova_hash['user_password'] @@ -59,7 +61,10 @@ if $use_neutron { database_max_retries => '-1', agent_down_time => '30', - allow_automatic_l3agent_failover => true, + allow_automatic_l3agent_failover => $l3agent_failover, + l3_ha => $l3_ha, + min_l3_agents_per_router => 2, + max_l3_agents_per_router => 0, api_workers => $service_workers, rpc_workers => $service_workers, diff --git a/deployment/puppet/osnailyfacter/modular/openstack-network/tasks.yaml b/deployment/puppet/osnailyfacter/modular/openstack-network/tasks.yaml index dd396c33f5..49c564b352 100644 --- a/deployment/puppet/osnailyfacter/modular/openstack-network/tasks.yaml +++ b/deployment/puppet/osnailyfacter/modular/openstack-network/tasks.yaml @@ -68,6 +68,7 @@ - id: openstack-network-routers type: puppet groups: [primary-controller] + condition: "settings:neutron_advanced_configuration.neutron_l3_ha.value == false" required_for: [openstack-network-end] requires: [openstack-network-networks] parameters: @@ -75,6 +76,20 @@ puppet_modules: /etc/puppet/modules timeout: 1800 +# Additional task for routers when L3 HA is enabled +# HA routers must be created on post-deployment step when all L3 agents are available +- id: 
openstack-network-routers-ha + type: puppet + role: [primary-controller] + condition: "settings:neutron_advanced_configuration.neutron_l3_ha.value == true" + required_for: [post_deployment_end] + requires: [post_deployment_start] + parameters: + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-network/routers.pp + puppet_modules: /etc/puppet/modules + timeout: 1800 + cwd: / + - id: openstack-network-agents-l3 type: puppet groups: [primary-controller,controller,compute] diff --git a/tests/noop/astute.yaml/neut_vlan.ceph.ceil-ceph-osd.yaml b/tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-ceph-osd.yaml similarity index 99% rename from tests/noop/astute.yaml/neut_vlan.ceph.ceil-ceph-osd.yaml rename to tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-ceph-osd.yaml index 99ce40c195..24e978c539 100644 --- a/tests/noop/astute.yaml/neut_vlan.ceph.ceil-ceph-osd.yaml +++ b/tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-ceph-osd.yaml @@ -540,6 +540,7 @@ public_network_assignment: neutron_advanced_configuration: neutron_dvr: false neutron_l2_pop: false + neutron_l3_ha: true public_vip: 172.16.0.3 public_vrouter_vip: 172.16.0.4 puppet: diff --git a/tests/noop/astute.yaml/neut_vlan.ceph.ceil-compute.yaml b/tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-compute.yaml similarity index 99% rename from tests/noop/astute.yaml/neut_vlan.ceph.ceil-compute.yaml rename to tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-compute.yaml index a4d7df29c5..e31cb7fcfb 100644 --- a/tests/noop/astute.yaml/neut_vlan.ceph.ceil-compute.yaml +++ b/tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-compute.yaml @@ -540,6 +540,7 @@ public_network_assignment: neutron_advanced_configuration: neutron_dvr: false neutron_l2_pop: false + neutron_l3_ha: true public_vip: 172.16.0.3 public_vrouter_vip: 172.16.0.4 puppet: diff --git a/tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-controller.yaml b/tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-controller.yaml new file mode 100644 index 
0000000000..b4c4068c61 --- /dev/null +++ b/tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-controller.yaml @@ -0,0 +1,1107 @@ +access: + email: admin@localhost + metadata: + label: Access + weight: 10 + password: admin + tenant: admin + user: admin +auth_key: '' +auto_assign_floating_ip: false +base_syslog: + syslog_port: '514' + syslog_server: 10.108.0.2 +ceilometer: + db_password: Toe5phw4 + enabled: true + metering_secret: tHq2rcoq + user_password: WBfBSo6U +cinder: + db_password: trj609V8 + fixed_key: 7883d66c643ce9a508ebcd4cd5516fc98814a11276bc98c4e8e671188b54e941 + user_password: sJRfG0GP +cobbler: + profile: ubuntu_1404_x86_64 +corosync: + group: 226.94.1.1 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: '12000' + verified: false +debug: false +deployment_id: 37 +deployment_mode: ha_compact +external_dns: + dns_list: 8.8.8.8, 8.8.4.4 + metadata: + label: Upstream DNS + weight: 90 +external_mongo: + hosts_ip: '' + metadata: + label: External MongoDB + restrictions: + - action: hide + condition: settings:additional_components.mongo.value == false + weight: 20 + mongo_db_name: ceilometer + mongo_password: ceilometer + mongo_replset: '' + mongo_user: ceilometer +external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: 0.pool.ntp.org, 1.pool.ntp.org +public_ssl: + metadata: + label: Public TLS + weight: 110 + horizon: true + services: true + cert_source: self_signed + cert_data: + content: 'somedataaboutyourkeypair' + hostname: public.fuel.local +fail_if_error: true +fqdn: node-125.test.domain.local +fuel_version: '6.1' +glance: + db_password: 385SUUrC + image_cache_max_size: '0' + user_password: A9KgbnX6 +heat: + auth_encryption_key: 2604abefbdf5043f07e989af10f6caba + db_password: NTeyraV2 + enabled: true + rabbit_password: ReVt6ZKQ + user_password: tryL79Yl +kernel_params: + kernel: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 + nomodeset + metadata: + label: 
Kernel parameters + weight: 40 +keystone: + admin_token: UxFQFw3m + db_password: e4Op1FQB +last_controller: node-125 +libvirt_type: qemu +management_network_range: 192.168.0.0/24 +management_vip: 192.168.0.6 +management_vrouter_vip: 192.168.0.7 +master_ip: 10.108.0.2 +metadata: + label: Common + weight: 30 +mongo: + enabled: false +mp: +- point: '1' + weight: '1' +- point: '2' + weight: '2' +murano: + db_password: 7I6NRZcB + enabled: false + rabbit_password: X4GK4R7f + user_password: nuCELy8q +murano_settings: + metadata: + label: Murano Settings + restrictions: + - action: hide + condition: settings:additional_components.murano.value == false + weight: 20 + murano_repo_url: http://catalog.openstack.org/ +mysql: + root_password: 5eqwkxY3 + wsrep_password: sFMiVJ7R +network_metadata: + nodes: + node-121: + swift_zone: '1' + uid: '121' + fqdn: node-121.test.domain.local + network_roles: + keystone/api: 192.168.0.1 + neutron/api: 192.168.0.1 + mgmt/database: 192.168.0.1 + sahara/api: 192.168.0.1 + heat/api: 192.168.0.1 + ceilometer/api: 192.168.0.1 + ex: + ceph/public: 192.168.0.1 + ceph/radosgw: + management: 192.168.0.1 + swift/api: 192.168.0.1 + mgmt/api: 192.168.0.1 + storage: 192.168.1.1 + mgmt/corosync: 192.168.0.1 + cinder/api: 192.168.0.1 + public/vip: + swift/replication: 192.168.1.1 + mgmt/messaging: 192.168.0.1 + neutron/mesh: 192.168.0.1 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.1 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.1 + mgmt/vip: 192.168.0.1 + murano/api: 192.168.0.1 + nova/api: 192.168.0.1 + horizon: 192.168.0.1 + mgmt/memcache: 192.168.0.1 + cinder/iscsi: 192.168.1.1 + ceph/replication: 192.168.1.1 + user_node_name: Untitled (6a:e7) + node_roles: + - primary-mongo + name: node-121 + node-124: + swift_zone: '1' + uid: '124' + fqdn: node-124.test.domain.local + network_roles: + keystone/api: 192.168.0.2 + neutron/api: 192.168.0.2 + mgmt/database: 192.168.0.2 + sahara/api: 192.168.0.2 + heat/api: 
192.168.0.2 + ceilometer/api: 192.168.0.2 + ex: + ceph/public: 192.168.0.2 + ceph/radosgw: + management: 192.168.0.2 + swift/api: 192.168.0.2 + mgmt/api: 192.168.0.2 + storage: 192.168.1.2 + mgmt/corosync: 192.168.0.2 + cinder/api: 192.168.0.2 + public/vip: + swift/replication: 192.168.1.2 + mgmt/messaging: 192.168.0.2 + neutron/mesh: 192.168.0.2 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.2 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.2 + mgmt/vip: 192.168.0.2 + murano/api: 192.168.0.2 + nova/api: 192.168.0.2 + horizon: 192.168.0.2 + mgmt/memcache: 192.168.0.2 + cinder/iscsi: 192.168.1.2 + ceph/replication: 192.168.1.2 + user_node_name: Untitled (6a:e7) + node_roles: + - ceph-osd + name: node-124 + node-125: + swift_zone: '1' + uid: '125' + fqdn: node-125.test.domain.local + network_roles: + keystone/api: 192.168.0.3 + neutron/api: 192.168.0.3 + mgmt/database: 192.168.0.3 + sahara/api: 192.168.0.3 + heat/api: 192.168.0.3 + ceilometer/api: 192.168.0.3 + ex: 172.16.0.2 + ceph/public: 192.168.0.3 + ceph/radosgw: 172.16.0.2 + management: 192.168.0.3 + swift/api: 192.168.0.3 + mgmt/api: 192.168.0.3 + storage: 192.168.1.3 + mgmt/corosync: 192.168.0.3 + cinder/api: 192.168.0.3 + public/vip: 172.16.0.2 + swift/replication: 192.168.1.3 + mgmt/messaging: 192.168.0.3 + neutron/mesh: 192.168.0.3 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.3 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.3 + mgmt/vip: 192.168.0.3 + murano/api: 192.168.0.3 + nova/api: 192.168.0.3 + horizon: 192.168.0.3 + mgmt/memcache: 192.168.0.3 + cinder/iscsi: 192.168.1.3 + ceph/replication: 192.168.1.3 + user_node_name: Untitled (6a:e7) + node_roles: + - primary-controller + name: node-125 + node-126: + swift_zone: '1' + uid: '126' + fqdn: node-126.test.domain.local + network_roles: + keystone/api: 192.168.0.4 + neutron/api: 192.168.0.4 + mgmt/database: 192.168.0.4 + sahara/api: 192.168.0.4 + heat/api: 192.168.0.4 + 
ceilometer/api: 192.168.0.4 + ex: + ceph/public: 192.168.0.4 + ceph/radosgw: + management: 192.168.0.4 + swift/api: 192.168.0.4 + mgmt/api: 192.168.0.4 + storage: 192.168.1.4 + mgmt/corosync: 192.168.0.4 + cinder/api: 192.168.0.4 + public/vip: + swift/replication: 192.168.1.4 + mgmt/messaging: 192.168.0.4 + neutron/mesh: 192.168.0.4 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.4 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.4 + mgmt/vip: 192.168.0.4 + murano/api: 192.168.0.4 + nova/api: 192.168.0.4 + horizon: 192.168.0.4 + mgmt/memcache: 192.168.0.4 + cinder/iscsi: 192.168.1.4 + ceph/replication: 192.168.1.4 + user_node_name: Untitled (6a:e7) + node_roles: + - ceph-osd + name: node-126 + node-127: + swift_zone: '1' + uid: '127' + fqdn: node-127.test.domain.local + network_roles: + keystone/api: 192.168.0.5 + neutron/api: 192.168.0.5 + mgmt/database: 192.168.0.5 + sahara/api: 192.168.0.5 + heat/api: 192.168.0.5 + ceilometer/api: 192.168.0.5 + ex: + ceph/public: 192.168.0.5 + ceph/radosgw: + management: 192.168.0.5 + swift/api: 192.168.0.5 + mgmt/api: 192.168.0.5 + storage: 192.168.1.5 + mgmt/corosync: 192.168.0.5 + cinder/api: 192.168.0.5 + public/vip: + swift/replication: 192.168.1.5 + mgmt/messaging: 192.168.0.5 + neutron/mesh: 192.168.0.5 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.5 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.5 + mgmt/vip: 192.168.0.5 + murano/api: 192.168.0.5 + nova/api: 192.168.0.5 + horizon: 192.168.0.5 + mgmt/memcache: 192.168.0.5 + cinder/iscsi: 192.168.1.5 + ceph/replication: 192.168.1.5 + user_node_name: Untitled (6a:e7) + node_roles: + - compute + name: node-127 + node-128: + swift_zone: '1' + uid: '128' + fqdn: node-128.test.domain.local + network_roles: + keystone/api: 192.168.0.8 + neutron/api: 192.168.0.8 + mgmt/database: 192.168.0.8 + sahara/api: 192.168.0.8 + heat/api: 192.168.0.8 + ceilometer/api: 192.168.0.8 + ex: 172.16.0.8 + ceph/public: 
192.168.0.8 + ceph/radosgw: 172.16.0.8 + management: 192.168.0.8 + swift/api: 192.168.0.8 + mgmt/api: 192.168.0.8 + storage: 192.168.1.8 + mgmt/corosync: 192.168.0.8 + cinder/api: 192.168.0.8 + public/vip: 172.16.0.8 + swift/replication: 192.168.1.8 + mgmt/messaging: 192.168.0.8 + neutron/mesh: 192.168.0.8 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.8 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.8 + mgmt/vip: 192.168.0.8 + murano/api: 192.168.0.8 + nova/api: 192.168.0.8 + horizon: 192.168.0.8 + mgmt/memcache: 192.168.0.8 + cinder/iscsi: 192.168.1.8 + ceph/replication: 192.168.1.8 + user_node_name: Untitled (56:67) + node_roles: + - controller + name: node-128 + vips: + vrouter: + ipaddr: 192.168.0.6 + management: + ipaddr: 192.168.0.7 + public: + ipaddr: 172.16.0.3 + vrouter_pub: + ipaddr: 172.16.0.3 +network_scheme: + endpoints: + br-ex: + IP: + - 172.16.0.8/24 + gateway: 172.16.0.1 + vendor_specific: + phy_interfaces: + - eth1 + br-floating: + IP: none + br-fw-admin: + IP: + - 10.108.0.9/24 + br-mgmt: + IP: + - 192.168.0.8/24 + vendor_specific: + phy_interfaces: + - eth0 + vlans: 101 + br-prv: + IP: none + vendor_specific: + phy_interfaces: + - eth0 + vlans: 1000:1030 + br-storage: + IP: + - 192.168.1.8/24 + vendor_specific: + phy_interfaces: + - eth0 + vlans: 102 + interfaces: + eth0: + vendor_specific: + bus_info: '0000:00:03.0' + driver: e1000 + eth1: + vendor_specific: + bus_info: '0000:00:04.0' + driver: e1000 + eth2: + vendor_specific: + bus_info: '0000:00:05.0' + driver: e1000 + eth3: + vendor_specific: + bus_info: '0000:00:06.0' + driver: e1000 + eth4: + vendor_specific: + bus_info: '0000:00:07.0' + driver: e1000 + provider: lnx + roles: + ex: br-ex + public/vip: br-ex + neutron/floating: br-floating + storage: br-storage + keystone/api: br-mgmt + neutron/api: br-mgmt + mgmt/database: br-mgmt + sahara/api: br-mgmt + ceilometer/api: br-mgmt + mgmt/vip: br-mgmt + ceph/public: br-mgmt + mgmt/messaging: br-mgmt + 
management: br-mgmt + swift/api: br-mgmt + mgmt/api: br-mgmt + storage: br-storage + mgmt/corosync: br-mgmt + cinder/api: br-mgmt + swift/replication: br-storage + neutron/mesh: br-mgmt + admin/pxe: br-fw-admin + mongo/db: br-mgmt + neutron/private: br-prv + fw-admin: br-fw-admin + glance/api: br-mgmt + heat/api: br-mgmt + murano/api: br-mgmt + nova/api: br-mgmt + horizon: br-mgmt + mgmt/memcache: br-mgmt + cinder/iscsi: br-storage + ceph/replication: br-storage + neutron/mesh: br-mgmt + transformations: + - action: add-br + name: br-fw-admin + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-ex + - action: add-br + name: br-floating + provider: ovs + - action: add-patch + bridges: + - br-floating + - br-ex + provider: ovs + - action: add-br + name: br-prv + provider: ovs + - action: add-patch + bridges: + - br-prv + - br-fw-admin + provider: ovs + - action: add-port + bridge: br-fw-admin + name: eth0 + - action: add-port + bridge: br-storage + name: eth0.102 + - action: add-port + bridge: br-mgmt + name: eth0.101 + - action: add-port + bridge: br-ex + name: eth1 + version: '1.1' +neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: disabled + vf_num: '16' +nodes: +- fqdn: node-121.test.domain.local + internal_address: 192.168.0.1 + internal_netmask: 255.255.255.0 + name: node-121 + role: primary-mongo + storage_address: 192.168.1.1 + storage_netmask: 255.255.255.0 + swift_zone: '121' + uid: '121' + user_node_name: Untitled (18:c9) +- fqdn: node-124.test.domain.local + internal_address: 192.168.0.2 + internal_netmask: 255.255.255.0 + name: node-124 + role: ceph-osd + storage_address: 192.168.1.2 + storage_netmask: 255.255.255.0 + swift_zone: '124' + uid: '124' + user_node_name: Untitled (6f:9d) +- fqdn: node-125.test.domain.local + internal_address: 192.168.0.3 + internal_netmask: 255.255.255.0 + name: node-125 + public_address: 172.16.0.2 + 
public_netmask: 255.255.255.0 + role: primary-controller + storage_address: 192.168.1.3 + storage_netmask: 255.255.255.0 + swift_zone: '125' + uid: '125' + user_node_name: Untitled (34:45) +- fqdn: node-126.test.domain.local + internal_address: 192.168.0.4 + internal_netmask: 255.255.255.0 + name: node-126 + role: ceph-osd + storage_address: 192.168.1.4 + storage_netmask: 255.255.255.0 + swift_zone: '126' + uid: '126' + user_node_name: Untitled (12:ea) +- fqdn: node-127.test.domain.local + internal_address: 192.168.0.5 + internal_netmask: 255.255.255.0 + name: node-127 + role: compute + storage_address: 192.168.1.5 + storage_netmask: 255.255.255.0 + swift_zone: '127' + uid: '127' + user_node_name: Untitled (74:27) +- fqdn: node-128.test.domain.local + internal_address: 192.168.0.8 + internal_netmask: 255.255.255.0 + name: node-128 + public_address: 172.16.0.8 + public_netmask: 255.255.255.0 + role: controller + storage_address: 192.168.1.8 + storage_netmask: 255.255.255.0 + swift_zone: '128' + uid: '128' + user_node_name: Untitled (56:67) +nova: + db_password: VXcP6cIR + state_path: /var/lib/nova + user_password: fuhtZH6v +nova_quota: false +online: true +openstack_version: 2014.2-6.1 +openstack_version_prev: null +priority: 200 +provision: + codename: trusty + image_data: + /: + container: gzip + format: ext4 + uri: http://10.108.0.2:8080/targetimages/env_37_ubuntu_1404_amd64.img.gz + /boot: + container: gzip + format: ext2 + uri: http://10.108.0.2:8080/targetimages/env_37_ubuntu_1404_amd64-boot.img.gz + metadata: + label: Provision + weight: 80 + method: image +public_network_assignment: + assign_to_all_nodes: false + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 +neutron_advanced_configuration: + neutron_dvr: false + neutron_l2_pop: false + neutron_l3_ha: true +public_vip: 172.16.0.3 +public_vrouter_vip: 172.16.0.4 +puppet: + manifests: 
rsync://10.108.0.2:/puppet/2014.2-6.1/manifests/ + modules: rsync://10.108.0.2:/puppet/2014.2-6.1/modules/ +puppet_debug: true +quantum: true +quantum_settings: + L2: + base_mac: fa:16:3e:00:00:00 + phys_nets: + physnet1: + bridge: br-floating + physnet2: + bridge: br-prv + vlan_range: 1000:1030 + segmentation_type: vlan + L3: + use_namespaces: true + database: + passwd: zOXpcc6c + keystone: + admin_password: XgdPodA7 + metadata: + metadata_proxy_shared_secret: QU11ydS2 + predefined_networks: + net04: + L2: + network_type: vlan + physnet: physnet2 + router_ext: false + segment_id: null + L3: + enable_dhcp: true + floating: null + gateway: 192.168.111.1 + nameservers: + - 8.8.4.4 + - 8.8.8.8 + subnet: 192.168.111.0/24 + shared: false + tenant: admin + net04_ext: + L2: + network_type: flat + physnet: physnet1 + router_ext: true + segment_id: null + L3: + enable_dhcp: false + floating: 172.16.0.130:172.16.0.254 + gateway: 172.16.0.1 + nameservers: [] + subnet: 172.16.0.0/24 + shared: false + tenant: admin +rabbit: + password: 1GXPbTgb +repo_setup: + installer_initrd: + local: /var/www/nailgun/ubuntu/x86_64/images/initrd.gz + remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz + installer_kernel: + local: /var/www/nailgun/ubuntu/x86_64/images/linux + remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux + metadata: + label: Repositories + weight: 50 + repos: + - name: ubuntu + priority: null + section: main universe multiverse + suite: trusty + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: ubuntu-updates + priority: null + section: main universe multiverse + suite: trusty-updates + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: ubuntu-security + priority: null + section: main universe multiverse + suite: trusty-security + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: mos + priority: 1050 + section: main restricted + suite: 
mos6.1 + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-updates + priority: 1050 + section: main restricted + suite: mos6.1-updates + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-security + priority: 1050 + section: main restricted + suite: mos6.1-security + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-holdback + priority: 1100 + section: main restricted + suite: mos6.1-holdback + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ +resume_guests_state_on_host_boot: true +role: primary-controller +sahara: + db_password: R68HpdNS + enabled: false + user_password: ts32qXcD +status: discover +storage: + ephemeral_ceph: false + images_ceph: true + images_vcenter: false + iser: false + metadata: + label: Storage + weight: 60 + objects_ceph: true + osd_pool_size: '2' + pg_num: 256 + volumes_ceph: true + volumes_lvm: false +storage_network_range: 192.168.1.0/24 +swift: + user_password: bpFT3TKn +syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: '514' + syslog_server: '' + syslog_transport: tcp +tasks: +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/hiera/hiera.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 100 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/globals/globals.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 200 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/logging/logging.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 300 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/tools/tools.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 400 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: 
/etc/puppet/modules/osnailyfacter/modular/umm/umm.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 500 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/netconfig/netconfig.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 600 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/hosts/hosts.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 700 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/firewall/firewall.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 800 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/cluster/cluster.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 900 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/virtual_ips/virtual_ips.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1000 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/virtual_ips/conntrackd.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1100 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/cluster-haproxy/cluster-haproxy.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1200 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-haproxy/openstack-haproxy.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1300 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/dns/dns-server.pp + puppet_modules: 
/etc/puppet/modules + timeout: 3600 + priority: 1400 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/dns/dns-client.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1500 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ntp/ntp-server.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1600 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/database/database.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1700 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceilometer/controller.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1800 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceilometer/radosgw_user.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1800 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/rabbitmq/rabbitmq.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 1900 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/memcached/memcached.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2000 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/keystone/keystone.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2100 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-cinder/openstack-cinder.pp + puppet_modules: /etc/puppet/modules + timeout: 1200 + priority: 2200 + type: puppet + uids: + - '125' 
+- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/glance/glance.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2300 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-controller/openstack-controller.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2400 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/openstack-network/openstack-network-controller.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2500 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/heat/heat.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2600 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/horizon/horizon.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2700 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/murano/murano.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2800 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/sahara/sahara.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 2900 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/api-proxy/api-proxy.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3000 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/mon.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3100 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: 
/etc/puppet/modules/osnailyfacter/modular/ceph/radosgw.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3200 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/swift/swift.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3300 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/roles/controller.pp + puppet_modules: /etc/puppet/modules + timeout: 3600 + priority: 3400 + type: puppet + uids: + - '125' +- parameters: + cwd: / + puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/swift/rebalance_cronjob.pp + puppet_modules: /etc/puppet/modules + timeout: 300 + priority: 3500 + type: puppet + uids: + - '125' +test_vm_image: + container_format: bare + disk_format: qcow2 + glance_properties: '' + img_name: TestVM + img_path: /usr/share/cirros-testvm/cirros-x86_64-disk.img + min_ram: 64 + os_name: cirros + public: 'true' +uid: '125' +use_cow_images: true +use_vcenter: false +user_node_name: Untitled (34:45) +workloads_collector: + enabled: true + metadata: + label: Workloads Collector User + restrictions: + - action: hide + condition: 'true' + weight: 10 + password: v6vMAe7Q + tenant: services + username: workloads_collector diff --git a/tests/noop/astute.yaml/neut_vlan.ceph.ceil-primary-controller.yaml b/tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-primary-controller.yaml similarity index 94% rename from tests/noop/astute.yaml/neut_vlan.ceph.ceil-primary-controller.yaml rename to tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-primary-controller.yaml index 059477aad5..c06b8bb285 100644 --- a/tests/noop/astute.yaml/neut_vlan.ceph.ceil-primary-controller.yaml +++ b/tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-primary-controller.yaml @@ -328,6 +328,47 @@ network_metadata: node_roles: - compute name: node-127 + node-128: + swift_zone: '1' + uid: '128' + fqdn: node-128.test.domain.local + 
network_roles: + keystone/api: 192.168.0.8 + neutron/api: 192.168.0.8 + mgmt/database: 192.168.0.8 + sahara/api: 192.168.0.8 + heat/api: 192.168.0.8 + ceilometer/api: 192.168.0.8 + ex: 172.16.0.8 + ceph/public: 192.168.0.8 + ceph/radosgw: 172.16.0.8 + management: 192.168.0.8 + swift/api: 192.168.0.8 + mgmt/api: 192.168.0.8 + storage: 192.168.1.8 + mgmt/corosync: 192.168.0.8 + cinder/api: 192.168.0.8 + public/vip: 172.16.0.8 + swift/replication: 192.168.1.8 + mgmt/messaging: 192.168.0.8 + neutron/mesh: 192.168.0.8 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.8 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.8 + mgmt/vip: 192.168.0.8 + murano/api: 192.168.0.8 + nova/api: 192.168.0.8 + horizon: 192.168.0.8 + mgmt/memcache: 192.168.0.8 + cinder/iscsi: 192.168.1.8 + ceph/replication: 192.168.1.8 + user_node_name: Untitled (56:67) + node_roles: + - controller + name: node-128 vips: vrouter: ipaddr: 192.168.0.6 @@ -526,6 +567,18 @@ nodes: swift_zone: '127' uid: '127' user_node_name: Untitled (74:27) +- fqdn: node-128.test.domain.local + internal_address: 192.168.0.8 + internal_netmask: 255.255.255.0 + name: node-128 + public_address: 172.16.0.8 + public_netmask: 255.255.255.0 + role: controller + storage_address: 192.168.1.8 + storage_netmask: 255.255.255.0 + swift_zone: '128' + uid: '128' + user_node_name: Untitled (56:67) nova: db_password: VXcP6cIR state_path: /var/lib/nova @@ -561,6 +614,7 @@ public_network_assignment: neutron_advanced_configuration: neutron_dvr: false neutron_l2_pop: false + neutron_l3_ha: true public_vip: 172.16.0.3 public_vrouter_vip: 172.16.0.4 puppet: diff --git a/tests/noop/astute.yaml/neut_vlan.ceph.ceil-primary-mongo.yaml b/tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-primary-mongo.yaml similarity index 99% rename from tests/noop/astute.yaml/neut_vlan.ceph.ceil-primary-mongo.yaml rename to tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-primary-mongo.yaml index 5dd92fcb85..86d245499a 100644 --- 
a/tests/noop/astute.yaml/neut_vlan.ceph.ceil-primary-mongo.yaml +++ b/tests/noop/astute.yaml/neut_vlan_l3ha.ceph.ceil-primary-mongo.yaml @@ -541,6 +541,7 @@ public_network_assignment: neutron_advanced_configuration: neutron_dvr: false neutron_l2_pop: false + neutron_l3_ha: true public_vip: 172.16.0.3 public_vrouter_vip: 172.16.0.4 puppet: diff --git a/tests/noop/spec/hosts/openstack-network/routers_spec.rb b/tests/noop/spec/hosts/openstack-network/routers_spec.rb index e2cf61add5..8ef5d9f5f2 100644 --- a/tests/noop/spec/hosts/openstack-network/routers_spec.rb +++ b/tests/noop/spec/hosts/openstack-network/routers_spec.rb @@ -9,36 +9,55 @@ describe manifest do neutron_config = Noop.hiera('neutron_config') nets = neutron_config['predefined_networks'] - floating_net = (neutron_config['default_floating_net'] or 'net04_ext') - private_net = (neutron_config['default_private_net'] or 'net04') - default_router = (neutron_config['default_router'] or 'router04') + floating_net = (neutron_config['default_floating_net'] or 'net04_ext') + private_net = (neutron_config['default_private_net'] or 'net04') + default_router = (neutron_config['default_router'] or 'router04') + l3_ha = Noop.hiera_hash('neutron_advanced_configuration', {}).fetch('neutron_l3_ha', false) + network_metadata = Noop.hiera('network_metadata') + neutron_controller_roles = Noop.hiera('neutron_controller_roles', ['controller', 'primary-controller']) + neutron_controller_nodes = Noop.puppet_function 'get_nodes_hash_by_roles', network_metadata, neutron_controller_roles + neutron_controllers_num = neutron_controller_nodes.size - context 'Default router serves tenant networks' do - it 'should be created and serve gateway' do - should contain_neutron_router(default_router).with( - 'ensure' => 'present', - 'gateway_network_name' => floating_net, - 'name' => default_router, - ) + if (neutron_controllers_num < 2 and l3_ha) + context 'With L3 HA and not enough controllers' do + it 'should not create a default
router' do + should_not contain_neutron_router(default_router) + end + it 'should not serve private network' do + should_not contain_neutron_router_interface("#{default_router}:#{private_net}__subnet") + end + it 'should not serve baremetal network' do + should_not contain_neutron_router_interface("#{default_router}:baremetal__subnet") + end end - it 'should serve private network' do - should contain_neutron_router_interface("#{default_router}:#{private_net}__subnet").with( - 'ensure' => 'present', - ) - should contain_neutron_router(default_router).that_comes_before( - "Neutron_router_interface[#{default_router}:#{private_net}__subnet]" - ) + else + context 'Default router serves tenant networks' do + it 'should be created and serve gateway' do + should contain_neutron_router(default_router).with( + 'ensure' => 'present', + 'gateway_network_name' => floating_net, + 'name' => default_router, + ) + end + it 'should serve private network' do + should contain_neutron_router_interface("#{default_router}:#{private_net}__subnet").with( + 'ensure' => 'present', + ) + should contain_neutron_router(default_router).that_comes_before( + "Neutron_router_interface[#{default_router}:#{private_net}__subnet]" + ) + end end - end - context 'Default router serves Ironic baremetal network', :if => nets.has_key?('baremetal') do - it 'should serve baremetal network' do - should contain_neutron_router_interface("#{default_router}:baremetal__subnet").with( - 'ensure' => 'present', - ) - should contain_neutron_router(default_router).that_comes_before( - "Neutron_router_interface[#{default_router}:baremetal__subnet]" - ) + context 'Default router serves Ironic baremetal network', :if => nets.has_key?('baremetal') do + it 'should serve baremetal network' do + should contain_neutron_router_interface("#{default_router}:baremetal__subnet").with( + 'ensure' => 'present', + ) + should contain_neutron_router(default_router).that_comes_before( + 
"Neutron_router_interface[#{default_router}:baremetal__subnet]" + ) + end end end end diff --git a/tests/noop/spec/hosts/openstack-network/server-config_spec.rb b/tests/noop/spec/hosts/openstack-network/server-config_spec.rb index 78af8f6df1..b3992f9652 100644 --- a/tests/noop/spec/hosts/openstack-network/server-config_spec.rb +++ b/tests/noop/spec/hosts/openstack-network/server-config_spec.rb @@ -29,9 +29,10 @@ describe manifest do end context 'with Neutron-server' do - neutron_config = Noop.hiera_hash('neutron_config') - management_vip = Noop.hiera('management_vip') + neutron_config = Noop.hiera_hash('neutron_config') + management_vip = Noop.hiera('management_vip') service_endpoint = Noop.hiera('service_endpoint', management_vip) + l3_ha = Noop.hiera_hash('neutron_advanced_configuration', {}).fetch('neutron_l3_ha', false) it 'database options' do database_vip = Noop.hiera('database_vip') @@ -64,13 +65,30 @@ describe manifest do it { should contain_class('neutron::server').with('manage_service' => 'true')} it { should contain_class('neutron::server').with('enabled' => 'false')} # bacause server should be started after plugin configured it { should contain_class('neutron::server').with('agent_down_time' => '30')} - it { should contain_class('neutron::server').with('allow_automatic_l3agent_failover' => 'true')} it 'dvr' do dvr = Noop.hiera_hash('neutron_advanced_configuration', {}).fetch('neutron_dvr', false) should contain_class('neutron::server').with('router_distributed' => dvr) end + if l3_ha + it 'l3_ha_enabled' do + should contain_class('neutron::server').with( + 'l3_ha' => true, + 'allow_automatic_l3agent_failover' => false, + 'min_l3_agents_per_router' => 2, + 'max_l3_agents_per_router' => 0, + ) + end + else + it 'l3_ha_disabled' do + should contain_class('neutron::server').with( + 'l3_ha' => false, + 'allow_automatic_l3agent_failover' => true, + ) + end + end + it 'worker count' do fallback_workers = [[processorcount, 2].max, 16].min workers = 
neutron_config.fetch('workers', fallback_workers)