tripleo-ci/scripts/rh1-env.yaml
Steven Hardy 821d84f34c Replace references to deprecated controllerExtraConfig
This should be ControllerExtraConfig, since the current parameter name
has been deprecated for some time, and is inconsistent with all other
roles.

Since mitaka is now EOL this also removes references to the
worker-config-mitaka-and-below environment.

Change-Id: I0f07b3abbe290ed7f740a6f4915e16be39e3a4c6
2017-08-04 23:05:55 +00:00

parameter_defaults:
  CloudName: ci-overcloud.rh1.tripleo.org
  ControllerExtraConfig:
    tripleo::loadbalancer::public_virtual_ip: 66.187.229.2
    neutron::agents::ml2::ovs::prevent_arp_spoofing: false
    # https://bugs.launchpad.net/tripleo/+bug/1590101
    # TripleO sets this to 1400; the MTU of most physical networks without jumbo frames is 1500.
    # TripleO also forces dhcp-option-force=26,1400, which leaves no overhead room for VXLAN.
    # We probably shouldn't force this, as neutron automatically subtracts the overlay protocol
    # overhead from global_physnet_mtu.
    # TODO: investigate properly
    neutron::global_physnet_mtu: 1500
    # This is deprecated but takes precedence(?)
    neutron::network_device_mtu: 1500
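    # Illustrative arithmetic only, not a setting used here: VXLAN over IPv4 adds roughly
    # 50 bytes of encapsulation (20 outer IP + 8 UDP + 8 VXLAN + 14 inner Ethernet), so with
    # global_physnet_mtu: 1500 neutron would derive a tenant network MTU of 1500 - 50 = 1450,
    # rather than the 1400 forced by dhcp-option-force=26,1400.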
    # rh2 disks are small; we're relying on the fact that CI jobs at different stages won't
    # ever hit peak disk usage together (they also don't use everything allocated in the flavor).
    nova::scheduler::filter::disk_allocation_ratio: 4
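    # Worked example for illustration only: with disk_allocation_ratio: 4, the scheduler treats
    # a node with 1 TB of local disk as having 4 TB schedulable, so flavors totalling up to
    # 4 TB of root/ephemeral disk can land on it even though the physical disk is far smaller.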
    # We have a few compute nodes in rh1 that aren't using/don't have SSDs. Since
    # those nodes are using a 1 TB HDD instead, they tend to get preferred by the
    # scheduler if we use the normal weighers. In reality, we care mostly about
    # evenly distributing the RAM across nodes, so let's only use that weigher.
    nova::scheduler::filter::scheduler_weight_classes: nova.scheduler.weights.ram.RAMWeigher
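    # Added context: RAMWeigher scores hosts by free RAM (the default positive
    # ram_weight_multiplier favours hosts with more free memory), so instances spread evenly
    # across nodes instead of stacking up on the HDD nodes with the most spare disk.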
    # Running more than 4 Heat engine workers allows Heat to essentially DoS the other
    # services if a lot of jobs come in at once. This is borrowed from
    # worker-config-mitaka-and-below.yaml. Once rh1 is on a newer release we should
    # be able to just use HeatWorkers.
    heat::config::heat_config:
      DEFAULT/num_engine_workers:
        value: 4
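    # Sketch of the newer-release equivalent (an assumption, not applied here): once the
    # HeatWorkers parameter also drives heat::engine::num_engine_workers, this heat::config
    # override could presumably be replaced by something like
    #   HeatWorkers: 4
    # in parameter_defaults, reconciled with the HeatWorkers value set below.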
  # Limit the workers for these services. More workers don't appreciably help our
  # capacity, so they're just a waste of memory and CPU.
  HeatWorkers: 12
  NovaWorkers: 12
  NovaComputeExtraConfig:
    neutron::agents::ml2::ovs::prevent_arp_spoofing: false
    neutron::plugins::ml2::firewall_driver: neutron.agent.firewall.NoopFirewallDriver
    neutron::agents::ml2::ovs::firewall_driver: neutron.agent.firewall.NoopFirewallDriver
    neutron::global_physnet_mtu: 1500
    # This is deprecated but takes precedence(?)
    neutron::network_device_mtu: 1500
    # Allow file injection so that the nodepool cloud creds can be injected into the te-broker
    nova::compute::libvirt::libvirt_inject_partition: -1
    # This should be OK if the cloud is exclusively for CI, but it might end in tears
    nova::compute::libvirt::libvirt_disk_cachemodes:
      - file=unsafe
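    # Added context: qemu's "unsafe" cache mode ignores guest flush requests, so a host crash
    # or power loss can corrupt instance disks; the trade-off only makes sense because CI
    # instances are disposable.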
  EndpointMap:
    AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
    AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
    AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
    CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
    CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
    CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
    CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
    CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
    CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
    GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
    GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
    GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
    GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
    GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
    GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
    HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
    HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
    HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
    HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
    KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
    KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
    KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
    KeystoneV3Admin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
    KeystoneV3Internal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
    KeystoneV3Public: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
    NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
    NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
    NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
    NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
    NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
    NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
    NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
    NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
    NovaEC2Public: {protocol: 'https', port: '13773', host: 'CLOUDNAME'}
    NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
    NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
    NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
    SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
    SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
    SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
    SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
    SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
    SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
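  # Added context: the 13xxx https public endpoints follow the standard TripleO SSL port
  # mapping, terminated for the CloudName FQDN on the public VIP, while admin/internal
  # endpoints stay on plain http inside the control plane; the certificate is delivered via
  # the NodeTLSData template registered below.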
resource_registry:
  OS::TripleO::NodeTLSData: /usr/share/openstack-tripleo-heat-templates/puppet/extraconfig/tls/tls-cert-inject.yaml