magnum/magnum/drivers/swarm_fedora_atomic_v1/templates/swarmnode.yaml
Feilong Wang be0609ce88 Support soft-anti-affinity policy for nodes
Currently there is no guarantee that the nodes of a cluster are created on
different compute hosts, so it would be better to create a server group with an
anti-affinity policy to improve the cluster's HA. This patch creates a server
group for the master and minion nodes with the soft-anti-affinity policy by
default.

Closes-Bug: #1737802

Change-Id: Icc7a73ef55296a58bf00719ca4d1cdcc304fab86
2018-01-24 07:13:48 +13:00
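
For context: the server group itself is defined in the parent stack, and its ID
reaches this node template through the nodes_server_group_id parameter, which
the node's scheduler_hints consume. A minimal sketch of that parent-side wiring,
assuming a nodes_affinity_policy parameter that defaults to soft-anti-affinity
(resource and parameter names are illustrative; the real definitions live in
swarmcluster.yaml):

    parameters:
      nodes_affinity_policy:
        type: string
        description: affinity policy for the cluster nodes server group
        default: soft-anti-affinity

    resources:
      nodes_server_group:
        type: OS::Nova::ServerGroup
        properties:
          policies: [{get_param: nodes_affinity_policy}]

Each node stack (this file) then hands the group to the scheduler via
scheduler_hints: { group: { get_param: nodes_server_group_id }}.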


heat_template_version: 2014-10-16

description: >
  This is a nested stack that defines a single swarm node, based on a
  Fedora Atomic cloud image. This stack is included by a ResourceGroup
  resource in the parent template (swarmcluster.yaml).
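
# For reference, the parent stack instantiates this template through an
# OS::Heat::ResourceGroup, roughly like the sketch below (resource and
# parameter names are illustrative; the real definition lives in
# swarmcluster.yaml):
#
#   swarm_nodes:
#     type: OS::Heat::ResourceGroup
#     properties:
#       count: {get_param: number_of_nodes}
#       resource_def:
#         type: swarmnode.yaml
#         properties:
#           name: ...
#           server_image: {get_param: server_image}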

parameters:
  name:
    type: string
    description: server name
  server_image:
    type: string
    description: glance image used to boot the server
  server_flavor:
    type: string
    description: flavor to use when booting the server
  ssh_key_name:
    type: string
    description: name of ssh key to be provisioned on our server
  docker_volume_size:
    type: number
    description: >
      size of a cinder volume to allocate to docker for container/image
      storage
  docker_volume_type:
    type: string
    description: >
      type of a cinder volume to allocate to docker for container/image
      storage
  docker_storage_driver:
    type: string
    description: docker storage driver name
  external_network:
    type: string
    description: uuid/name of a network to use for floating ip addresses
  fixed_network_id:
    type: string
    description: Network from which to allocate fixed addresses.
  fixed_subnet_id:
    type: string
    description: Subnet from which to allocate fixed addresses.
  network_driver:
    type: string
    description: network driver to use for instantiating container networks
  flannel_network_cidr:
    type: string
    description: network range for flannel overlay network
  http_proxy:
    type: string
    description: http proxy address for docker
  https_proxy:
    type: string
    description: https proxy address for docker
  no_proxy:
    type: string
    description: no proxies for docker
  swarm_api_ip:
    type: string
    description: swarm master's api server ip address
  api_ip_address:
    type: string
    description: swarm master's api server public ip address
  cluster_uuid:
    type: string
    description: identifier for the cluster this template is generating
  magnum_url:
    type: string
    description: endpoint to retrieve TLS certs from
  tls_disabled:
    type: boolean
    description: whether or not to disable TLS
  verify_ca:
    type: boolean
    description: whether or not to validate certificate authority
  swarm_version:
    type: string
    description: version of swarm used for swarm cluster
  secgroup_swarm_node_id:
    type: string
    description: ID of the security group for swarm node.
  etcd_server_ip:
    type: string
    description: ip address of the load balancer pool of etcd server.
  trustee_domain_id:
    type: string
    description: domain id of the trustee
  trustee_user_id:
    type: string
    description: user id of the trustee
  trustee_username:
    type: string
    description: username of the trustee
  trustee_password:
    type: string
    description: password of the trustee
    hidden: true
  trust_id:
    type: string
    description: id of the trust which is used by the trustee
    hidden: true
  auth_url:
    type: string
    description: url for keystone
  registry_enabled:
    type: boolean
    description: >
      Indicates whether the docker registry is enabled.
  registry_port:
    type: number
    description: port of registry service
  swift_region:
    type: string
    description: region of swift service
  registry_container:
    type: string
    description: >
      name of swift container which docker registry stores images in
  registry_insecure:
    type: boolean
    description: >
      indicates whether to skip TLS verification between registry and backend storage
  registry_chunksize:
    type: number
    description: >
      size of the data segments for the swift dynamic large objects
  volume_driver:
    type: string
    description: volume driver to use for container storage
    default: ""
  rexray_preempt:
    type: string
    description: >
      enables any host to take control of a volume irrespective of whether
      other hosts are using the volume
    default: "false"
  openstack_ca:
    type: string
    description: The OpenStack CA certificate to install on the node.
  nodes_server_group_id:
    type: string
    description: ID of the server group for cluster nodes.

resources:

  node_wait_handle:
    type: "OS::Heat::WaitConditionHandle"

  node_wait_condition:
    type: "OS::Heat::WaitCondition"
    depends_on: swarm-node
    properties:
      handle: {get_resource: node_wait_handle}
      timeout: 6000
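
  # The handle's pre-signed curl command reaches the node as $WAIT_CURL (see
  # write_heat_params and write_swarm_agent_failure_service below). The
  # cfn-signal fragment is expected to report success through it, roughly
  #   $WAIT_CURL --data-binary '{"status": "SUCCESS"}'
  # while the failure service reports FAILURE, before the timeout above expires.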

  ######################################################################
  #
  # software configs. these are components that are combined into
  # a multipart MIME user-data archive.

  no_proxy_extended:
    type: OS::Heat::Value
    properties:
      type: string
      value:
        list_join:
          - ','
          - - {get_param: swarm_api_ip}
            - {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]}
            - {get_param: etcd_server_ip}
            - {get_param: api_ip_address}
            - {get_param: no_proxy}

  write_heat_params:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config:
        str_replace:
          template: {get_file: ../../common/templates/swarm/fragments/write-heat-params-node.yaml}
          params:
            "$WAIT_CURL": {get_attr: [node_wait_handle, curl_cli]}
            "$DOCKER_VOLUME": {get_resource: docker_volume}
            "$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size}
            "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver}
            "$HTTP_PROXY": {get_param: http_proxy}
            "$HTTPS_PROXY": {get_param: https_proxy}
            "$NO_PROXY": {get_attr: [no_proxy_extended, value]}
            "$SWARM_API_IP": {get_param: swarm_api_ip}
            "$SWARM_NODE_IP": {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]}
            "$CLUSTER_UUID": {get_param: cluster_uuid}
            "$MAGNUM_URL": {get_param: magnum_url}
            "$TLS_DISABLED": {get_param: tls_disabled}
            "$VERIFY_CA": {get_param: verify_ca}
            "$NETWORK_DRIVER": {get_param: network_driver}
            "$ETCD_SERVER_IP": {get_param: etcd_server_ip}
            "$API_IP_ADDRESS": {get_param: api_ip_address}
            "$SWARM_VERSION": {get_param: swarm_version}
            "$TRUSTEE_DOMAIN_ID": {get_param: trustee_domain_id}
            "$TRUSTEE_USER_ID": {get_param: trustee_user_id}
            "$TRUSTEE_USERNAME": {get_param: trustee_username}
            "$TRUSTEE_PASSWORD": {get_param: trustee_password}
            "$TRUST_ID": {get_param: trust_id}
            "$AUTH_URL": {get_param: auth_url}
            "$REGISTRY_ENABLED": {get_param: registry_enabled}
            "$REGISTRY_PORT": {get_param: registry_port}
            "$SWIFT_REGION": {get_param: swift_region}
            "$REGISTRY_CONTAINER": {get_param: registry_container}
            "$REGISTRY_INSECURE": {get_param: registry_insecure}
            "$REGISTRY_CHUNKSIZE": {get_param: registry_chunksize}
            "$VOLUME_DRIVER": {get_param: volume_driver}
            "$REXRAY_PREEMPT": {get_param: rexray_preempt}

  install_openstack_ca:
    type: OS::Heat::SoftwareConfig
    properties:
      group: ungrouped
      config:
        str_replace:
          params:
            $OPENSTACK_CA: {get_param: openstack_ca}
          template: {get_file: ../../common/templates/fragments/atomic-install-openstack-ca.sh}

  remove_docker_key:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config: {get_file: ../../common/templates/swarm/fragments/remove-docker-key.sh}

  make_cert:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config: {get_file: ../../common/templates/swarm/fragments/make-cert.py}

  configure_docker_storage:
    type: OS::Heat::SoftwareConfig
    properties:
      group: ungrouped
      config:
        str_replace:
          params:
            $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh}
          template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh}

  configure_docker_registry:
    type: OS::Heat::SoftwareConfig
    properties:
      group: ungrouped
      config: {get_file: ../../common/templates/fragments/configure-docker-registry.sh}

  add_docker_daemon_options:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config: {get_file: ../../common/templates/swarm/fragments/add-docker-daemon-options.sh}

  write_docker_socket:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config: {get_file: ../../common/templates/swarm/fragments/write-docker-socket.yaml}

  network_service:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config: {get_file: ../../common/templates/swarm/fragments/network-service.sh}

  write_swarm_agent_failure_service:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config:
        str_replace:
          template: {get_file: ../../common/templates/swarm/fragments/write-cluster-failure-service.yaml}
          params:
            "$SERVICE": swarm-agent
            "$WAIT_CURL": {get_attr: [node_wait_handle, curl_cli]}
            "$VERIFY_CA": {get_param: verify_ca}

  write_swarm_agent_service:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config: {get_file: ../../common/templates/swarm/fragments/write-swarm-agent-service.sh}

  enable_docker_registry:
    type: OS::Heat::SoftwareConfig
    properties:
      group: ungrouped
      config: {get_file: ../../common/templates/fragments/enable-docker-registry.sh}

  enable_services:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config:
        str_replace:
          template: {get_file: ../../common/templates/swarm/fragments/enable-services.sh}
          params:
            "$NODE_SERVICES": "docker.socket docker swarm-agent"

  cfn_signal:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config: {get_file: ../../common/templates/swarm/fragments/cfn-signal.sh}

  configure_selinux:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config: {get_file: ../../common/templates/swarm/fragments/configure-selinux.sh}

  add_proxy:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config: {get_file: ../../common/templates/swarm/fragments/add-proxy.sh}

  volume_service:
    type: "OS::Heat::SoftwareConfig"
    properties:
      group: ungrouped
      config: {get_file: ../../common/templates/swarm/fragments/volume-service.sh}

  swarm_node_init:
    type: "OS::Heat::MultipartMime"
    properties:
      parts:
        - config: {get_resource: install_openstack_ca}
        - config: {get_resource: configure_selinux}
        - config: {get_resource: remove_docker_key}
        - config: {get_resource: write_heat_params}
        - config: {get_resource: make_cert}
        - config: {get_resource: network_service}
        - config: {get_resource: configure_docker_storage}
        - config: {get_resource: configure_docker_registry}
        - config: {get_resource: write_swarm_agent_failure_service}
        - config: {get_resource: write_swarm_agent_service}
        - config: {get_resource: add_docker_daemon_options}
        - config: {get_resource: write_docker_socket}
        - config: {get_resource: add_proxy}
        - config: {get_resource: enable_docker_registry}
        - config: {get_resource: enable_services}
        - config: {get_resource: cfn_signal}
        - config: {get_resource: volume_service}

  # do NOT use "_" (underscore) in the Nova server name
  # it creates a mismatch between the generated Nova name and its hostname
  # which can lead to weird problems
  swarm-node:
    type: "OS::Nova::Server"
    properties:
      name: {get_param: name}
      image:
        get_param: server_image
      flavor:
        get_param: server_flavor
      key_name:
        get_param: ssh_key_name
      user_data_format: RAW
      user_data: {get_resource: swarm_node_init}
      networks:
        - port:
            get_resource: swarm_node_eth0
      scheduler_hints: { group: { get_param: nodes_server_group_id }}

  swarm_node_eth0:
    type: "OS::Neutron::Port"
    properties:
      network_id:
        get_param: fixed_network_id
      security_groups:
        - {get_param: secgroup_swarm_node_id}
      fixed_ips:
        - subnet_id:
            get_param: fixed_subnet_id
      allowed_address_pairs:
        - ip_address: {get_param: flannel_network_cidr}

  swarm_node_floating:
    type: "OS::Neutron::FloatingIP"
    properties:
      floating_network:
        get_param: external_network
      port_id:
        get_resource: swarm_node_eth0

  ######################################################################
  #
  # docker storage. This allocates a cinder volume and attaches it
  # to the node.
  #

  docker_volume:
    type: Magnum::Optional::Cinder::Volume
    properties:
      size: {get_param: docker_volume_size}
      volume_type: {get_param: docker_volume_type}

  docker_volume_attach:
    type: Magnum::Optional::Cinder::VolumeAttachment
    properties:
      instance_uuid: {get_resource: swarm-node}
      volume_id: {get_resource: docker_volume}
      mountpoint: /dev/vdb

outputs:

  swarm_node_ip:
    value: {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]}
    description: >
      This is the "private" address of the Swarm node.

  swarm_node_external_ip:
    value: {get_attr: [swarm_node_floating, floating_ip_address]}
    description: >
      This is the "public" address of the Swarm node.