magnum/magnum/drivers/swarm_fedora_atomic_v1/templates/cluster.yaml


heat_template_version: 2013-05-23
description: >
This template will boot a Docker swarm cluster. A swarm cluster is made up
of several master nodes, and N agent nodes. Every node in the cluster,
including the master, is running a Docker daemon and a swarm agent
  advertising it to the cluster. The master is running an additional swarm
master container listening on port 2376. By default, the cluster is made
up of one master node and one agent node.
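#
# Note: Magnum normally creates this stack itself and fills in the parameters
# below from the bay and baymodel. As a rough illustration only, the required
# parameters could also be supplied directly through the Heat client; the
# stack name and every value shown here are placeholders, not Magnum defaults:
#
#   openstack stack create -t cluster.yaml \
#     --parameter ssh_key_name=mykey \
#     --parameter external_network=public \
#     --parameter server_image=fedora-atomic \
#     --parameter discovery_url=https://discovery.etcd.io/<token> \
#     --parameter bay_uuid=<bay-uuid> \
#     --parameter magnum_url=http://<magnum-api-host>:9511/v1 \
#     --parameter auth_url=http://<keystone-host>:5000/v3 \
#     my-swarm-cluster
#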
parameters:
#
# REQUIRED PARAMETERS
#
ssh_key_name:
type: string
description: name of ssh key to be provisioned on our server
external_network:
type: string
description: uuid/name of a network to use for floating ip addresses
discovery_url:
type: string
description: url provided for node discovery
bay_uuid:
type: string
description: identifier for the bay this template is generating
magnum_url:
type: string
description: endpoint to retrieve TLS certs from
server_image:
type: string
description: glance image used to boot the server
#
# OPTIONAL PARAMETERS
#
master_flavor:
type: string
default: m1.small
description: flavor to use when booting the swarm master
node_flavor:
type: string
default: m1.small
description: flavor to use when booting the swarm node
dns_nameserver:
type: string
description: address of a dns nameserver reachable in your environment
default: 8.8.8.8
http_proxy:
type: string
description: http proxy address for docker
default: ""
https_proxy:
type: string
description: https proxy address for docker
default: ""
no_proxy:
type: string
description: no proxies for docker
default: ""
number_of_masters:
type: number
description: how many swarm masters to spawn
default: 1
number_of_nodes:
type: number
description: how many swarm nodes to spawn
default: 1
fixed_network_cidr:
type: string
description: network range for fixed ip network
default: "10.0.0.0/24"
tls_disabled:
type: boolean
description: whether or not to enable TLS
default: False
network_driver:
type: string
description: network driver to use for instantiating container networks
default: None
flannel_network_cidr:
type: string
description: network range for flannel overlay network
default: 10.100.0.0/16
flannel_network_subnetlen:
type: number
description: size of subnet assigned to each master
default: 24
flannel_backend:
type: string
description: >
      specify the backend for flannel; the default is the udp backend
default: "udp"
constraints:
- allowed_values: ["udp", "vxlan", "host-gw"]
docker_volume_size:
type: number
description: >
size of a cinder volume to allocate to docker for container/image
storage
default: 25
docker_storage_driver:
type: string
description: docker storage driver name
default: "devicemapper"
constraints:
- allowed_values: ["devicemapper", "overlay"]
loadbalancing_protocol:
type: string
description: >
      The protocol which is used for load balancing. If you want to change
      the tls_disabled option to 'True', please change this to "HTTP".
default: TCP
constraints:
- allowed_values: ["TCP", "HTTP"]
swarm_port:
type: number
description: >
      The port which is used by the swarm manager to provide the swarm service.
default: 2376
swarm_version:
type: string
description: version of swarm used for swarm cluster
default: 1.0.0
trustee_domain_id:
type: string
description: domain id of the trustee
default: ""
trustee_user_id:
type: string
description: user id of the trustee
default: ""
trustee_username:
type: string
description: username of the trustee
default: ""
trustee_password:
type: string
description: password of the trustee
default: ""
hidden: true
trust_id:
type: string
description: id of the trust which is used by the trustee
default: ""
hidden: true
auth_url:
type: string
description: url for keystone
registry_enabled:
type: boolean
description: >
Indicates whether the docker registry is enabled.
default: false
registry_port:
type: number
description: port of registry service
default: 5000
swift_region:
type: string
description: region of swift service
default: ""
registry_container:
type: string
description: >
      name of the swift container in which the docker registry stores images
default: "container"
registry_insecure:
type: boolean
description: >
indicates whether to skip TLS verification between registry and backend storage
default: true
registry_chunksize:
type: number
description: >
      size of the data segments for the swift dynamic large objects
default: 5242880
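#
# The optional parameters above are usually overridden through a Heat
# environment file rather than one --parameter flag at a time. A minimal,
# illustrative environment (the values are examples, not driver defaults):
#
#   parameters:
#     number_of_masters: 3
#     number_of_nodes: 5
#     flannel_backend: vxlan
#     docker_storage_driver: overlay
#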
resources:
######################################################################
#
  # network resources. allocate a network and router for our server.
  # it would also be possible to take advantage of existing network
  # resources (and have the deployer provide network and subnet ids,
  # etc, as parameters), but I wanted to minimize the amount of
  # configuration necessary to make this go.
fixed_network:
type: "OS::Neutron::Net"
  # This is the subnet on which we will deploy our server.
fixed_subnet:
type: "OS::Neutron::Subnet"
properties:
cidr: {get_param: fixed_network_cidr}
network_id:
get_resource: fixed_network
dns_nameservers:
- get_param: dns_nameserver
  # create a router attached to the external network provided as a
  # parameter to this stack.
extrouter:
type: "OS::Neutron::Router"
properties:
external_gateway_info:
network:
get_param: external_network
  # attach fixed_subnet to our extrouter router.
extrouter_inside:
type: "OS::Neutron::RouterInterface"
properties:
router_id:
get_resource: extrouter
subnet_id:
get_resource:
fixed_subnet
######################################################################
#
  # security groups. we need to permit network traffic of various
  # sorts.
#
secgroup_manager:
type: "OS::Neutron::SecurityGroup"
properties:
rules:
- protocol: icmp
- protocol: tcp
- protocol: udp
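  # NOTE: as written these rules allow all ICMP, TCP and UDP traffic into the
  # cluster nodes. A deployment that wants tighter rules could scope them to
  # specific ports; illustrative only, not part of this driver:
  #
  #   - protocol: tcp
  #     port_range_min: 2376
  #     port_range_max: 2376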
######################################################################
#
  # load balancers.
#
api_monitor:
type: Magnum::Optional::Neutron::Pool::HealthMonitor
properties:
type: TCP
delay: 5
max_retries: 5
timeout: 5
api_pool:
type: Magnum::Optional::Neutron::Pool
properties:
protocol: {get_param: loadbalancing_protocol}
monitors: [{get_resource: api_monitor}]
subnet: {get_resource: fixed_subnet}
lb_method: ROUND_ROBIN
vip:
protocol_port: {get_param: swarm_port}
api_pool_floating:
type: Magnum::Optional::Neutron::Pool::FloatingIP
depends_on:
- extrouter_inside
properties:
floating_network: {get_param: external_network}
port_id: {get_attr: [api_pool, vip, port_id]}
etcd_monitor:
type: Magnum::Optional::Neutron::Pool::HealthMonitor
properties:
type: TCP
delay: 5
max_retries: 5
timeout: 5
etcd_pool:
type: Magnum::Optional::Neutron::Pool
properties:
protocol: HTTP
monitors: [{get_resource: etcd_monitor}]
subnet: {get_resource: fixed_subnet}
lb_method: ROUND_ROBIN
vip:
protocol_port: 2379
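  # The Magnum::Optional::Neutron::* types above are not stock Heat resources;
  # Magnum maps them through the resource_registry of the environment it sends
  # to Heat, either to the real OS::Neutron::* LBaaS resources (when a master
  # load balancer is wanted) or to OS::Heat::None (when it is not). Roughly,
  # and only as an illustration of the mechanism:
  #
  #   resource_registry:
  #     "Magnum::Optional::Neutron::Pool": "OS::Neutron::Pool"
  #     "Magnum::Optional::Neutron::Pool::HealthMonitor": "OS::Neutron::HealthMonitor"
  #     "Magnum::Optional::Neutron::Pool::FloatingIP": "OS::Neutron::FloatingIP"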
######################################################################
#
  # resources that expose the IPs of either the swarm master or a given
  # LBaaS pool depending on whether LBaaS is enabled for the bay.
#
api_address_switch:
type: Magnum::ApiGatewaySwitcher
properties:
pool_public_ip: {get_attr: [api_pool_floating, floating_ip_address]}
pool_private_ip: {get_attr: [api_pool, vip, address]}
master_public_ip: {get_attr: [swarm_masters, resource.0.swarm_master_external_ip]}
master_private_ip: {get_attr: [swarm_masters, resource.0.swarm_master_ip]}
etcd_address_switch:
type: Magnum::ApiGatewaySwitcher
properties:
pool_private_ip: {get_attr: [etcd_pool, vip, address]}
master_private_ip: {get_attr: [swarm_masters, resource.0.swarm_master_ip]}
######################################################################
#
  # Swarm manager is responsible for the entire cluster and manages the
  # resources of multiple Docker hosts at scale.
  # It supports high availability by creating a primary manager and multiple
  # replica instances.
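  # Each master below is a nested stack defined by swarmmaster.yaml (and each
  # node by swarmnode.yaml); Heat resolves those file names relative to this
  # template's directory.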
swarm_masters:
type: "OS::Heat::ResourceGroup"
depends_on:
- extrouter_inside
properties:
count: {get_param: number_of_masters}
resource_def:
type: swarmmaster.yaml
properties:
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_flavor: {get_param: master_flavor}
docker_volume_size: {get_param: docker_volume_size}
docker_storage_driver: {get_param: docker_storage_driver}
fixed_network_id: {get_resource: fixed_network}
fixed_subnet_id: {get_resource: fixed_subnet}
external_network: {get_param: external_network}
discovery_url: {get_param: discovery_url}
http_proxy: {get_param: http_proxy}
https_proxy: {get_param: https_proxy}
no_proxy: {get_param: no_proxy}
swarm_api_ip: {get_attr: [api_pool, vip, address]}
bay_uuid: {get_param: bay_uuid}
magnum_url: {get_param: magnum_url}
tls_disabled: {get_param: tls_disabled}
secgroup_swarm_master_id: {get_resource: secgroup_manager}
network_driver: {get_param: network_driver}
flannel_network_cidr: {get_param: flannel_network_cidr}
flannel_network_subnetlen: {get_param: flannel_network_subnetlen}
flannel_backend: {get_param: flannel_backend}
swarm_port: {get_param: swarm_port}
api_pool_id: {get_resource: api_pool}
etcd_pool_id: {get_resource: etcd_pool}
etcd_server_ip: {get_attr: [etcd_pool, vip, address]}
api_ip_address: {get_attr: [api_pool_floating, floating_ip_address]}
swarm_version: {get_param: swarm_version}
trustee_user_id: {get_param: trustee_user_id}
trustee_password: {get_param: trustee_password}
trust_id: {get_param: trust_id}
auth_url: {get_param: auth_url}
swarm_nodes:
type: "OS::Heat::ResourceGroup"
depends_on:
- extrouter_inside
properties:
count: {get_param: number_of_nodes}
resource_def:
type: swarmnode.yaml
properties:
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_flavor: {get_param: node_flavor}
docker_volume_size: {get_param: docker_volume_size}
docker_storage_driver: {get_param: docker_storage_driver}
fixed_network_id: {get_resource: fixed_network}
fixed_subnet_id: {get_resource: fixed_subnet}
external_network: {get_param: external_network}
http_proxy: {get_param: http_proxy}
https_proxy: {get_param: https_proxy}
no_proxy: {get_param: no_proxy}
swarm_api_ip: {get_attr: [api_address_switch, private_ip]}
bay_uuid: {get_param: bay_uuid}
magnum_url: {get_param: magnum_url}
tls_disabled: {get_param: tls_disabled}
secgroup_swarm_node_id: {get_resource: secgroup_manager}
flannel_network_cidr: {get_param: flannel_network_cidr}
network_driver: {get_param: network_driver}
etcd_server_ip: {get_attr: [etcd_address_switch, private_ip]}
api_ip_address: {get_attr: [api_address_switch, public_ip]}
swarm_version: {get_param: swarm_version}
trustee_domain_id: {get_param: trustee_domain_id}
trustee_user_id: {get_param: trustee_user_id}
trustee_username: {get_param: trustee_username}
trustee_password: {get_param: trustee_password}
trust_id: {get_param: trust_id}
auth_url: {get_param: auth_url}
registry_enabled: {get_param: registry_enabled}
registry_port: {get_param: registry_port}
swift_region: {get_param: swift_region}
registry_container: {get_param: registry_container}
registry_insecure: {get_param: registry_insecure}
registry_chunksize: {get_param: registry_chunksize}
outputs:
api_address:
value:
str_replace:
template: api_ip_address
params:
api_ip_address: {get_attr: [api_address_switch, public_ip]}
description: >
This is the API endpoint of the Swarm masters. Use this to access
the Swarm API server from outside the cluster.
swarm_masters_private:
value: {get_attr: [swarm_masters, swarm_master_ip]}
description: >
This is a list of the "private" addresses of all the Swarm masters.
swarm_masters:
value: {get_attr: [swarm_masters, swarm_master_external_ip]}
description: >
This is a list of "public" ip addresses of all Swarm masters.
Use these addresses to log into the Swarm masters via ssh.
swarm_nodes_private:
value: {get_attr: [swarm_nodes, swarm_node_ip]}
description: >
This is a list of the "private" addresses of all the Swarm nodes.
swarm_nodes:
value: {get_attr: [swarm_nodes, swarm_node_external_ip]}
description: >
This is a list of the "public" addresses of all the Swarm nodes. Use
these addresses to, e.g., log into the nodes.
discovery_url:
value: {get_param: discovery_url}
description: >
      This is the discovery url for the Swarm cluster.
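#
# Once the stack is up, the outputs above can be read back with the Heat
# client; the stack name below is a placeholder:
#
#   openstack stack output list my-swarm-cluster
#   openstack stack output show my-swarm-cluster api_address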