Swarm: Split swarm.yaml into swarmcluster.yaml and swarmmaster.yaml

Would like to make swarm much more similar to k8s.

This patch is preparation for supporting Swarm HA mode:
1. Refactor swarm.yaml into swarmcluster.yaml and swarmmaster.yaml
2. Add api_pool, a load balancer in front of the swarm master nodes
3. Add etcd_pool

After this change:

Swarm bay will populate the 'master_addresses' field.

Note: Swarm HA is not supported yet; master_addresses will contain only one
IP address.
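
As a sketch of what this means for the bay record (values below are
hypothetical), the new 'master_addresses' field is filled from the stack's
'swarm_masters' output and holds exactly one IP until HA support lands:

    bay = {
        'api_address': '172.24.4.10',      # floating IP bound to the api_pool VIP
        'master_addresses': ['10.0.0.5'],  # from the new 'swarm_masters' output
        'node_addresses': ['10.0.0.6'],
    }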

Partially implements: blueprint swarm-high-availability
Change-Id: Ib6346bfd5a7ad0ef2226a6e6bc98b0ad46e577cb
Eli Qiao 2015-11-16 17:38:15 +08:00
parent 2dff5b1a4e
commit 353ae9b87d
12 changed files with 465 additions and 180 deletions

@@ -55,7 +55,7 @@ template_def_opts = [
help=_('coreos discovery token url.')),
cfg.StrOpt('swarm_atomic_template_path',
default=paths.basedir_def('templates/swarm/'
'swarm.yaml'),
'swarmcluster.yaml'),
help=_('Location of template to build a swarm '
'cluster on atomic.')),
cfg.StrOpt('mesos_ubuntu_template_path',
@@ -558,6 +558,9 @@ class AtomicSwarmTemplateDefinition(BaseTemplateDefinition):
self.add_parameter('number_of_nodes',
bay_attr='node_count',
param_type=str)
self.add_parameter('number_of_masters',
bay_attr='master_count',
param_type=str)
self.add_parameter('server_flavor',
baymodel_attr='flavor_id')
self.add_parameter('docker_volume_size',
@@ -575,8 +578,8 @@ class AtomicSwarmTemplateDefinition(BaseTemplateDefinition):
mapping_type=SwarmApiAddressOutputMapping)
self.add_output('swarm_master_private',
bay_attr=None)
self.add_output('swarm_master',
bay_attr=None)
self.add_output('swarm_masters',
bay_attr='master_addresses')
self.add_output('swarm_nodes_private',
bay_attr=None)
self.add_output('swarm_nodes',
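
The mapping above is what ties bay fields to Heat: 'master_count' is passed in
as the 'number_of_masters' parameter, and the 'swarm_masters' stack output
comes back as bay.master_addresses. A minimal sketch of the idea (not Magnum's
actual classes):

    class ParameterMapping(object):
        # e.g. heat_param='number_of_masters', bay_attr='master_count'
        def __init__(self, heat_param, bay_attr, param_type=str):
            self.heat_param = heat_param
            self.bay_attr = bay_attr
            self.param_type = param_type

        def get_params(self, bay):
            return {self.heat_param: self.param_type(getattr(bay, self.bay_attr))}

    class OutputMapping(object):
        # e.g. heat_output='swarm_masters', bay_attr='master_addresses'
        def __init__(self, heat_output, bay_attr):
            self.heat_output = heat_output
            self.bay_attr = bay_attr

        def set_output(self, bay, stack_outputs):
            if self.bay_attr is not None:
                setattr(bay, self.bay_attr, stack_outputs[self.heat_output])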

@@ -33,7 +33,7 @@ if [ -f "$BASH_RC" ]; then
if [ -n "$NO_PROXY" ]; then
echo "declare -x no_proxy=$NO_PROXY" >> $BASH_RC
else
echo "declare -x no_proxy=$SWARM_MASTER_IP,$SWARM_NODE_IP" >> $BASH_RC
echo "declare -x no_proxy=$SWARM_API_IP,$ETCD_SERVER_IP,$SWARM_NODE_IP" >> $BASH_RC
fi
else
echo "File $BASH_RC does not exist, not setting no_proxy"

@@ -72,7 +72,9 @@ def _get_public_ip():
def _build_subject_alt_names(config):
subject_alt_names = [
'IP:%s' % _get_public_ip(),
'IP:%s' % config['API_IP_ADDRESS'],
'IP:%s' % config['SWARM_NODE_IP'],
'IP:%s' % config['SWARM_API_IP'],
'IP:127.0.0.1'
]
return ','.join(subject_alt_names)
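
A worked example of the SAN string the updated function produces (addresses
are illustrative, and the real function also prepends the instance's public
IP):

    config = {'API_IP_ADDRESS': '172.24.4.10',
              'SWARM_NODE_IP': '10.0.0.5',
              'SWARM_API_IP': '10.0.0.3'}
    subject_alt_names = ['IP:%s' % config['API_IP_ADDRESS'],
                         'IP:%s' % config['SWARM_NODE_IP'],
                         'IP:%s' % config['SWARM_API_IP'],
                         'IP:127.0.0.1']
    assert ','.join(subject_alt_names) == \
        'IP:172.24.4.10,IP:10.0.0.5,IP:10.0.0.3,IP:127.0.0.1'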

@@ -12,7 +12,7 @@ FLANNEL_CONFIG_SERVICE=/etc/systemd/system/flannel-config.service
FLANNEL_JSON=/etc/sysconfig/flannel-network.json
sed -i '
/^FLANNEL_ETCD=/ s|=.*|="http://'"$SWARM_MASTER_IP"':2379"|
/^FLANNEL_ETCD=/ s|=.*|="http://'"$ETCD_SERVER_IP"':2379"|
' $FLANNELD_CONFIG
. $FLANNELD_CONFIG
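
A rough Python equivalent of the sed rewrite above (the same substitution
recurs in the next fragment), purely for illustration:

    import re

    def point_flannel_at_etcd(flanneld_config_text, etcd_server_ip):
        # Rewrite the FLANNEL_ETCD line to target the etcd load balancer VIP.
        return re.sub(r'^FLANNEL_ETCD=.*$',
                      'FLANNEL_ETCD="http://%s:2379"' % etcd_server_ip,
                      flanneld_config_text,
                      flags=re.MULTILINE)

    assert (point_flannel_at_etcd('FLANNEL_ETCD="http://old:2379"', '10.0.0.3')
            == 'FLANNEL_ETCD="http://10.0.0.3:2379"')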

@@ -16,7 +16,7 @@ mkdir -p /etc/systemd/system/docker.service.d
mkdir -p /etc/systemd/system/flanneld.service.d
sed -i '
/^FLANNEL_ETCD=/ s|=.*|="http://'"$SWARM_MASTER_IP"':2379"|
/^FLANNEL_ETCD=/ s|=.*|="http://'"$ETCD_SERVER_IP"':2379"|
' $FLANNELD_CONFIG
cat >> $FLANNEL_DOCKER_BRIDGE_BIN <<EOF

@@ -11,7 +11,7 @@ write_files:
HTTP_PROXY="$HTTP_PROXY"
HTTPS_PROXY="$HTTPS_PROXY"
NO_PROXY="$NO_PROXY"
SWARM_MASTER_IP="$SWARM_MASTER_IP"
SWARM_API_IP="$SWARM_API_IP"
SWARM_NODE_IP="$SWARM_NODE_IP"
BAY_UUID="$BAY_UUID"
USER_TOKEN="$USER_TOKEN"
@@ -21,3 +21,5 @@ write_files:
FLANNEL_NETWORK_CIDR="$FLANNEL_NETWORK_CIDR"
FLANNEL_NETWORK_SUBNETLEN="$FLANNEL_NETWORK_SUBNETLEN"
FLANNEL_USE_VXLAN="$FLANNEL_USE_VXLAN"
ETCD_SERVER_IP="$ETCD_SERVER_IP"
API_IP_ADDRESS="$API_IP_ADDRESS"

@@ -16,7 +16,7 @@ write_files:
ExecStartPre=-/usr/bin/docker kill swarm-agent
ExecStartPre=-/usr/bin/docker rm swarm-agent
ExecStartPre=-/usr/bin/docker pull swarm:$SWARM_VERSION
ExecStart=/usr/bin/docker run -e http_proxy=$HTTP_PROXY -e https_proxy=$HTTPS_PROXY -e no_proxy=$NO_PROXY --name swarm-agent swarm:$SWARM_VERSION join --addr $NODE_IP:2375 etcd://$SWARM_MASTER_IP:2379/v2/keys/swarm/
ExecStart=/usr/bin/docker run -e http_proxy=$HTTP_PROXY -e https_proxy=$HTTPS_PROXY -e no_proxy=$NO_PROXY --name swarm-agent swarm:$SWARM_VERSION join --addr $NODE_IP:2375 etcd://$ETCD_SERVER_IP:2379/v2/keys/swarm/
ExecStop=/usr/bin/docker stop swarm-agent
ExecStartPost=/usr/bin/curl -sf -X PUT -H 'Content-Type: application/json' \
--data-binary '{"Status": "SUCCESS", "Reason": "Setup complete", "Data": "OK", "UniqueId": "00000"}' \

@@ -34,7 +34,7 @@ END_TLS
fi
cat >> /etc/systemd/system/swarm-manager.service << END_SERVICE_BOTTOM
etcd://$SWARM_MASTER_IP:2379/v2/keys/swarm/
etcd://$ETCD_SERVER_IP:2379/v2/keys/swarm/
ExecStop=/usr/bin/docker stop swarm-manager
ExecStartPost=/usr/bin/curl -sf -X PUT -H 'Content-Type: application/json' \\
--data-binary '{"Status": "SUCCESS", "Reason": "Setup complete", "Data": "OK", "UniqueId": "00000"}' \\
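
Both the agent fragment and the manager fragment above now derive their
discovery endpoint from the etcd pool VIP instead of a single master's
address; a minimal sketch:

    def etcd_discovery_url(etcd_server_ip, port=2379):
        # Shared by `swarm join` on agents and the swarm manager on masters.
        return 'etcd://%s:%d/v2/keys/swarm/' % (etcd_server_ip, port)

    assert etcd_discovery_url('10.0.0.3') == 'etcd://10.0.0.3:2379/v2/keys/swarm/'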

@@ -0,0 +1,362 @@
heat_template_version: 2013-05-23
description: >
This template will boot a Docker swarm cluster. A swarm cluster is made up
of several master nodes and N agent nodes. Every node in the cluster,
including the masters, is running a Docker daemon and a swarm agent
advertising it to the cluster. Each master additionally runs a swarm
master container listening on port 2376. By default, the cluster is made
up of one master node and one agent node.
parameters:
#
# REQUIRED PARAMETERS
#
ssh_key_name:
type: string
description: name of ssh key to be provisioned on our server
external_network:
type: string
description: uuid/name of a network to use for floating ip addresses
discovery_url:
type: string
description: url provided for node discovery
user_token:
type: string
description: token used for communicating back to Magnum for TLS certs
bay_uuid:
type: string
description: identifier for the bay this template is generating
magnum_url:
type: string
description: endpoint to retrieve TLS certs from
#
# OPTIONAL PARAMETERS
#
server_image:
type: string
default: fedora-atomic
description: glance image used to boot the server
server_flavor:
type: string
default: m1.small
description: flavor to use when booting the server
dns_nameserver:
type: string
description: address of a dns nameserver reachable in your environment
default: 8.8.8.8
http_proxy:
type: string
description: http proxy address for docker
default: ""
https_proxy:
type: string
description: https proxy address for docker
default: ""
no_proxy:
type: string
description: no proxies for docker
default: ""
number_of_masters:
type: string
description: how many swarm masters to spawn
default: 1
number_of_nodes:
type: string
description: how many swarm nodes to spawn
default: 1
fixed_network_cidr:
type: string
description: network range for fixed ip network
default: "10.0.0.0/24"
tls_disabled:
type: boolean
description: whether or not to enable TLS
default: False
network_driver:
type: string
description: network driver to use for instantiating container networks
default: None
flannel_network_cidr:
type: string
description: network range for flannel overlay network
default: 10.100.0.0/16
flannel_network_subnetlen:
type: string
description: size of subnet assigned to each master
default: 24
flannel_use_vxlan:
type: string
description: >
if true use the vxlan backend, otherwise use the default
udp backend
default: "false"
constraints:
- allowed_values: ["true", "false"]
docker_volume_size:
type: number
description: >
size of a cinder volume to allocate to docker for container/image
storage
default: 25
loadbalancing_protocol:
type: string
description: >
The protocol which is used for load balancing. If you want to set the
tls_disabled option to 'True', please change this to "HTTP".
default: TCP
constraints:
- allowed_values: ["TCP", "HTTP"]
swarm_port:
type: number
description: >
The port used by the swarm manager to provide the swarm service.
default: 2376
swarm_version:
type: string
description: version of swarm used for swarm cluster
default: 1.0.0
resources:
######################################################################
#
# network resources. allocate a network and router for our server.
# it would also be possible to take advantage of existing network
# resources (and have the deployer provide network and subnet ids,
etc, as parameters), but I wanted to minimize the amount of
# configuration necessary to make this go.
fixed_network:
type: "OS::Neutron::Net"
# This is the subnet on which we will deploy our server.
fixed_subnet:
type: "OS::Neutron::Subnet"
properties:
cidr: {get_param: fixed_network_cidr}
network_id:
get_resource: fixed_network
dns_nameservers:
- get_param: dns_nameserver
# create a router attached to the external network provided as a
# parameter to this stack.
extrouter:
type: "OS::Neutron::Router"
properties:
external_gateway_info:
network:
get_param: external_network
# attach fixed_subnet to our extrouter router.
extrouter_inside:
type: "OS::Neutron::RouterInterface"
properties:
router_id:
get_resource: extrouter
subnet_id:
get_resource:
fixed_subnet
######################################################################
#
# security groups. we need to permit network traffic of various
# sorts.
#
secgroup_manager:
type: "OS::Neutron::SecurityGroup"
properties:
rules:
- protocol: icmp
- protocol: tcp
- protocol: udp
######################################################################
#
# load balancers.
#
api_monitor:
type: OS::Neutron::HealthMonitor
properties:
type: TCP
delay: 5
max_retries: 5
timeout: 5
api_pool:
type: OS::Neutron::Pool
properties:
protocol: {get_param: loadbalancing_protocol}
monitors: [{get_resource: api_monitor}]
subnet: {get_resource: fixed_subnet}
lb_method: ROUND_ROBIN
vip:
protocol_port: {get_param: swarm_port}
api_pool_floating:
type: OS::Neutron::FloatingIP
depends_on:
- extrouter_inside
properties:
floating_network: {get_param: external_network}
port_id: {get_attr: [api_pool, vip, port_id]}
etcd_monitor:
type: OS::Neutron::HealthMonitor
properties:
type: TCP
delay: 5
max_retries: 5
timeout: 5
etcd_pool:
type: OS::Neutron::Pool
properties:
protocol: HTTP
monitors: [{get_resource: etcd_monitor}]
subnet: {get_resource: fixed_subnet}
lb_method: ROUND_ROBIN
vip:
protocol_port: 2379
######################################################################
#
# Swarm manager is responsible for the entire cluster and manages the
# resources of multiple Docker hosts at scale.
# It supports high availability by creating a primary manager and multiple
# replica instances.
swarm_masters:
type: "OS::Heat::ResourceGroup"
depends_on:
- extrouter_inside
properties:
count: {get_param: number_of_masters}
resource_def:
type: swarmmaster.yaml
properties:
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_flavor: {get_param: server_flavor}
docker_volume_size: {get_param: docker_volume_size}
fixed_network_id: {get_resource: fixed_network}
fixed_subnet_id: {get_resource: fixed_subnet}
external_network: {get_param: external_network}
discovery_url: {get_param: discovery_url}
http_proxy: {get_param: http_proxy}
https_proxy: {get_param: https_proxy}
no_proxy: {get_param: no_proxy}
swarm_api_ip: {get_attr: [api_pool, vip, address]}
bay_uuid: {get_param: bay_uuid}
user_token: {get_param: user_token}
magnum_url: {get_param: magnum_url}
tls_disabled: {get_param: tls_disabled}
secgroup_swarm_master_id: {get_resource: secgroup_manager}
network_driver: {get_param: network_driver}
flannel_network_cidr: {get_param: flannel_network_cidr}
flannel_network_subnetlen: {get_param: flannel_network_subnetlen}
flannel_use_vxlan: {get_param: flannel_use_vxlan}
swarm_port: {get_param: swarm_port}
api_pool_id: {get_resource: api_pool}
etcd_pool_id: {get_resource: etcd_pool}
etcd_server_ip: {get_attr: [etcd_pool, vip, address]}
api_ip_address: {get_attr: [api_pool_floating, floating_ip_address]}
swarm_version: {get_param: swarm_version}
swarm_nodes:
type: "OS::Heat::ResourceGroup"
depends_on:
- extrouter_inside
- swarm_masters
properties:
count: {get_param: number_of_nodes}
resource_def:
type: swarmnode.yaml
properties:
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_flavor: {get_param: server_flavor}
docker_volume_size: {get_param: docker_volume_size}
fixed_network_id: {get_resource: fixed_network}
fixed_subnet_id: {get_resource: fixed_subnet}
external_network: {get_param: external_network}
discovery_url: {get_param: discovery_url}
http_proxy: {get_param: http_proxy}
https_proxy: {get_param: https_proxy}
no_proxy: {get_param: no_proxy}
swarm_api_ip: {get_attr: [api_pool, vip, address]}
bay_uuid: {get_param: bay_uuid}
user_token: {get_param: user_token}
magnum_url: {get_param: magnum_url}
tls_disabled: {get_param: tls_disabled}
secgroup_swarm_node_id: {get_resource: secgroup_manager}
network_driver: {get_param: network_driver}
etcd_server_ip: {get_attr: [etcd_pool, vip, address]}
api_ip_address: {get_attr: [api_pool_floating, floating_ip_address]}
swarm_version: {get_param: swarm_version}
outputs:
api_address:
value:
str_replace:
template: api_ip_address
params:
api_ip_address: {get_attr: [api_pool_floating, floating_ip_address]}
description: >
This is the API endpoint of the Swarm masters. Use this to access
the Swarm API server from outside the cluster.
swarm_masters_private:
value: {get_attr: [swarm_masters, swarm_master_ip]}
description: >
This is a list of the "private" addresses of all the Swarm masters.
swarm_masters:
value: {get_attr: [swarm_masters, swarm_master_external_ip]}
description: >
This is a list of "public" ip addresses of all Swarm masters.
Use these addresses to log into the Swarm masters via ssh.
swarm_nodes_private:
value: {get_attr: [swarm_nodes, swarm_node_ip]}
description: >
This is a list of the "private" addresses of all the Swarm nodes.
swarm_nodes:
value: {get_attr: [swarm_nodes, swarm_node_external_ip]}
description: >
This is a list of the "public" addresses of all the Swarm nodes. Use
these addresses to, e.g., log into the nodes.
discovery_url:
value: {get_param: discovery_url}
description: >
This is the discovery url for the Swarm cluster.
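
For reference, a hedged sketch of launching this new top-level template by
hand with python-heatclient (Magnum drives this internally when creating a
swarm bay; the endpoint, token, and parameter values below are assumptions):

    from heatclient.client import Client as HeatClient

    heat = HeatClient('1', 'http://heat.example.com:8004/v1/tenant',
                      token='auth-token')
    heat.stacks.create(
        stack_name='swarm-bay',
        template=open('swarmcluster.yaml').read(),
        parameters={
            'ssh_key_name': 'default',
            'external_network': 'public',
            'discovery_url': 'https://discovery.etcd.io/test',
            'user_token': 'fake_token',
            'bay_uuid': 'some_uuid',
            'magnum_url': 'http://127.0.0.1:9511/v1',
            'number_of_masters': '1',  # keep at 1 until HA is supported
            'number_of_nodes': '2',
        })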

@@ -1,12 +1,9 @@
heat_template_version: 2013-05-23
description: >
This template will boot a Docker swarm cluster. A swarm cluster is made up
of a single master node, and N agent nodes. Every node in the cluster,
including the master, is running a Docker daemon and a swarm agent
advertising it to the cluster. The master is running an addition swarm
master container listening on port 2376. By default, the cluster is made
up of one master node and one agent node.
This template will boot a Docker swarm master node. A swarm master node runs
a Docker daemon and a swarm master container listening on port 2376, and a
swarm agent advertising it to the cluster.
parameters:
@@ -37,82 +34,69 @@ parameters:
type: string
description: endpoint to retrieve TLS certs from
#
# OPTIONAL PARAMETERS
#
fixed_network_id:
type: string
description: Network from which to allocate fixed addresses.
fixed_subnet_id:
type: string
description: Subnet from which to allocate fixed addresses.
swarm_api_ip:
type: string
description: swarm master's api server ip address
api_ip_address:
type: string
description: swarm master's api server public ip address
server_image:
type: string
default: fedora-atomic
description: glance image used to boot the server
server_flavor:
type: string
default: m1.small
description: flavor to use when booting the server
dns_nameserver:
type: string
description: address of a dns nameserver reachable in your environment
default: 8.8.8.8
http_proxy:
type: string
description: http proxy address for docker
default: ""
https_proxy:
type: string
description: https proxy address for docker
default: ""
no_proxy:
type: string
description: no proxies for docker
default: ""
number_of_nodes:
type: string
description: how many swarm nodes to spawn
default: 1
docker_volume_size:
type: number
description: >
size of a cinder volume to allocate to docker for container/image
storage
default: 25
fixed_network_cidr:
type: string
description: network range for fixed ip network
default: "10.0.0.0/24"
tls_disabled:
type: boolean
description: whether or not to enable TLS
default: False
network_driver:
type: string
description: network driver to use for instantiating container networks
default: None
flannel_network_cidr:
type: string
description: network range for flannel overlay network
default: 10.100.0.0/16
flannel_network_subnetlen:
type: string
description: size of subnet assigned to each master
default: 24
flannel_use_vxlan:
type: string
description: >
if true use the vxlan backend, otherwise use the default
udp backend
default: "false"
constraints:
- allowed_values: ["true", "false"]
@@ -121,6 +105,27 @@ parameters:
description: version of swarm used for swarm cluster
default: 1.0.0
secgroup_swarm_master_id:
type: string
description: ID of the security group for swarm master.
swarm_port:
type: number
description: >
The port used by the swarm manager to provide the swarm service.
api_pool_id:
type: string
description: ID of the load balancer pool of the swarm master servers.
etcd_pool_id:
type: string
description: ID of the load balancer pool of etcd server.
etcd_server_ip:
type: string
description: IP address (VIP) of the etcd load balancer pool.
resources:
cloud_init_wait_handle:
@@ -159,61 +164,6 @@ resources:
get_resource: agent_wait_handle
Timeout: 6000
######################################################################
#
# network resources. allocate a network and router for our server.
# it would also be possible to take advantage of existing network
# resources (and have the deployer provide network and subnet ids,
# etc, as parameters), but I wanted to minmize the amount of
# configuration necessary to make this go.
fixed_network:
type: "OS::Neutron::Net"
# This is the subnet on which we will deploy our server.
fixed_subnet:
type: "OS::Neutron::Subnet"
properties:
cidr: {get_param: fixed_network_cidr}
network_id:
get_resource: fixed_network
dns_nameservers:
- get_param: dns_nameserver
# create a router attached to the external network provided as a
# parameter to this stack.
extrouter:
type: "OS::Neutron::Router"
properties:
external_gateway_info:
network:
get_param: external_network
# attached fixed_subnet to our extrouter router.
extrouter_inside:
type: "OS::Neutron::RouterInterface"
properties:
router_id:
get_resource: extrouter
subnet_id:
get_resource:
fixed_subnet
######################################################################
#
# security groups. we need to permit network traffic of various
# sorts.
#
secgroup_manager:
type: "OS::Neutron::SecurityGroup"
properties:
rules:
- protocol: icmp
- protocol: tcp
- protocol: udp
######################################################################
#
# software configs. these are components that are combined into
@@ -233,7 +183,7 @@ resources:
"$HTTP_PROXY": {get_param: http_proxy}
"$HTTPS_PROXY": {get_param: https_proxy}
"$NO_PROXY": {get_param: no_proxy}
"$SWARM_MASTER_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
"$SWARM_API_IP": {get_param: swarm_api_ip}
"$SWARM_NODE_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
"$BAY_UUID": {get_param: bay_uuid}
"$USER_TOKEN": {get_param: user_token}
@@ -243,6 +193,9 @@ resources:
"$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr}
"$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen}
"$FLANNEL_USE_VXLAN": {get_param: flannel_use_vxlan}
"$ETCD_SERVER_IP": {get_param: etcd_server_ip}
"$API_IP_ADDRESS": {get_param: api_ip_address}
write_network_config:
type: "OS::Heat::SoftwareConfig"
@@ -328,7 +281,7 @@ resources:
str_replace:
template: {get_file: fragments/write-swarm-agent-service.yaml}
params:
"$SWARM_MASTER_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
"$ETCD_SERVER_IP": {get_param: etcd_server_ip}
"$NODE_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
"$DISCOVERY_URL": {get_param: discovery_url}
"$WAIT_HANDLE": {get_resource: agent_wait_handle}
@@ -345,7 +298,7 @@ resources:
str_replace:
template: {get_file: fragments/write-swarm-master-service.sh}
params:
"$SWARM_MASTER_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
"$ETCD_SERVER_IP": {get_param: etcd_server_ip}
"$DISCOVERY_URL": {get_param: discovery_url}
"$WAIT_HANDLE": {get_resource: master_wait_handle}
"$HTTP_PROXY": {get_param: http_proxy}
@@ -420,8 +373,6 @@ resources:
swarm_master:
type: "OS::Nova::Server"
depends_on:
- extrouter_inside
properties:
image:
get_param: server_image
@@ -439,49 +390,34 @@ resources:
type: "OS::Neutron::Port"
properties:
network_id:
get_resource: fixed_network
get_param: fixed_network_id
security_groups:
- get_resource: secgroup_manager
- {get_param: secgroup_swarm_master_id}
fixed_ips:
- subnet_id:
get_resource: fixed_subnet
get_param: fixed_subnet_id
swarm_master_floating:
type: "OS::Neutron::FloatingIP"
depends_on:
- extrouter_inside
properties:
floating_network:
get_param: external_network
port_id:
get_resource: swarm_master_eth0
swarm_nodes:
type: "OS::Heat::ResourceGroup"
depends_on:
- extrouter_inside
api_pool_member:
type: OS::Neutron::PoolMember
properties:
count: {get_param: number_of_nodes}
resource_def:
type: swarmnode.yaml
pool_id: {get_param: api_pool_id}
address: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
protocol_port: {get_param: swarm_port}
etcd_pool_member:
type: OS::Neutron::PoolMember
properties:
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_flavor: {get_param: server_flavor}
docker_volume_size: {get_param: docker_volume_size}
fixed_network_id: {get_resource: fixed_network}
fixed_subnet_id: {get_resource: fixed_subnet}
external_network: {get_param: external_network}
discovery_url: {get_param: discovery_url}
http_proxy: {get_param: http_proxy}
https_proxy: {get_param: https_proxy}
no_proxy: {get_param: no_proxy}
swarm_master_ip: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
bay_uuid: {get_param: bay_uuid}
user_token: {get_param: user_token}
magnum_url: {get_param: magnum_url}
tls_disabled: {get_param: tls_disabled}
network_driver: {get_param: network_driver}
pool_id: {get_param: etcd_pool_id}
address: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
protocol_port: 2379
######################################################################
#
@@ -503,35 +439,13 @@ resources:
outputs:
api_address:
value: {get_attr: [swarm_master_floating, floating_ip_address]}
description: >
This is the API endpoint of the Swarm masters. Use this to access
the Swarm API server from outside the cluster.
swarm_master_private:
swarm_master_ip:
value: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
description: >
This is a list of the "private" addresses of all the Swarm masters.
This is the "private" addresses of all the Swarm master.
swarm_master:
swarm_master_external_ip:
value: {get_attr: [swarm_master_floating, floating_ip_address]}
description: >
This is a list of "public" ip addresses of all Swarm master.
Use these addresses to log into the Swarm masters via ssh.
This is the "public" ip addresses of Swarm master.
swarm_nodes_private:
value: {get_attr: [swarm_nodes, swarm_node_ip]}
description: >
This is a list of the "private" addresses of all the Swarm nodes.
swarm_nodes:
value: {get_attr: [swarm_nodes, swarm_node_external_ip]}
description: >
This is a list of the "public" addresses of all the Swarm nodes. Use
these addresses to, e.g., log into the nodes.
discovery_url:
value: {get_param: discovery_url}
description: >
This the discovery url for Swarm cluster.
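
With this restructuring, each nested swarmmaster.yaml stack exposes
per-master outputs ('swarm_master_ip', 'swarm_master_external_ip') that the
ResourceGroup in swarmcluster.yaml aggregates into lists; a toy illustration
(hypothetical values):

    nested_master_stacks = [
        {'swarm_master_ip': '10.0.0.5',
         'swarm_master_external_ip': '172.24.4.11'},
    ]
    swarm_masters_private = [s['swarm_master_ip'] for s in nested_master_stacks]
    swarm_masters = [s['swarm_master_external_ip'] for s in nested_master_stacks]
    # 'swarm_masters' is what Magnum stores as bay.master_addresses.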

@@ -64,9 +64,13 @@ parameters:
description: no proxies for docker
default: ""
swarm_master_ip:
swarm_api_ip:
type: string
description: swarm master's ip address
description: swarm master's api server ip address
api_ip_address:
type: string
description: swarm master's api server public ip address
user_token:
type: string
@@ -89,6 +93,14 @@ parameters:
description: version of swarm used for swarm cluster
default: 1.0.0
secgroup_swarm_node_id:
type: string
description: ID of the security group for swarm node.
etcd_server_ip:
type: string
description: IP address (VIP) of the etcd load balancer pool.
resources:
node_cloud_init_wait_handle:
@@ -115,21 +127,6 @@ resources:
get_resource: node_agent_wait_handle
Timeout: 6000
######################################################################
#
# security groups. we need to permit network traffic of various
# sorts.
#
secgroup_node:
type: "OS::Neutron::SecurityGroup"
properties:
rules:
- protocol: icmp
- protocol: tcp
- protocol: udp
######################################################################
#
# software configs. these are components that are combined into
@@ -147,13 +144,15 @@ resources:
"$HTTP_PROXY": {get_param: http_proxy}
"$HTTPS_PROXY": {get_param: https_proxy}
"$NO_PROXY": {get_param: no_proxy}
"$SWARM_MASTER_IP": {get_param: swarm_master_ip}
"$SWARM_API_IP": {get_param: swarm_api_ip}
"$SWARM_NODE_IP": {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]}
"$BAY_UUID": {get_param: bay_uuid}
"$USER_TOKEN": {get_param: user_token}
"$MAGNUM_URL": {get_param: magnum_url}
"$TLS_DISABLED": {get_param: tls_disabled}
"$NETWORK_DRIVER": {get_param: network_driver}
"$ETCD_SERVER_IP": {get_param: etcd_server_ip}
"$API_IP_ADDRESS": {get_param: api_ip_address}
configure_swarm:
type: "OS::Heat::SoftwareConfig"
@@ -217,13 +216,13 @@ resources:
template: {get_file: fragments/write-swarm-agent-service.yaml}
params:
"$NODE_IP": {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]}
"$SWARM_MASTER_IP": {get_param: swarm_master_ip}
"$DISCOVERY_URL": {get_param: discovery_url}
"$WAIT_HANDLE": {get_resource: node_agent_wait_handle}
"$HTTP_PROXY": {get_param: http_proxy}
"$HTTPS_PROXY": {get_param: https_proxy}
"$NO_PROXY": {get_param: no_proxy}
"$SWARM_VERSION": {get_param: swarm_version}
"$ETCD_SERVER_IP": {get_param: etcd_server_ip}
enable_services:
type: "OS::Heat::SoftwareConfig"
@@ -293,7 +292,7 @@ resources:
network_id:
get_param: fixed_network_id
security_groups:
- get_resource: secgroup_node
- {get_param: secgroup_swarm_node_id}
fixed_ips:
- subnet_id:
get_param: fixed_subnet_id

@@ -53,6 +53,7 @@ class TestBayConductorWithSwarm(base.TestCase):
'stack_id': 'xx-xx-xx-xx',
'api_address': '172.17.2.3',
'node_addresses': ['172.17.2.4'],
'master_count': 1,
'node_count': 1,
'discovery_url': 'https://discovery.test.io/123456789',
}
@@ -83,6 +84,7 @@ class TestBayConductorWithSwarm(base.TestCase):
'dns_nameserver': 'dns_nameserver',
'server_image': 'image_id',
'server_flavor': 'flavor_id',
'number_of_masters': '1',
'number_of_nodes': '1',
'docker_volume_size': 20,
'fixed_network_cidr': '10.2.0.0/22',
@@ -124,6 +126,7 @@ class TestBayConductorWithSwarm(base.TestCase):
expected = {
'ssh_key_name': 'keypair_id',
'external_network': 'external_network_id',
'number_of_masters': '1',
'number_of_nodes': '1',
'discovery_url': 'https://discovery.etcd.io/test',
'user_token': 'fake_token',