Add swarm-mode driver
* add swarm-mode config
* remove etcd, flannel, discovery_url, swift_registry
* add swarm-mode COE obj
* add functional test: create cluster, create/remove service and delete cluster

Co-Authored-By: ArchiFleKs <kevin.lefevre@osones.io>
Implements: blueprint swarm-mode-support
Change-Id: Iba177be167cb3a3866441d5f42670171f26c5a86
This commit is contained in: parent 6cd8d62d6a, commit b4386f83ad
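For orientation, the functional test added here exercises the new COE end to end: create a swarm-mode cluster, create and remove a Docker service against it, then delete the cluster. The snippet below is a minimal sketch of only the service create/remove step, assuming the docker SDK for Python (2.x) and a hypothetical cluster API address with TLS disabled; it is illustrative and is not the test code added by this commit.

import docker

# Hypothetical endpoint; in practice use the cluster's api_address reported
# by Magnum, which is tcp://<address>:2375 when TLS is disabled.
client = docker.DockerClient(base_url="tcp://192.0.2.10:2375")

# Create a service on the swarm-mode cluster, check it is listed, remove it.
service = client.services.create("nginx:latest", name="smoke-test")
assert any(s.id == service.id for s in client.services.list())
service.remove()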
@@ -206,7 +206,7 @@ class Validator(object):
     def get_coe_validator(cls, coe):
         if coe == 'kubernetes':
             return K8sValidator()
-        elif coe == 'swarm':
+        elif coe == 'swarm' or coe == 'swarm-mode':
             return SwarmValidator()
         elif coe == 'mesos':
             return MesosValidator()
130  magnum/drivers/heat/swarm_mode_template_def.py  Normal file
@@ -0,0 +1,130 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnum.drivers.heat import template_def
from oslo_config import cfg

CONF = cfg.CONF
DOCKER_PORT = '2375'


class SwarmModeApiAddressOutputMapping(template_def.OutputMapping):

    def set_output(self, stack, cluster_template, cluster):
        if self.cluster_attr is None:
            return

        output_value = self.get_output_value(stack)
        if output_value is not None:
            # Note(rocha): protocol should always be tcp as the docker
            # command client does not handle https (see bug #1604812).
            params = {
                'protocol': 'tcp',
                'address': output_value,
                'port': DOCKER_PORT,
            }
            value = "%(protocol)s://%(address)s:%(port)s" % params
            setattr(cluster, self.cluster_attr, value)


class SwarmModeMasterAddressesOutputMapping(template_def.OutputMapping):

    def set_output(self, stack, cluster_template, cluster):
        if self.cluster_attr is None:
            return

        _master_addresses = []
        for output in stack.to_dict().get('outputs', []):
            if output['output_key'] == 'swarm_primary_master':
                _master_addresses.append(output['output_value'][0])
            elif output['output_key'] == 'swarm_secondary_masters':
                _master_addresses += output['output_value']
        setattr(cluster, self.cluster_attr, _master_addresses)


class SwarmModeTemplateDefinition(template_def.BaseTemplateDefinition):
    """Docker swarm mode template."""

    def __init__(self):
        super(SwarmModeTemplateDefinition, self).__init__()
        self.add_parameter('cluster_uuid',
                           cluster_attr='uuid',
                           param_type=str)
        self.add_parameter('number_of_nodes',
                           cluster_attr='node_count')
        self.add_parameter('master_flavor',
                           cluster_template_attr='master_flavor_id')
        self.add_parameter('node_flavor',
                           cluster_template_attr='flavor_id')
        self.add_parameter('docker_volume_size',
                           cluster_attr='docker_volume_size')
        self.add_parameter('volume_driver',
                           cluster_template_attr='volume_driver')
        self.add_parameter('external_network',
                           cluster_template_attr='external_network_id',
                           required=True)
        self.add_parameter('fixed_network',
                           cluster_template_attr='fixed_network')
        self.add_parameter('fixed_subnet',
                           cluster_template_attr='fixed_subnet')
        self.add_parameter('tls_disabled',
                           cluster_template_attr='tls_disabled',
                           required=True)
        self.add_parameter('docker_storage_driver',
                           cluster_template_attr='docker_storage_driver')

        self.add_output('api_address',
                        cluster_attr='api_address',
                        mapping_type=SwarmModeApiAddressOutputMapping)
        self.add_output('swarm_primary_master_private',
                        cluster_attr=None)
        self.add_output('swarm_primary_master',
                        cluster_attr='master_addresses',
                        mapping_type=SwarmModeMasterAddressesOutputMapping)
        self.add_output('swarm_nodes_private',
                        cluster_attr=None)
        self.add_output('swarm_nodes',
                        cluster_attr='node_addresses')

    def get_params(self, context, cluster_template, cluster, **kwargs):
        extra_params = kwargs.pop('extra_params', {})
        # HACK(apmelton) - This uses the user's bearer token, ideally
        # it should be replaced with an actual trust token with only
        # access to do what the template needs it to do.
        osc = self.get_osc(context)
        extra_params['magnum_url'] = osc.magnum_url()

        label_list = ['rexray_preempt']

        extra_params['auth_url'] = context.auth_url

        for label in label_list:
            extra_params[label] = cluster_template.labels.get(label)

        # set docker_volume_type
        # use the configuration default if None provided
        docker_volume_type = cluster_template.labels.get(
            'docker_volume_type', CONF.cinder.default_docker_volume_type)
        extra_params['docker_volume_type'] = docker_volume_type

        return super(SwarmModeTemplateDefinition,
                     self).get_params(context, cluster_template, cluster,
                                      extra_params=extra_params,
                                      **kwargs)

    def get_env_files(self, cluster_template, cluster):
        env_files = []

        template_def.add_priv_net_env_file(env_files, cluster_template)
        template_def.add_volume_env_file(env_files, cluster)
        template_def.add_lb_env_file(env_files, cluster_template)

        return env_files
0  magnum/drivers/swarm_fedora_atomic_v2/__init__.py  Normal file
34  magnum/drivers/swarm_fedora_atomic_v2/driver.py  Normal file
@@ -0,0 +1,34 @@
# Copyright 2016 Rackspace Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from magnum.drivers.heat import driver
from magnum.drivers.swarm_fedora_atomic_v2 import monitor
from magnum.drivers.swarm_fedora_atomic_v2 import template_def


class Driver(driver.HeatDriver):

    @property
    def provides(self):
        return [
            {'server_type': 'vm',
             'os': 'fedora-atomic',
             'coe': 'swarm-mode'},
        ]

    def get_template_definition(self):
        return template_def.AtomicSwarmTemplateDefinition()

    def get_monitor(self, context, cluster):
        return monitor.SwarmMonitor(context, cluster)
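As background on how this class is picked up: Magnum matches a cluster template's (server_type, os, coe) triple against each installed driver's provides list. The sketch below is a simplified illustration of that matching, not Magnum's actual loader, which discovers Driver classes through its driver entry points.

# Simplified sketch of selecting a driver by (server_type, os, coe).
# Illustrative only; Magnum's real lookup lives in its driver-loading code.
def pick_driver(drivers, server_type, os_distro, coe):
    wanted = {'server_type': server_type, 'os': os_distro, 'coe': coe}
    for drv in drivers:
        if wanted in drv.provides:
            return drv
    raise LookupError("no driver provides %s" % wanted)

# Example: pick_driver([Driver()], 'vm', 'fedora-atomic', 'swarm-mode')
# returns the swarm_fedora_atomic_v2 Driver defined above.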
107  magnum/drivers/swarm_fedora_atomic_v2/monitor.py  Normal file
@@ -0,0 +1,107 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_log import log

from magnum.common import docker_utils
from magnum.conductor import monitors

LOG = log.getLogger(__name__)


class SwarmMonitor(monitors.MonitorBase):

    def __init__(self, context, cluster):
        super(SwarmMonitor, self).__init__(context, cluster)
        self.data = {}
        self.data['nodes'] = []
        self.data['containers'] = []

    @property
    def metrics_spec(self):
        return {
            'memory_util': {
                'unit': '%',
                'func': 'compute_memory_util',
            },
        }

    def pull_data(self):
        with docker_utils.docker_for_cluster(self.context,
                                             self.cluster) as docker:
            system_info = docker.info()
            self.data['nodes'] = self._parse_node_info(system_info)

            # pull data from each container
            containers = []
            for container in docker.containers(all=True):
                try:
                    container = docker.inspect_container(container['Id'])
                except Exception as e:
                    LOG.warning("Ignore error [%(e)s] when inspecting "
                                "container %(container_id)s.",
                                {'e': e, 'container_id': container['Id']},
                                exc_info=True)
                containers.append(container)
            self.data['containers'] = containers

    def compute_memory_util(self):
        mem_total = 0
        for node in self.data['nodes']:
            mem_total += node['MemTotal']
        mem_reserved = 0
        for container in self.data['containers']:
            mem_reserved += container['HostConfig']['Memory']

        if mem_total == 0:
            return 0
        else:
            return mem_reserved * 100 / mem_total

    def _parse_node_info(self, system_info):
        """Parse system_info to retrieve memory size of each node.

        :param system_info: The output returned by docker.info(). Example:
            {
                u'Debug': False,
                u'NEventsListener': 0,
                u'DriverStatus': [
                    [u'\x08Strategy', u'spread'],
                    [u'\x08Filters', u'...'],
                    [u'\x08Nodes', u'2'],
                    [u'node1', u'10.0.0.4:2375'],
                    [u' \u2514 Containers', u'1'],
                    [u' \u2514 Reserved CPUs', u'0 / 1'],
                    [u' \u2514 Reserved Memory', u'0 B / 2.052 GiB'],
                    [u'node2', u'10.0.0.3:2375'],
                    [u' \u2514 Containers', u'2'],
                    [u' \u2514 Reserved CPUs', u'0 / 1'],
                    [u' \u2514 Reserved Memory', u'0 B / 2.052 GiB']
                ],
                u'Containers': 3
            }
        :return: Memory size of each node. Example:
            [{'MemTotal': 2203318222.848},
             {'MemTotal': 2203318222.848}]
        """
        nodes = []
        for info in system_info['DriverStatus']:
            key = info[0]
            value = info[1]
            if key == u' \u2514 Reserved Memory':
                memory = value  # Example: '0 B / 2.052 GiB'
                memory = memory.split('/')[1].strip()  # Example: '2.052 GiB'
                memory = memory.split(' ')[0]  # Example: '2.052'
                memory = float(memory) * 1024 * 1024 * 1024
                nodes.append({'MemTotal': memory})
        return nodes
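To make the memory_util metric concrete, here is a small worked example with hypothetical numbers (two nodes of 2 GiB each and two containers reserving 512 MiB each), following the arithmetic in compute_memory_util above:

# Hypothetical figures illustrating compute_memory_util():
mem_total = 2 * 2 * 1024 ** 3        # two nodes, 2 GiB each -> 4 GiB total
mem_reserved = 2 * 512 * 1024 ** 2   # two containers, 512 MiB each -> 1 GiB
print(mem_reserved * 100 / mem_total)  # 25.0, i.e. 25% memory_util reported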
39  magnum/drivers/swarm_fedora_atomic_v2/template_def.py  Normal file
@@ -0,0 +1,39 @@
# Copyright 2016 Rackspace Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os

from magnum.drivers.heat import swarm_mode_template_def as sftd


class AtomicSwarmTemplateDefinition(sftd.SwarmModeTemplateDefinition):
    """Docker swarm template for a Fedora Atomic VM."""

    @property
    def driver_module_path(self):
        return __name__[:__name__.rindex('.')]

    @property
    def template_path(self):
        return os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'templates/swarmcluster.yaml')

    def get_params(self, context, cluster_template, cluster, **kwargs):
        ep = kwargs.pop('extra_params', {})

        ep['number_of_secondary_masters'] = cluster.master_count - 1

        return super(AtomicSwarmTemplateDefinition,
                     self).get_params(context, cluster_template, cluster,
                                      extra_params=ep,
                                      **kwargs)
28  fragments/write-heat-params-master.yaml  Normal file
@@ -0,0 +1,28 @@
#cloud-config
merge_how: dict(recurse_array)+list(append)
write_files:
  - path: /etc/sysconfig/heat-params
    owner: "root:root"
    permissions: "0600"
    content: |
      IS_PRIMARY_MASTER="$IS_PRIMARY_MASTER"
      WAIT_CURL="$WAIT_CURL"
      DOCKER_VOLUME="$DOCKER_VOLUME"
      DOCKER_VOLUME_SIZE="$DOCKER_VOLUME_SIZE"
      DOCKER_STORAGE_DRIVER="$DOCKER_STORAGE_DRIVER"
      HTTP_PROXY="$HTTP_PROXY"
      HTTPS_PROXY="$HTTPS_PROXY"
      NO_PROXY="$NO_PROXY"
      PRIMARY_MASTER_IP="$PRIMARY_MASTER_IP"
      SWARM_API_IP="$SWARM_API_IP"
      SWARM_NODE_IP="$SWARM_NODE_IP"
      CLUSTER_UUID="$CLUSTER_UUID"
      MAGNUM_URL="$MAGNUM_URL"
      TLS_DISABLED="$TLS_DISABLED"
      API_IP_ADDRESS="$API_IP_ADDRESS"
      TRUSTEE_USER_ID="$TRUSTEE_USER_ID"
      TRUSTEE_PASSWORD="$TRUSTEE_PASSWORD"
      TRUST_ID="$TRUST_ID"
      AUTH_URL="$AUTH_URL"
      VOLUME_DRIVER="$VOLUME_DRIVER"
      REXRAY_PREEMPT="$REXRAY_PREEMPT"
78  fragments/write-swarm-master-service.sh  Normal file
@@ -0,0 +1,78 @@
#!/bin/bash

. /etc/sysconfig/heat-params

set -x

if [ "${IS_PRIMARY_MASTER}" = "True" ]; then
cat > /usr/local/bin/magnum-start-swarm-manager << START_SWARM_BIN
#!/bin/bash -xe

docker swarm init --advertise-addr "${SWARM_NODE_IP}"
if [[ \$? -eq 0 ]]; then
    status="SUCCESS"
    msg="Swarm init was successful."
else
    status="FAILURE"
    msg="Failed to init swarm."
fi
sh -c "${WAIT_CURL} --data-binary '{\"status\": \"\$status\", \"reason\": \"\$msg\"}'"
START_SWARM_BIN
else
if [ "${TLS_DISABLED}" = 'False' ]; then
    tls="--tlsverify"
    tls=$tls" --tlscacert=/etc/docker/ca.crt"
    tls=$tls" --tlskey=/etc/docker/server.key"
    tls=$tls" --tlscert=/etc/docker/server.crt"
fi

cat > /usr/local/bin/magnum-start-swarm-manager << START_SWARM_BIN
#!/bin/bash -xe
i=0
until token=\$(docker $tls -H $PRIMARY_MASTER_IP swarm join-token --quiet manager)
do
    ((i++))
    [ \$i -lt 5 ] || break;
    sleep 5
done

if [[ -z \$token ]] ; then
    sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"Failed to retrieve swarm join token.\"}'"
fi

i=0
until docker swarm join --token \$token $PRIMARY_MASTER_IP:2377
do
    ((i++))
    [ \$i -lt 5 ] || break;
    sleep 5
done
if [[ \$i -ge 5 ]] ; then
    sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"Manager failed to join swarm.\"}'"
else
    sh -c "${WAIT_CURL} --data-binary '{\"status\": \"SUCCESS\", \"reason\": \"Manager joined swarm.\"}'"
fi
START_SWARM_BIN
fi
chmod +x /usr/local/bin/magnum-start-swarm-manager

cat > /etc/systemd/system/swarm-manager.service << END_SERVICE
[Unit]
Description=Swarm Manager
After=docker.service
Requires=docker.service

[Service]
Type=oneshot
ExecStart=/usr/local/bin/magnum-start-swarm-manager

[Install]
WantedBy=multi-user.target
END_SERVICE

chown root:root /etc/systemd/system/swarm-manager.service
chmod 644 /etc/systemd/system/swarm-manager.service

systemctl daemon-reload
systemctl start --no-block swarm-manager
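The fragment above drives the init/join handshake with the plain docker CLI from a generated helper script. As a cross-check of the same protocol, the following is a hedged sketch of the equivalent calls through the docker SDK for Python; the SDK is an assumption here (the driver itself only uses the CLI inside the Heat-generated script), and the addresses are placeholders.

import docker

# Primary master: initialise the swarm (like 'docker swarm init').
primary = docker.DockerClient(base_url="unix://var/run/docker.sock")
primary.swarm.init(advertise_addr="10.0.0.5")   # hypothetical master IP
primary.swarm.reload()
manager_token = primary.swarm.attrs["JoinTokens"]["Manager"]

# Secondary master: join with the manager token fetched from the primary
# (like 'docker swarm join-token manager' followed by 'docker swarm join').
secondary = docker.DockerClient(base_url="tcp://10.0.0.6:2375")
secondary.swarm.join(remote_addrs=["10.0.0.5:2377"], join_token=manager_token)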
62  fragments/write-swarm-worker-service.sh  Normal file
@@ -0,0 +1,62 @@
#!/bin/bash

. /etc/sysconfig/heat-params

set -x

if [ "${TLS_DISABLED}" = 'False' ]; then
    tls="--tlsverify"
    tls=$tls" --tlscacert=/etc/docker/ca.crt"
    tls=$tls" --tlskey=/etc/docker/server.key"
    tls=$tls" --tlscert=/etc/docker/server.crt"
fi
cat > /usr/local/bin/magnum-start-swarm-worker << START_SWARM_BIN
#!/bin/bash -ex

i=0
until token=\$(/usr/bin/docker $tls -H $SWARM_API_IP swarm join-token --quiet worker)
do
    ((i++))
    [ \$i -lt 5 ] || break;
    sleep 5
done

if [[ -z \$token ]] ; then
    sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"Failed to retrieve swarm join token.\"}'"
fi

i=0
until docker swarm join --token \$token $SWARM_API_IP:2377
do
    ((i++))
    [ \$i -lt 5 ] || break;
    sleep 5
done
if [[ \$i -ge 5 ]] ; then
    sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"Node failed to join swarm.\"}'"
else
    sh -c "${WAIT_CURL} --data-binary '{\"status\": \"SUCCESS\", \"reason\": \"Node joined swarm.\"}'"
fi
START_SWARM_BIN

chmod +x /usr/local/bin/magnum-start-swarm-worker

cat > /etc/systemd/system/swarm-worker.service << END_SERVICE
[Unit]
Description=Swarm Worker
After=docker.service
Requires=docker.service

[Service]
Type=oneshot
ExecStart=/usr/local/bin/magnum-start-swarm-worker

[Install]
WantedBy=multi-user.target
END_SERVICE

chown root:root /etc/systemd/system/swarm-worker.service
chmod 644 /etc/systemd/system/swarm-worker.service

systemctl daemon-reload
systemctl start --no-block swarm-worker
@ -0,0 +1,413 @@
|
||||
heat_template_version: 2014-10-16
|
||||
|
||||
description: >
|
||||
This template will boot a Docker Swarm-Mode cluster. A swarm cluster
|
||||
is made up of several master nodes, and N worker nodes. Every node in
|
||||
the cluster, including the master, is running a Docker daemon and
|
||||
joins the swarm as a manager or as a worker. The managers are
|
||||
listening on port 2375. By default, the cluster is made up of one
|
||||
master node and one worker node.
|
||||
|
||||
parameters:
|
||||
|
||||
#
|
||||
# REQUIRED PARAMETERS
|
||||
#
|
||||
ssh_key_name:
|
||||
type: string
|
||||
description: name of ssh key to be provisioned on our server
|
||||
|
||||
external_network:
|
||||
type: string
|
||||
description: uuid/name of a network to use for floating ip addresses
|
||||
|
||||
fixed_network:
|
||||
type: string
|
||||
description: uuid/name of an existing network to use to provision machines
|
||||
default: ""
|
||||
|
||||
fixed_subnet:
|
||||
type: string
|
||||
description: uuid/name of an existing subnet to use to provision machines
|
||||
default: ""
|
||||
|
||||
cluster_uuid:
|
||||
type: string
|
||||
description: identifier for the cluster this template is generating
|
||||
|
||||
magnum_url:
|
||||
type: string
|
||||
description: endpoint to retrieve TLS certs from
|
||||
|
||||
server_image:
|
||||
type: string
|
||||
description: glance image used to boot the server
|
||||
|
||||
#
|
||||
# OPTIONAL PARAMETERS
|
||||
#
|
||||
master_flavor:
|
||||
type: string
|
||||
description: flavor to use when booting the swarm master
|
||||
default: m1.small
|
||||
|
||||
node_flavor:
|
||||
type: string
|
||||
description: flavor to use when booting the swarm node
|
||||
|
||||
dns_nameserver:
|
||||
type: string
|
||||
description: address of a dns nameserver reachable in your environment
|
||||
default: 8.8.8.8
|
||||
|
||||
http_proxy:
|
||||
type: string
|
||||
description: http proxy address for docker
|
||||
default: ""
|
||||
|
||||
https_proxy:
|
||||
type: string
|
||||
description: https proxy address for docker
|
||||
default: ""
|
||||
|
||||
no_proxy:
|
||||
type: string
|
||||
description: no proxies for docker
|
||||
default: ""
|
||||
|
||||
number_of_masters:
|
||||
type: number
|
||||
description: how many swarm masters to spawn
|
||||
default: 1
|
||||
|
||||
number_of_nodes:
|
||||
type: number
|
||||
description: how many swarm nodes to spawn
|
||||
default: 1
|
||||
|
||||
number_of_secondary_masters:
|
||||
type: number
|
||||
description: how many secondary masters to spawn
|
||||
|
||||
fixed_network_cidr:
|
||||
type: string
|
||||
description: network range for fixed ip network
|
||||
default: "10.0.0.0/24"
|
||||
|
||||
tls_disabled:
|
||||
type: boolean
|
||||
description: whether or not to enable TLS
|
||||
default: False
|
||||
|
||||
docker_volume_size:
|
||||
type: number
|
||||
description: >
|
||||
size of a cinder volume to allocate to docker for container/image
|
||||
storage
|
||||
default: 0
|
||||
|
||||
docker_volume_type:
|
||||
type: string
|
||||
description: >
|
||||
type of a cinder volume to allocate to docker for container/image
|
||||
storage
|
||||
|
||||
docker_storage_driver:
|
||||
type: string
|
||||
description: docker storage driver name
|
||||
default: "devicemapper"
|
||||
constraints:
|
||||
- allowed_values: ["devicemapper", "overlay"]
|
||||
|
||||
loadbalancing_protocol:
|
||||
type: string
|
||||
description: >
|
||||
The protocol which is used for load balancing. If you want to change
|
||||
tls_disabled option to 'True', please change this to "HTTP".
|
||||
default: TCP
|
||||
constraints:
|
||||
- allowed_values: ["TCP", "HTTP"]
|
||||
|
||||
swarm_port:
|
||||
type: number
|
||||
description: >
|
||||
The port which are used by swarm manager to provide swarm service.
|
||||
default: 2375
|
||||
|
||||
trustee_domain_id:
|
||||
type: string
|
||||
description: domain id of the trustee
|
||||
default: ""
|
||||
|
||||
trustee_user_id:
|
||||
type: string
|
||||
description: user id of the trustee
|
||||
default: ""
|
||||
|
||||
trustee_username:
|
||||
type: string
|
||||
description: username of the trustee
|
||||
default: ""
|
||||
|
||||
trustee_password:
|
||||
type: string
|
||||
description: password of the trustee
|
||||
default: ""
|
||||
hidden: true
|
||||
|
||||
trust_id:
|
||||
type: string
|
||||
description: id of the trust which is used by the trustee
|
||||
default: ""
|
||||
hidden: true
|
||||
|
||||
auth_url:
|
||||
type: string
|
||||
description: url for keystone
|
||||
|
||||
volume_driver:
|
||||
type: string
|
||||
description: volume driver to use for container storage
|
||||
default: ""
|
||||
constraints:
|
||||
- allowed_values: ["","rexray"]
|
||||
|
||||
rexray_preempt:
|
||||
type: string
|
||||
description: >
|
||||
enables any host to take control of a volume irrespective of whether
|
||||
other hosts are using the volume
|
||||
default: "false"
|
||||
|
||||
|
||||
resources:
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# network resources. allocate a network and router for our server.
|
||||
# it would also be possible to take advantage of existing network
|
||||
# resources (and have the deployer provide network and subnet ids,
|
||||
# etc, as parameters), but I wanted to minmize the amount of
|
||||
# configuration necessary to make this go.
|
||||
|
||||
network:
|
||||
type: ../../common/templates/network.yaml
|
||||
properties:
|
||||
existing_network: {get_param: fixed_network}
|
||||
existing_subnet: {get_param: fixed_subnet}
|
||||
private_network_cidr: {get_param: fixed_network_cidr}
|
||||
dns_nameserver: {get_param: dns_nameserver}
|
||||
external_network: {get_param: external_network}
|
||||
|
||||
api_lb:
|
||||
type: ../../common/templates/lb.yaml
|
||||
properties:
|
||||
fixed_subnet: {get_attr: [network, fixed_subnet]}
|
||||
external_network: {get_param: external_network}
|
||||
protocol: {get_param: loadbalancing_protocol}
|
||||
port: {get_param: swarm_port}
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# security groups. we need to permit network traffic of various
|
||||
# sorts.
|
||||
#
|
||||
|
||||
secgroup_swarm_manager:
|
||||
type: "OS::Neutron::SecurityGroup"
|
||||
properties:
|
||||
rules:
|
||||
- protocol: icmp
|
||||
- protocol: tcp
|
||||
port_range_min: 22
|
||||
port_range_max: 22
|
||||
- protocol: tcp
|
||||
port_range_min: 2375
|
||||
port_range_max: 2375
|
||||
- protocol: tcp
|
||||
remote_ip_prefix: {get_param: fixed_network_cidr}
|
||||
port_range_min: 1
|
||||
port_range_max: 65535
|
||||
- protocol: udp
|
||||
port_range_min: 53
|
||||
port_range_max: 53
|
||||
|
||||
secgroup_swarm_node:
|
||||
type: "OS::Neutron::SecurityGroup"
|
||||
properties:
|
||||
rules:
|
||||
- protocol: icmp
|
||||
- protocol: tcp
|
||||
- protocol: udp
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# resources that expose the IPs of either the swarm master or a given
|
||||
# LBaaS pool depending on whether LBaaS is enabled for the cluster.
|
||||
#
|
||||
|
||||
api_address_lb_switch:
|
||||
type: Magnum::ApiGatewaySwitcher
|
||||
properties:
|
||||
pool_public_ip: {get_attr: [api_lb, floating_address]}
|
||||
pool_private_ip: {get_attr: [api_lb, address]}
|
||||
master_public_ip: {get_attr: [swarm_primary_master, resource.0.swarm_master_external_ip]}
|
||||
master_private_ip: {get_attr: [swarm_primary_master, resource.0.swarm_master_ip]}
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# Swarm manager is responsible for the entire cluster and manages the
|
||||
# resources of multiple Docker hosts at scale.
|
||||
# It supports high availability by create a primary manager and multiple
|
||||
# replica instances.
|
||||
|
||||
swarm_primary_master:
|
||||
type: "OS::Heat::ResourceGroup"
|
||||
depends_on:
|
||||
- network
|
||||
properties:
|
||||
count: 1
|
||||
resource_def:
|
||||
type: swarmmaster.yaml
|
||||
properties:
|
||||
is_primary_master: True
|
||||
ssh_key_name: {get_param: ssh_key_name}
|
||||
server_image: {get_param: server_image}
|
||||
server_flavor: {get_param: master_flavor}
|
||||
docker_volume_size: {get_param: docker_volume_size}
|
||||
docker_volume_type: {get_param: docker_volume_type}
|
||||
docker_storage_driver: {get_param: docker_storage_driver}
|
||||
fixed_network_id: {get_attr: [network, fixed_network]}
|
||||
fixed_subnet_id: {get_attr: [network, fixed_subnet]}
|
||||
external_network: {get_param: external_network}
|
||||
http_proxy: {get_param: http_proxy}
|
||||
https_proxy: {get_param: https_proxy}
|
||||
no_proxy: {get_param: no_proxy}
|
||||
swarm_api_ip: {get_attr: [api_lb, address]}
|
||||
cluster_uuid: {get_param: cluster_uuid}
|
||||
magnum_url: {get_param: magnum_url}
|
||||
tls_disabled: {get_param: tls_disabled}
|
||||
secgroup_swarm_master_id: {get_resource: secgroup_swarm_manager}
|
||||
swarm_port: {get_param: swarm_port}
|
||||
api_pool_id: {get_attr: [api_lb, pool_id]}
|
||||
api_ip_address: {get_attr: [api_lb, floating_address]}
|
||||
trustee_user_id: {get_param: trustee_user_id}
|
||||
trustee_password: {get_param: trustee_password}
|
||||
trust_id: {get_param: trust_id}
|
||||
auth_url: {get_param: auth_url}
|
||||
volume_driver: {get_param: volume_driver}
|
||||
rexray_preempt: {get_param: rexray_preempt}
|
||||
|
||||
swarm_secondary_masters:
|
||||
type: "OS::Heat::ResourceGroup"
|
||||
depends_on:
|
||||
- network
|
||||
- swarm_primary_master
|
||||
properties:
|
||||
count: {get_param: number_of_secondary_masters}
|
||||
resource_def:
|
||||
type: swarmmaster.yaml
|
||||
properties:
|
||||
ssh_key_name: {get_param: ssh_key_name}
|
||||
server_image: {get_param: server_image}
|
||||
server_flavor: {get_param: master_flavor}
|
||||
docker_volume_size: {get_param: docker_volume_size}
|
||||
docker_volume_type: {get_param: docker_volume_type}
|
||||
docker_storage_driver: {get_param: docker_storage_driver}
|
||||
fixed_network_id: {get_attr: [network, fixed_network]}
|
||||
fixed_subnet_id: {get_attr: [network, fixed_subnet]}
|
||||
external_network: {get_param: external_network}
|
||||
http_proxy: {get_param: http_proxy}
|
||||
https_proxy: {get_param: https_proxy}
|
||||
no_proxy: {get_param: no_proxy}
|
||||
swarm_api_ip: {get_attr: [api_address_lb_switch, private_ip]}
|
||||
cluster_uuid: {get_param: cluster_uuid}
|
||||
magnum_url: {get_param: magnum_url}
|
||||
tls_disabled: {get_param: tls_disabled}
|
||||
secgroup_swarm_master_id: {get_resource: secgroup_swarm_manager}
|
||||
swarm_port: {get_param: swarm_port}
|
||||
api_pool_id: {get_attr: [api_lb, pool_id]}
|
||||
api_ip_address: {get_attr: [api_lb, floating_address]}
|
||||
trustee_user_id: {get_param: trustee_user_id}
|
||||
trustee_password: {get_param: trustee_password}
|
||||
trust_id: {get_param: trust_id}
|
||||
auth_url: {get_param: auth_url}
|
||||
volume_driver: {get_param: volume_driver}
|
||||
rexray_preempt: {get_param: rexray_preempt}
|
||||
|
||||
swarm_nodes:
|
||||
type: "OS::Heat::ResourceGroup"
|
||||
depends_on:
|
||||
- network
|
||||
- swarm_primary_master
|
||||
properties:
|
||||
count: {get_param: number_of_nodes}
|
||||
resource_def:
|
||||
type: swarmnode.yaml
|
||||
properties:
|
||||
ssh_key_name: {get_param: ssh_key_name}
|
||||
server_image: {get_param: server_image}
|
||||
server_flavor: {get_param: node_flavor}
|
||||
docker_volume_size: {get_param: docker_volume_size}
|
||||
docker_volume_type: {get_param: docker_volume_type}
|
||||
docker_storage_driver: {get_param: docker_storage_driver}
|
||||
fixed_network_id: {get_attr: [network, fixed_network]}
|
||||
fixed_subnet_id: {get_attr: [network, fixed_subnet]}
|
||||
external_network: {get_param: external_network}
|
||||
http_proxy: {get_param: http_proxy}
|
||||
https_proxy: {get_param: https_proxy}
|
||||
no_proxy: {get_param: no_proxy}
|
||||
swarm_api_ip: {get_attr: [api_address_lb_switch, private_ip]}
|
||||
cluster_uuid: {get_param: cluster_uuid}
|
||||
magnum_url: {get_param: magnum_url}
|
||||
tls_disabled: {get_param: tls_disabled}
|
||||
secgroup_swarm_node_id: {get_resource: secgroup_swarm_node}
|
||||
api_ip_address: {get_attr: [api_address_lb_switch, public_ip]}
|
||||
trustee_domain_id: {get_param: trustee_domain_id}
|
||||
trustee_user_id: {get_param: trustee_user_id}
|
||||
trustee_username: {get_param: trustee_username}
|
||||
trustee_password: {get_param: trustee_password}
|
||||
trust_id: {get_param: trust_id}
|
||||
auth_url: {get_param: auth_url}
|
||||
volume_driver: {get_param: volume_driver}
|
||||
rexray_preempt: {get_param: rexray_preempt}
|
||||
|
||||
outputs:
|
||||
|
||||
api_address:
|
||||
value:
|
||||
str_replace:
|
||||
template: api_ip_address
|
||||
params:
|
||||
api_ip_address: {get_attr: [api_address_lb_switch, public_ip]}
|
||||
description: >
|
||||
This is the API endpoint of the Swarm masters. Use this to access
|
||||
the Swarm API server from outside the cluster.
|
||||
|
||||
swarm_primary_master_private:
|
||||
value: {get_attr: [swarm_primary_master, swarm_master_ip]}
|
||||
description: >
|
||||
This is a list of the "private" addresses of all the Swarm masters.
|
||||
|
||||
swarm_primary_master:
|
||||
value: {get_attr: [swarm_primary_master, swarm_master_external_ip]}
|
||||
description: >
|
||||
This is a list of "public" ip addresses of all Swarm masters.
|
||||
Use these addresses to log into the Swarm masters via ssh.
|
||||
|
||||
swarm_secondary_masters:
|
||||
value: {get_attr: [swarm_secondary_masters, swarm_master_external_ip]}
|
||||
description: >
|
||||
This is a list of "public" ip addresses of all Swarm masters.
|
||||
Use these addresses to log into the Swarm masters via ssh.
|
||||
|
||||
swarm_nodes_private:
|
||||
value: {get_attr: [swarm_nodes, swarm_node_ip]}
|
||||
description: >
|
||||
This is a list of the "private" addresses of all the Swarm nodes.
|
||||
|
||||
swarm_nodes:
|
||||
value: {get_attr: [swarm_nodes, swarm_node_external_ip]}
|
||||
description: >
|
||||
This is a list of the "public" addresses of all the Swarm nodes. Use
|
||||
these addresses to, e.g., log into the nodes.
|
359  magnum/drivers/swarm_fedora_atomic_v2/templates/swarmmaster.yaml  Normal file
@ -0,0 +1,359 @@
|
||||
heat_template_version: 2014-10-16
|
||||
|
||||
description: >
|
||||
This is a nested stack that defines swarm master node. A swarm mater node is
|
||||
running a Docker daemon and joins swarm as a manager. The Docker daemon
|
||||
listens on port 2375.
|
||||
|
||||
parameters:
|
||||
|
||||
ssh_key_name:
|
||||
type: string
|
||||
description: name of ssh key to be provisioned on our server
|
||||
|
||||
docker_volume_size:
|
||||
type: number
|
||||
description: >
|
||||
size of a cinder volume to allocate to docker for container/image
|
||||
storage
|
||||
|
||||
docker_volume_type:
|
||||
type: string
|
||||
description: >
|
||||
type of a cinder volume to allocate to docker for container/image
|
||||
storage
|
||||
|
||||
docker_storage_driver:
|
||||
type: string
|
||||
description: docker storage driver name
|
||||
constraints:
|
||||
- allowed_values: ["devicemapper", "overlay"]
|
||||
|
||||
external_network:
|
||||
type: string
|
||||
description: uuid/name of a network to use for floating ip addresses
|
||||
|
||||
cluster_uuid:
|
||||
type: string
|
||||
description: identifier for the cluster this template is generating
|
||||
|
||||
magnum_url:
|
||||
type: string
|
||||
description: endpoint to retrieve TLS certs from
|
||||
|
||||
fixed_network_id:
|
||||
type: string
|
||||
description: Network from which to allocate fixed addresses.
|
||||
|
||||
fixed_subnet_id:
|
||||
type: string
|
||||
description: Subnet from which to allocate fixed addresses.
|
||||
|
||||
swarm_api_ip:
|
||||
type: string
|
||||
description: swarm master's api server ip address
|
||||
default: ""
|
||||
|
||||
api_ip_address:
|
||||
type: string
|
||||
description: swarm master's api server public ip address
|
||||
default: ""
|
||||
|
||||
server_image:
|
||||
type: string
|
||||
description: glance image used to boot the server
|
||||
|
||||
server_flavor:
|
||||
type: string
|
||||
description: flavor to use when booting the server
|
||||
|
||||
http_proxy:
|
||||
type: string
|
||||
description: http proxy address for docker
|
||||
|
||||
https_proxy:
|
||||
type: string
|
||||
description: https proxy address for docker
|
||||
|
||||
no_proxy:
|
||||
type: string
|
||||
description: no proxies for docker
|
||||
|
||||
tls_disabled:
|
||||
type: boolean
|
||||
description: whether or not to enable TLS
|
||||
|
||||
secgroup_swarm_master_id:
|
||||
type: string
|
||||
description: ID of the security group for swarm master.
|
||||
|
||||
swarm_port:
|
||||
type: number
|
||||
description: >
|
||||
The port which are used by swarm manager to provide swarm service.
|
||||
|
||||
api_pool_id:
|
||||
type: string
|
||||
description: ID of the load balancer pool of swarm master server.
|
||||
|
||||
trustee_user_id:
|
||||
type: string
|
||||
description: user id of the trustee
|
||||
|
||||
trustee_password:
|
||||
type: string
|
||||
description: password of the trustee
|
||||
hidden: true
|
||||
|
||||
trust_id:
|
||||
type: string
|
||||
description: id of the trust which is used by the trustee
|
||||
hidden: true
|
||||
|
||||
auth_url:
|
||||
type: string
|
||||
description: url for keystone
|
||||
|
||||
volume_driver:
|
||||
type: string
|
||||
description: volume driver to use for container storage
|
||||
default: ""
|
||||
|
||||
rexray_preempt:
|
||||
type: string
|
||||
description: >
|
||||
enables any host to take control of a volume irrespective of whether
|
||||
other hosts are using the volume
|
||||
default: "false"
|
||||
|
||||
is_primary_master:
|
||||
type: boolean
|
||||
description: whether this master is primary or not
|
||||
default: False
|
||||
|
||||
resources:
|
||||
|
||||
master_wait_handle:
|
||||
type: "OS::Heat::WaitConditionHandle"
|
||||
|
||||
master_wait_condition:
|
||||
type: "OS::Heat::WaitCondition"
|
||||
depends_on: swarm-master
|
||||
properties:
|
||||
handle: {get_resource: master_wait_handle}
|
||||
timeout: 6000
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# resource that exposes the IPs of either the Swarm master or the API
|
||||
# LBaaS pool depending on whether LBaaS is enabled for the cluster.
|
||||
#
|
||||
|
||||
api_address_switch:
|
||||
type: Magnum::ApiGatewaySwitcher
|
||||
properties:
|
||||
pool_public_ip: {get_param: api_ip_address}
|
||||
pool_private_ip: {get_param: swarm_api_ip}
|
||||
master_public_ip: {get_attr: [swarm_master_floating, floating_ip_address]}
|
||||
master_private_ip: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# software configs. these are components that are combined into
|
||||
# a multipart MIME user-data archive.
|
||||
#
|
||||
write_heat_params:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config:
|
||||
str_replace:
|
||||
template: {get_file: fragments/write-heat-params-master.yaml}
|
||||
params:
|
||||
"$IS_PRIMARY_MASTER": {get_param: is_primary_master}
|
||||
"$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]}
|
||||
"$DOCKER_VOLUME": {get_resource: docker_volume}
|
||||
"$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size}
|
||||
"$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver}
|
||||
"$HTTP_PROXY": {get_param: http_proxy}
|
||||
"$HTTPS_PROXY": {get_param: https_proxy}
|
||||
"$NO_PROXY": {get_param: no_proxy}
|
||||
"$PRIMARY_MASTER_IP": {get_param: swarm_api_ip}
|
||||
"$SWARM_API_IP": {get_attr: [api_address_switch, private_ip]}
|
||||
"$SWARM_NODE_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
|
||||
"$CLUSTER_UUID": {get_param: cluster_uuid}
|
||||
"$MAGNUM_URL": {get_param: magnum_url}
|
||||
"$TLS_DISABLED": {get_param: tls_disabled}
|
||||
"$API_IP_ADDRESS": {get_attr: [api_address_switch, public_ip]}
|
||||
"$TRUSTEE_USER_ID": {get_param: trustee_user_id}
|
||||
"$TRUSTEE_PASSWORD": {get_param: trustee_password}
|
||||
"$TRUST_ID": {get_param: trust_id}
|
||||
"$AUTH_URL": {get_param: auth_url}
|
||||
"$VOLUME_DRIVER": {get_param: volume_driver}
|
||||
"$REXRAY_PREEMPT": {get_param: rexray_preempt}
|
||||
|
||||
remove_docker_key:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/remove-docker-key.sh}
|
||||
|
||||
configure_docker_storage:
|
||||
type: OS::Heat::SoftwareConfig
|
||||
properties:
|
||||
group: ungrouped
|
||||
config:
|
||||
str_replace:
|
||||
params:
|
||||
$configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh}
|
||||
template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh}
|
||||
|
||||
make_cert:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/make-cert.py}
|
||||
|
||||
add_docker_daemon_options:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/add-docker-daemon-options.sh}
|
||||
|
||||
write_docker_socket:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/write-docker-socket.yaml}
|
||||
|
||||
write_swarm_master_service:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: fragments/write-swarm-master-service.sh}
|
||||
|
||||
enable_services:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config:
|
||||
str_replace:
|
||||
template: {get_file: ../../common/templates/swarm/fragments/enable-services.sh}
|
||||
params:
|
||||
"$NODE_SERVICES": "docker.socket docker"
|
||||
|
||||
configure_selinux:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/configure-selinux.sh}
|
||||
|
||||
add_proxy:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/add-proxy.sh}
|
||||
|
||||
volume_service:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/volume-service.sh}
|
||||
|
||||
swarm_master_init:
|
||||
type: "OS::Heat::MultipartMime"
|
||||
properties:
|
||||
parts:
|
||||
- config: {get_resource: configure_selinux}
|
||||
- config: {get_resource: remove_docker_key}
|
||||
- config: {get_resource: write_heat_params}
|
||||
- config: {get_resource: make_cert}
|
||||
- config: {get_resource: configure_docker_storage}
|
||||
- config: {get_resource: add_docker_daemon_options}
|
||||
- config: {get_resource: write_docker_socket}
|
||||
- config: {get_resource: add_proxy}
|
||||
- config: {get_resource: enable_services}
|
||||
- config: {get_resource: write_swarm_master_service}
|
||||
- config: {get_resource: volume_service}
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# Swarm_manager is a special node running the swarm manage daemon along
|
||||
# side the swarm worker.
|
||||
#
|
||||
|
||||
# do NOT use "_" (underscore) in the Nova server name
|
||||
# it creates a mismatch between the generated Nova name and its hostname
|
||||
# which can lead to weird problems
|
||||
swarm-master:
|
||||
type: "OS::Nova::Server"
|
||||
properties:
|
||||
image:
|
||||
get_param: server_image
|
||||
flavor:
|
||||
get_param: server_flavor
|
||||
key_name:
|
||||
get_param: ssh_key_name
|
||||
user_data_format: RAW
|
||||
user_data: {get_resource: swarm_master_init}
|
||||
networks:
|
||||
- port:
|
||||
get_resource: swarm_master_eth0
|
||||
|
||||
swarm_master_eth0:
|
||||
type: "OS::Neutron::Port"
|
||||
properties:
|
||||
network_id:
|
||||
get_param: fixed_network_id
|
||||
security_groups:
|
||||
- {get_param: secgroup_swarm_master_id}
|
||||
fixed_ips:
|
||||
- subnet_id:
|
||||
get_param: fixed_subnet_id
|
||||
|
||||
swarm_master_floating:
|
||||
type: "OS::Neutron::FloatingIP"
|
||||
properties:
|
||||
floating_network:
|
||||
get_param: external_network
|
||||
port_id:
|
||||
get_resource: swarm_master_eth0
|
||||
|
||||
api_pool_member:
|
||||
type: Magnum::Optional::Neutron::LBaaS::PoolMember
|
||||
properties:
|
||||
pool: {get_param: api_pool_id}
|
||||
address: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
|
||||
subnet: { get_param: fixed_subnet_id }
|
||||
protocol_port: {get_param: swarm_port}
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# docker storage. This allocates a cinder volume and attaches it
|
||||
# to the node.
|
||||
#
|
||||
|
||||
docker_volume:
|
||||
type: Magnum::Optional::Cinder::Volume
|
||||
properties:
|
||||
size: {get_param: docker_volume_size}
|
||||
volume_type: {get_param: docker_volume_type}
|
||||
|
||||
docker_volume_attach:
|
||||
type: Magnum::Optional::Cinder::VolumeAttachment
|
||||
properties:
|
||||
instance_uuid: {get_resource: swarm-master}
|
||||
volume_id: {get_resource: docker_volume}
|
||||
mountpoint: /dev/vdb
|
||||
|
||||
outputs:
|
||||
|
||||
swarm_master_ip:
|
||||
value: {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
|
||||
description: >
|
||||
This is the "private" addresses of all the Swarm master.
|
||||
|
||||
swarm_master_external_ip:
|
||||
value: {get_attr: [swarm_master_floating, floating_ip_address]}
|
||||
description: >
|
||||
This is the "public" ip addresses of Swarm master.
|
322  magnum/drivers/swarm_fedora_atomic_v2/templates/swarmnode.yaml  Normal file
@ -0,0 +1,322 @@
|
||||
heat_template_version: 2014-10-16
|
||||
|
||||
description: >
|
||||
This is a nested stack that defines a single swarm worker node, based on a
|
||||
vanilla Fedora Atomic image. This stack is included by a ResourceGroup
|
||||
resource in the parent template (swarmcluster.yaml).
|
||||
|
||||
parameters:
|
||||
|
||||
server_image:
|
||||
type: string
|
||||
description: glance image used to boot the server
|
||||
|
||||
server_flavor:
|
||||
type: string
|
||||
description: flavor to use when booting the server
|
||||
|
||||
ssh_key_name:
|
||||
type: string
|
||||
description: name of ssh key to be provisioned on our server
|
||||
|
||||
docker_volume_size:
|
||||
type: number
|
||||
description: >
|
||||
size of a cinder volume to allocate to docker for container/image
|
||||
storage
|
||||
|
||||
docker_volume_type:
|
||||
type: string
|
||||
description: >
|
||||
type of a cinder volume to allocate to docker for container/image
|
||||
storage
|
||||
|
||||
docker_storage_driver:
|
||||
type: string
|
||||
description: docker storage driver name
|
||||
constraints:
|
||||
- allowed_values: ["devicemapper", "overlay"]
|
||||
|
||||
external_network:
|
||||
type: string
|
||||
description: uuid/name of a network to use for floating ip addresses
|
||||
|
||||
fixed_network_id:
|
||||
type: string
|
||||
description: Network from which to allocate fixed addresses.
|
||||
|
||||
fixed_subnet_id:
|
||||
type: string
|
||||
description: Subnet from which to allocate fixed addresses.
|
||||
|
||||
http_proxy:
|
||||
type: string
|
||||
description: http proxy address for docker
|
||||
|
||||
https_proxy:
|
||||
type: string
|
||||
description: https proxy address for docker
|
||||
|
||||
no_proxy:
|
||||
type: string
|
||||
description: no proxies for docker
|
||||
|
||||
swarm_api_ip:
|
||||
type: string
|
||||
description: swarm master's api server ip address
|
||||
|
||||
api_ip_address:
|
||||
type: string
|
||||
description: swarm master's api server public ip address
|
||||
|
||||
cluster_uuid:
|
||||
type: string
|
||||
description: identifier for the cluster this template is generating
|
||||
|
||||
magnum_url:
|
||||
type: string
|
||||
description: endpoint to retrieve TLS certs from
|
||||
|
||||
tls_disabled:
|
||||
type: boolean
|
||||
description: whether or not to disable TLS
|
||||
|
||||
secgroup_swarm_node_id:
|
||||
type: string
|
||||
description: ID of the security group for swarm node.
|
||||
|
||||
trustee_domain_id:
|
||||
type: string
|
||||
description: domain id of the trustee
|
||||
|
||||
trustee_user_id:
|
||||
type: string
|
||||
description: user id of the trustee
|
||||
|
||||
trustee_username:
|
||||
type: string
|
||||
description: username of the trustee
|
||||
|
||||
trustee_password:
|
||||
type: string
|
||||
description: password of the trustee
|
||||
hidden: true
|
||||
|
||||
trust_id:
|
||||
type: string
|
||||
description: id of the trust which is used by the trustee
|
||||
hidden: true
|
||||
|
||||
auth_url:
|
||||
type: string
|
||||
description: url for keystone
|
||||
|
||||
volume_driver:
|
||||
type: string
|
||||
description: volume driver to use for container storage
|
||||
default: ""
|
||||
|
||||
rexray_preempt:
|
||||
type: string
|
||||
description: >
|
||||
enables any host to take control of a volume irrespective of whether
|
||||
other hosts are using the volume
|
||||
default: "false"
|
||||
|
||||
resources:
|
||||
|
||||
node_wait_handle:
|
||||
type: "OS::Heat::WaitConditionHandle"
|
||||
|
||||
node_wait_condition:
|
||||
type: "OS::Heat::WaitCondition"
|
||||
depends_on: swarm-node
|
||||
properties:
|
||||
handle: {get_resource: node_wait_handle}
|
||||
timeout: 6000
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# software configs. these are components that are combined into
|
||||
# a multipart MIME user-data archive.
|
||||
write_heat_params:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config:
|
||||
str_replace:
|
||||
template: {get_file: ../../common/templates/swarm/fragments/write-heat-params-node.yaml}
|
||||
params:
|
||||
"$WAIT_CURL": {get_attr: [node_wait_handle, curl_cli]}
|
||||
"$DOCKER_VOLUME": {get_resource: docker_volume}
|
||||
"$DOCKER_VOLUME_SIZE": {get_param: docker_volume_size}
|
||||
"$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver}
|
||||
"$HTTP_PROXY": {get_param: http_proxy}
|
||||
"$HTTPS_PROXY": {get_param: https_proxy}
|
||||
"$NO_PROXY": {get_param: no_proxy}
|
||||
"$SWARM_API_IP": {get_param: swarm_api_ip}
|
||||
"$SWARM_NODE_IP": {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]}
|
||||
"$CLUSTER_UUID": {get_param: cluster_uuid}
|
||||
"$MAGNUM_URL": {get_param: magnum_url}
|
||||
"$TLS_DISABLED": {get_param: tls_disabled}
|
||||
"$API_IP_ADDRESS": {get_param: api_ip_address}
|
||||
"$TRUSTEE_DOMAIN_ID": {get_param: trustee_domain_id}
|
||||
"$TRUSTEE_USER_ID": {get_param: trustee_user_id}
|
||||
"$TRUSTEE_USERNAME": {get_param: trustee_username}
|
||||
"$TRUSTEE_PASSWORD": {get_param: trustee_password}
|
||||
"$TRUST_ID": {get_param: trust_id}
|
||||
"$AUTH_URL": {get_param: auth_url}
|
||||
"$VOLUME_DRIVER": {get_param: volume_driver}
|
||||
"$REXRAY_PREEMPT": {get_param: rexray_preempt}
|
||||
|
||||
remove_docker_key:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/remove-docker-key.sh}
|
||||
|
||||
make_cert:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/make-cert.py}
|
||||
|
||||
configure_docker_storage:
|
||||
type: OS::Heat::SoftwareConfig
|
||||
properties:
|
||||
group: ungrouped
|
||||
config:
|
||||
str_replace:
|
||||
params:
|
||||
$configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh}
|
||||
template: {get_file: ../../common/templates/fragments/configure-docker-storage.sh}
|
||||
|
||||
add_docker_daemon_options:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/add-docker-daemon-options.sh}
|
||||
|
||||
write_docker_socket:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/write-docker-socket.yaml}
|
||||
|
||||
write_swarm_worker_service:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: fragments/write-swarm-worker-service.sh}
|
||||
|
||||
enable_services:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config:
|
||||
str_replace:
|
||||
template: {get_file: ../../common/templates/swarm/fragments/enable-services.sh}
|
||||
params:
|
||||
"$NODE_SERVICES": "docker.socket docker"
|
||||
|
||||
configure_selinux:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/configure-selinux.sh}
|
||||
|
||||
add_proxy:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/add-proxy.sh}
|
||||
|
||||
volume_service:
|
||||
type: "OS::Heat::SoftwareConfig"
|
||||
properties:
|
||||
group: ungrouped
|
||||
config: {get_file: ../../common/templates/swarm/fragments/volume-service.sh}
|
||||
|
||||
swarm_node_init:
|
||||
type: "OS::Heat::MultipartMime"
|
||||
properties:
|
||||
parts:
|
||||
- config: {get_resource: configure_selinux}
|
||||
- config: {get_resource: remove_docker_key}
|
||||
- config: {get_resource: write_heat_params}
|
||||
- config: {get_resource: make_cert}
|
||||
- config: {get_resource: configure_docker_storage}
|
||||
- config: {get_resource: add_docker_daemon_options}
|
||||
- config: {get_resource: write_docker_socket}
|
||||
- config: {get_resource: add_proxy}
|
||||
- config: {get_resource: enable_services}
|
||||
- config: {get_resource: write_swarm_worker_service}
|
||||
- config: {get_resource: volume_service}
|
||||
|
||||
# do NOT use "_" (underscore) in the Nova server name
|
||||
# it creates a mismatch between the generated Nova name and its hostname
|
||||
# which can lead to weird problems
|
||||
swarm-node:
|
||||
type: "OS::Nova::Server"
|
||||
properties:
|
||||
image:
|
||||
get_param: server_image
|
||||
flavor:
|
||||
get_param: server_flavor
|
||||
key_name:
|
||||
get_param: ssh_key_name
|
||||
user_data_format: RAW
|
||||
user_data: {get_resource: swarm_node_init}
|
||||
networks:
|
||||
- port:
|
||||
get_resource: swarm_node_eth0
|
||||
|
||||
swarm_node_eth0:
|
||||
type: "OS::Neutron::Port"
|
||||
properties:
|
||||
network_id:
|
||||
get_param: fixed_network_id
|
||||
security_groups:
|
||||
- {get_param: secgroup_swarm_node_id}
|
||||
fixed_ips:
|
||||
- subnet_id:
|
||||
get_param: fixed_subnet_id
|
||||
|
||||
swarm_node_floating:
|
||||
type: "OS::Neutron::FloatingIP"
|
||||
properties:
|
||||
floating_network:
|
||||
get_param: external_network
|
||||
port_id:
|
||||
get_resource: swarm_node_eth0
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# docker storage. This allocates a cinder volume and attaches it
|
||||
# to the node.
|
||||
#
|
||||
|
||||
docker_volume:
|
||||
type: Magnum::Optional::Cinder::Volume
|
||||
properties:
|
||||
size: {get_param: docker_volume_size}
|
||||
volume_type: {get_param: docker_volume_type}
|
||||
|
||||
docker_volume_attach:
|
||||
type: Magnum::Optional::Cinder::VolumeAttachment
|
||||
properties:
|
||||
instance_uuid: {get_resource: swarm-node}
|
||||
volume_id: {get_resource: docker_volume}
|
||||
mountpoint: /dev/vdb
|
||||
|
||||
outputs:
|
||||
|
||||
swarm_node_ip:
|
||||
value: {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]}
|
||||
description: >
|
||||
This is the "private" address of the Swarm node.
|
||||
|
||||
swarm_node_external_ip:
|
||||
value: {get_attr: [swarm_node_floating, floating_ip_address]}
|
||||
description: >
|
||||
This is the "public" address of the Swarm node.
|
17  magnum/drivers/swarm_fedora_atomic_v2/version.py  Normal file
@@ -0,0 +1,17 @@
# Copyright 2016 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

version = '2.0.0'
driver = 'swarm_fedora_atomic_v2'
container_version = '1.12.6'
@@ -63,9 +63,9 @@ class ContainerStatus(fields.Enum):

 class ClusterType(fields.Enum):
     ALL = (
-        KUBERNETES, SWARM, MESOS, DCOS,
+        KUBERNETES, SWARM, MESOS, DCOS, SWARM_MODE,
     ) = (
-        'kubernetes', 'swarm', 'mesos', 'dcos',
+        'kubernetes', 'swarm', 'mesos', 'dcos', 'swarm-mode',
     )

     def __init__(self):
@@ -76,7 +76,7 @@ if [[ "$COE" == "kubernetes" ]]; then
     remote_exec $SSH_USER "sudo df -h" dfh.log
     remote_exec $SSH_USER "sudo journalctl -u wc-notify --no-pager" wc-notify.log
     remote_exec $SSH_USER "sudo cat /etc/sysconfig/heat-params" heat-params
-elif [[ "$COE" == "swarm" ]]; then
+elif [[ "$COE" == "swarm" || "$COE" == "swarm-mode" ]]; then
     SSH_USER=fedora
     remote_exec $SSH_USER "sudo systemctl --full list-units --no-pager" systemctl_list_units.log
     remote_exec $SSH_USER "sudo journalctl -u cloud-config --no-pager" cloud-config.log
@@ -87,6 +87,7 @@ elif [[ "$COE" == "swarm" ]]; then
     remote_exec $SSH_USER "sudo journalctl -u etcd --no-pager" etcd.log
     remote_exec $SSH_USER "sudo journalctl -u swarm-manager --no-pager" swarm-manager.log
     remote_exec $SSH_USER "sudo journalctl -u swarm-agent --no-pager" swarm-agent.log
+    remote_exec $SSH_USER "sudo journalctl -u swarm-worker --no-pager" swarm-worker.log
     remote_exec $SSH_USER "sudo journalctl -u docker-storage-setup --no-pager" docker-storage-setup.log
     remote_exec $SSH_USER "sudo systemctl status docker-storage-setup -l" docker-storage-setup.service.status.log
     remote_exec $SSH_USER "sudo systemctl show docker-storage-setup --no-pager" docker-storage-setup.service.show.log
0
magnum/tests/functional/swarm_mode/__init__.py
Normal file
@ -0,0 +1,125 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import docker
import requests
import time

import magnum.conf
from magnum.tests.functional.python_client_base import ClusterTest


CONF = magnum.conf.CONF


class TestSwarmModeAPIs(ClusterTest):
    """Basic functional testing of a swarm-mode cluster.

    Exercises service actions against a cluster created with
    tls_disabled=False.
    """

    coe = "swarm-mode"
    cluster_template_kwargs = {
        "tls_disabled": False,
        "network_driver": None,
        "volume_driver": None,
        "labels": {}
    }

    @classmethod
    def setUpClass(cls):
        super(TestSwarmModeAPIs, cls).setUpClass()
        cls.cluster_is_ready = None

    def setUp(self):
        super(TestSwarmModeAPIs, self).setUp()
        if self.cluster_is_ready is True:
            return
        # Note(eliqiao): In these test cases the docker client or magnum
        # client connects to the swarm service running on the master node.
        # The endpoint is cluster.api_address (listen port included), but
        # the service is not ready right after the cluster is created, so
        # wait an acceptable amount of time for it to start. Without this,
        # any API call fails with
        # 'ConnectionError: [Errno 111] Connection refused'.
        msg = ("If you see this error in the functional test, it means "
               "the docker service took too long to come up. This may not "
               "be an actual error, so an option is to rerun the "
               "functional test.")
        if self.cluster_is_ready is False:
            # In this case there is no need to run the tests below on the
            # gate; raise a meaningful exception message to indicate that
            # CA setup failed after cluster creation. A `recheck` is
            # usually the best next step.
            raise Exception(msg)

        url = self.cs.clusters.get(self.cluster.uuid).api_address

        # Note(eliqiao): docker_utils.CONF.docker.default_timeout is 10s,
        # which does not work on the gate and causes container creation to
        # fail with a timeout. Further debugging showed the image has to be
        # pulled the first time a container is created, so use 180s instead.

        docker_api_time_out = 180
        tls_config = docker.tls.TLSConfig(
            client_cert=(self.cert_file, self.key_file),
            verify=self.ca_file
        )

        self.docker_client = docker.DockerClient(
            base_url=url,
            tls=tls_config,
            version='auto',
            timeout=docker_api_time_out)

        self.docker_client_non_tls = docker.DockerClient(
            base_url=url,
            version='1.21',
            timeout=docker_api_time_out)

    def test_create_remove_service(self):
        # Create and remove a service using the docker python SDK.
        # Wait up to 15 minutes for it to reach the running state and up to
        # 5 minutes for it to be removed.

        # Create an nginx service based on alpine linux
        service = self.docker_client.services.create(
            name='nginx',
            image='nginx:mainline-alpine')
        # Wait up to 15 minutes for the first task to be running; guard
        # against tasks() being empty right after creation.
        for i in range(90):
            tasks = service.tasks()
            if tasks and tasks[0]['Status']['State'] == "running":
                break
            time.sleep(10)
        # Verify that it is running
        self.assertEqual('running', service.tasks()[0]['Status']['State'])
        # Remove the service and wait up to 5 minutes until it is gone
        service.remove()
        for i in range(30):
            if self.docker_client.services.list() == []:
                break
            time.sleep(10)
        # Verify that it is deleted
        self.assertEqual([], self.docker_client.services.list())

    def test_access_with_non_tls_client(self):
        """Try to contact the master's docker daemon over plain tcp.

        tcp returns ConnectionError whereas https returns SSLError. The
        default protocol we use in magnum is tcp, which works fine with the
        docker python SDK docker>=2.0.0.
        """
        try:
            self.docker_client_non_tls.info()
        except requests.exceptions.ConnectionError:
            pass
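For reference, the service lifecycle that test_create_remove_service drives through the SDK can be reproduced by hand with the docker CLI against the cluster's api_address; a hedged usage sketch (the address is a placeholder, and the TLS files are assumed to have been fetched for the cluster, e.g. with the magnum client's cluster-config command; none of this is part of the commit):

export DOCKER_HOST=tcp://<master-address>:2375   # placeholder, matches cluster.api_address
docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem \
    service create --name nginx nginx:mainline-alpine
docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem service ls
docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem service rm nginx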
@ -356,7 +356,7 @@ class TestObject(test_base.TestCase, _TestObject):
# http://docs.openstack.org/developer/magnum/objects.html
object_data = {
    'Cluster': '1.14-281c582b16291c4f0666371e53975a5c',
    'ClusterTemplate': '1.17-74e4e6b1faca768714be809a828599c2',
    'ClusterTemplate': '1.17-f1ce5212b46506360b41ab5cb7658af4',
    'Certificate': '1.1-1924dc077daa844f0f9076332ef96815',
    'MyObj': '1.0-34c4b1aadefd177b13f9a2f894cc23cd',
    'X509KeyPair': '1.2-d81950af36c59a71365e33ce539d24f9',
@ -63,6 +63,7 @@ magnum.drivers =
    k8s_fedora_atomic_v1 = magnum.drivers.k8s_fedora_atomic_v1.driver:Driver
    k8s_coreos_v1 = magnum.drivers.k8s_coreos_v1.driver:Driver
    swarm_fedora_atomic_v1 = magnum.drivers.swarm_fedora_atomic_v1.driver:Driver
    swarm_fedora_atomic_v2 = magnum.drivers.swarm_fedora_atomic_v2.driver:Driver
    mesos_ubuntu_v1 = magnum.drivers.mesos_ubuntu_v1.driver:Driver
    k8s_fedora_ironic_v1 = magnum.drivers.k8s_fedora_ironic_v1.driver:Driver
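The new swarm_fedora_atomic_v2 entry point resolves to a Driver class in that package. Below is a plausible minimal sketch of what such a class looks like; the HeatDriver base and the AtomicSwarmTemplateDefinition name are assumptions for illustration and are not shown in this hunk:

# Hedged sketch of magnum/drivers/swarm_fedora_atomic_v2/driver.py
from magnum.drivers.heat import driver
from magnum.drivers.swarm_fedora_atomic_v2 import template_def


class Driver(driver.HeatDriver):

    @property
    def provides(self):
        # Advertise which (server_type, os, coe) combination this driver handles.
        return [{'server_type': 'vm',
                 'os': 'fedora-atomic',
                 'coe': 'swarm-mode'}]

    def get_template_definition(self):
        return template_def.AtomicSwarmTemplateDefinition()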
11
tox.ini
@ -87,6 +87,17 @@ commands =
  find . -type f -name "*.py[c|o]" -delete
  bash tools/pretty_tox.sh '{posargs}'

[testenv:functional-swarm-mode]
sitepackages = True
setenv = {[testenv]setenv}
         OS_TEST_PATH=./magnum/tests/functional/swarm_mode
         OS_TEST_TIMEOUT=7200
deps =
  {[testenv]deps}
commands =
  find . -type f -name "*.py[c|o]" -delete
  bash tools/pretty_tox.sh '{posargs}'

[testenv:functional-mesos]
sitepackages = True
setenv = {[testenv]setenv}
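With this environment in place, the swarm-mode functional suite is invoked like the existing functional targets; a hedged usage example (it assumes the same functional-test credentials and devstack setup used by the other suites):

tox -e functional-swarm-mode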