
Remove demos directory from contrib

The demos are already part of the kolla-ansible repository.

Change-Id: I075ea1308c24d4260cafbc502a2f2815df1ee14c
Ref: changes/11/502911/1
Author: Christian Berendt (4 years ago)
Commit: 2c2bc48630
  1. README.rst (2 lines changed)
  2. contrib/demos/heat/README.rst (15 lines changed)
  3. contrib/demos/heat/launch (11 lines changed)
  4. contrib/demos/heat/steak-rg.yaml (43 lines changed)
  5. contrib/demos/heat/steak.yaml (54 lines changed)
  6. contrib/demos/magnum/redis (5 lines changed)
  7. contrib/demos/magnum/redis-kube/README.rst (197 lines changed)
  8. contrib/demos/magnum/redis-kube/redis-controller.yaml (28 lines changed)
  9. contrib/demos/magnum/redis-kube/redis-master.yaml (33 lines changed)
  10. contrib/demos/magnum/redis-kube/redis-proxy.yaml (14 lines changed)
  11. contrib/demos/magnum/redis-kube/redis-sentinel-controller.yaml (23 lines changed)
  12. contrib/demos/magnum/redis-kube/redis-sentinel-service.yaml (13 lines changed)
  13. contrib/demos/magnum/start (40 lines changed)
  14. contrib/demos/magnum/stop (8 lines changed)
  15. tools/loc (2 lines changed)

README.rst (2 lines changed)

@@ -157,7 +157,7 @@ Kolla provides images to deploy the following infrastructure components:
 Directories
 ===========
-- ``contrib`` - Contains demos scenarios for Heat and Murano.
+- ``contrib`` - Contains sample template override files.
 - ``doc`` - Contains documentation.
 - ``docker`` - Contains jinja2 templates for the Docker build system.
 - ``etc`` - Contains a reference etc directory structure which requires

contrib/demos/heat/README.rst (15 lines changed)

@@ -1,15 +0,0 @@
A Kolla Demo using Heat
=======================

By default, the launch script will spawn 3 Nova instances on a Neutron
network created from the tools/init-runonce script. Edit the VM\_COUNT
parameter in the launch script if you would like to spawn a different
number of Nova instances. Edit the IMAGE\_FLAVOR if you would like to
launch images using a flavor other than m1.tiny.

Then run the script:

::

    $ ./launch

contrib/demos/heat/launch (11 lines changed)

@@ -1,11 +0,0 @@
VM_COUNT=3
IMAGE_FLAVOR=m1.small
PUBLIC_NET_ID=$(neutron net-list | grep public | awk '{print $2}')
DEMO_NET_ID=$(neutron net-list | grep demo | awk '{print $2}')
DEMO_SUBNET_ID=$(neutron net-list | grep demo | awk '{print $6}')
echo Public net id is $PUBLIC_NET_ID
echo Demo net id is $DEMO_NET_ID
echo Demo subnet id is $DEMO_SUBNET_ID
heat stack-create steak --template-file steak-rg.yaml --parameters="vm_count=$VM_COUNT;image_flavor=$IMAGE_FLAVOR;public_net_id=$PUBLIC_NET_ID;demo_net_id=$DEMO_NET_ID;demo_subnet_id=$DEMO_SUBNET_ID"
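
For reference (not part of the deleted files), once the stack is created, its status and the addresses exported by the templates below could be read back with the heat CLI of the same era; ``output-show`` assumes a python-heatclient new enough to provide it:

.. code:: sh

    # Check that the stack reached CREATE_COMPLETE
    heat stack-show steak | grep stack_status
    # Read back the outputs defined in steak-rg.yaml
    heat output-show steak eth0
    heat output-show steak float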

contrib/demos/heat/steak-rg.yaml (43 lines changed)

@@ -1,43 +0,0 @@
heat_template_version: 2013-05-23

parameters:
  public_net_id:
    type: string
    description: uuid of a network to use for floating ip addresses
  demo_net_id:
    type: string
    description: uuid of a subnet on the fixed network to use for creating ports
  demo_subnet_id:
    type: string
    description: uuid of a subnet on the fixed network to use for creating ports
  vm_count:
    type: string
    description: Number of VMs to launch
  image_flavor:
    type: string
    description: Image flavor to use when launching VMs

resources:
  steak:
    type: OS::Heat::ResourceGroup
    properties:
      count:
        get_param: vm_count
      resource_def:
        type: steak.yaml
        properties:
          image_flavor: {get_param: image_flavor}
          public_net_id: {get_param: public_net_id}
          demo_net_id: {get_param: demo_net_id}
          demo_subnet_id: {get_param: demo_subnet_id}

outputs:
  eth0:
    value: {get_attr: [steak, eth0]}
  float:
    value: {get_attr: [steak, float]}

contrib/demos/heat/steak.yaml (54 lines changed)

@@ -1,54 +0,0 @@
heat_template_version: 2013-05-23

parameters:
  public_net_id:
    type: string
    description: uuid of a network to use for floating ip addresses
  demo_net_id:
    type: string
    description: uuid of a subnet on the fixed network to use for creating ports
  demo_subnet_id:
    type: string
    description: uuid of a subnet on the fixed network to use for creating ports
  image_flavor:
    type: string
    description: Image flavor to use when launching VMs

resources:
  steak_node:
    type: "OS::Nova::Server"
    properties:
      key_name: mykey
      image: cirros
      flavor:
        get_param: image_flavor
      networks:
        - port:
            get_resource: steak_node_eth0

  steak_node_eth0:
    type: "OS::Neutron::Port"
    properties:
      network_id:
        get_param: demo_net_id
      fixed_ips:
        - subnet_id:
            get_param: demo_subnet_id

  steak_node_floating:
    type: "OS::Neutron::FloatingIP"
    properties:
      floating_network_id:
        get_param: public_net_id
      port_id:
        get_resource: steak_node_eth0

outputs:
  eth0:
    value: {get_attr: [steak_node_eth0, fixed_ips, 0, ip_address]}
  float:
    value: {get_attr: [steak_node_floating, floating_ip_address]}

contrib/demos/magnum/redis (5 lines changed)

@@ -1,5 +0,0 @@
magnum pod-create --manifest redis-kube/redis-master.yaml --bay testbay
magnum service-create --manifest redis-kube/redis-sentinel-service.yaml --bay testbay
magnum rc-create --manifest redis-kube/redis-controller.yaml --bay testbay
magnum rc-create --manifest redis-kube/redis-sentinel-controller.yaml --bay testbay

contrib/demos/magnum/redis-kube/README.rst (197 lines changed)

@@ -1,197 +0,0 @@
Reliable, Scalable Redis on Kubernetes
--------------------------------------

The following document describes the deployment of a reliable,
multi-node Redis on Kubernetes. It deploys a master with replicated
slaves, as well as replicated redis sentinels which are used for health
checking and failover.

Prerequisites
~~~~~~~~~~~~~

This example assumes that you have a Kubernetes cluster installed and
running, and that you have installed the ``kubectl`` command line tool
somewhere in your path. Please see the `getting started
guides <https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/getting-started-guides>`__
for installation instructions for your platform.

A note for the impatient
~~~~~~~~~~~~~~~~~~~~~~~~

This is a somewhat long tutorial. If you want to jump straight to the
"do it now" commands, please see the `tl; dr <#tl-dr>`__ at the end.
Turning up an initial master/sentinel pod
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Our initial master and sentinel run together in a
`*Pod* <https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/user-guide/pods.md>`__.
A Pod is one or more containers that *must* be scheduled onto the same
host. All containers in a pod share a network namespace, and may
optionally share mounted volumes.

We will use the shared network namespace to bootstrap our Redis
cluster. In particular, the very first sentinel needs to know how to
find the master (subsequent sentinels just ask the first sentinel).
Because all containers in a Pod share a network namespace, the sentinel
can simply look at ``$(hostname -i):6379``.

Here is the config for the initial master and sentinel pod:
`redis-master.yaml <redis-master.yaml>`__

Create this master as follows:

.. code:: sh

    kubectl create -f examples/redis/v1beta3/redis-master.yaml
Turning up a sentinel service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In Kubernetes a *Service* describes a set of Pods that perform the same
task. For example, the set of nodes in a Cassandra cluster, or even the
single node we created above. An important use for a Service is to
create a load balancer which distributes traffic across members of the
set. But a *Service* can also be used as a standing query which makes a
dynamically changing set of Pods (or the single Pod we've already
created) available via the Kubernetes API.

In Redis, we will use a Kubernetes Service to provide a discoverable
endpoint for the Redis sentinels in the cluster. From the sentinels,
Redis clients can find the master, and then the slaves and other
relevant info for the cluster. This enables new members to join the
cluster when failures occur.

Here is the definition of the sentinel service:
`redis-sentinel-service.yaml <redis-sentinel-service.yaml>`__

Create this service:

.. code:: sh

    kubectl create -f examples/redis/v1beta3/redis-sentinel-service.yaml
Turning up replicated redis servers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

So far, what we have done is pretty manual, and not very fault-tolerant.
If the ``redis-master`` pod that we previously created is destroyed for
some reason (e.g. a machine dying), our Redis service goes away with it.

In Kubernetes a *Replication Controller* is responsible for replicating
sets of identical pods. Like a *Service* it has a selector query which
identifies the members of its set. Unlike a *Service* it also has a
desired number of replicas, and it will create or delete *Pods* to
ensure that the number of *Pods* matches up with its desired state.

Replication Controllers will "adopt" existing pods that match their
selector query, so let's create a Replication Controller with a single
replica to adopt our existing Redis server:
`redis-controller.yaml <redis-controller.yaml>`__

The bulk of this controller config is actually identical to the
redis-master pod definition above. It forms the template or "cookie
cutter" that defines what it means to be a member of this set.

Create this controller:

.. code:: sh

    kubectl create -f examples/redis/v1beta3/redis-controller.yaml

We'll do the same thing for the sentinel. Here is the controller config:
`redis-sentinel-controller.yaml <redis-sentinel-controller.yaml>`__

We create it as follows:

.. code:: sh

    kubectl create -f examples/redis/v1beta3/redis-sentinel-controller.yaml
Resize our replicated pods
~~~~~~~~~~~~~~~~~~~~~~~~~~

Creating those replication controllers didn't actually do anything at
first: we only asked for one sentinel and one redis server, and they
already existed, so nothing changed. Now we will add more replicas:

.. code:: sh

    kubectl resize rc redis --replicas=3

.. code:: sh

    kubectl resize rc redis-sentinel --replicas=3

This will create two additional replicas of the redis server and two
additional replicas of the redis sentinel.

Unlike our original redis-master pod, these pods exist independently,
and they use the ``redis-sentinel-service`` that we defined above to
discover and join the cluster.
Delete our manual pod
~~~~~~~~~~~~~~~~~~~~~

The final step in the cluster turn-up is to delete the original
redis-master pod that we created manually. While it was useful for
bootstrapping discovery in the cluster, we really don't want the
lifespan of our sentinel to be tied to the lifespan of one of our redis
servers, and now that we have a successful, replicated redis sentinel
service up and running, the binding is unnecessary.

Delete the master as follows:

.. code:: sh

    kubectl delete pods redis-master

Now let's take a close look at what happens after this pod is deleted.
There are three things that happen:

1. The redis replication controller notices that its desired state is 3
   replicas, but there are currently only 2 replicas, and so it creates
   a new redis server to bring the replica count back up to 3.
2. The redis-sentinel replication controller likewise notices the
   missing sentinel, and also creates a new sentinel.
3. The redis sentinels themselves realize that the master has
   disappeared from the cluster, and begin the election procedure for
   selecting a new master. They perform this election and selection, and
   choose one of the existing redis server replicas to be the new master.
Conclusion
~~~~~~~~~~

At this point we now have a reliable, scalable Redis installation. By
resizing the replication controller for redis servers, we can increase
or decrease the number of read-slaves in our cluster. Likewise, if
failures occur, the redis-sentinels will perform master election and
select a new master.

tl; dr
~~~~~~

For those of you who are impatient, here is the summary of commands we
ran in this tutorial:

.. code:: sh

    # Create a bootstrap master
    kubectl create -f examples/redis/v1beta3/redis-master.yaml

    # Create a service to track the sentinels
    kubectl create -f examples/redis/v1beta3/redis-sentinel-service.yaml

    # Create a replication controller for redis servers
    kubectl create -f examples/redis/v1beta3/redis-controller.yaml

    # Create a replication controller for redis sentinels
    kubectl create -f examples/redis/v1beta3/redis-sentinel-controller.yaml

    # Resize both replication controllers
    kubectl resize rc redis --replicas=3
    kubectl resize rc redis-sentinel --replicas=3

    # Delete the original master pod
    kubectl delete pods redis-master
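
As a quick check (not part of the original tutorial), the pods created by the two controllers can be listed with the label selectors defined in the demo manifests:

.. code:: sh

    # Both selectors come from the controller definitions in this demo
    kubectl get pods -l name=redis
    kubectl get pods -l redis-sentinel=true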

contrib/demos/magnum/redis-kube/redis-controller.yaml (28 lines changed)

@@ -1,28 +0,0 @@
apiVersion: v1beta3
kind: ReplicationController
metadata:
  name: redis
spec:
  replicas: 2
  selector:
    name: redis
  template:
    metadata:
      labels:
        name: redis
    spec:
      containers:
      - name: redis
        image: kubernetes/redis:v1
        ports:
        - containerPort: 6379
        resources:
          limits:
            cpu: "1"
        volumeMounts:
        - mountPath: /redis-master-data
          name: data
      volumes:
      - name: data
        emptyDir: {}

contrib/demos/magnum/redis-kube/redis-master.yaml (33 lines changed)

@@ -1,33 +0,0 @@
apiVersion: v1beta3
kind: Pod
metadata:
  labels:
    name: redis
    redis-sentinel: "true"
    role: master
  name: redis-master
spec:
  containers:
  - name: master
    image: kubernetes/redis:v1
    env:
    - name: MASTER
      value: "true"
    ports:
    - containerPort: 6379
    resources:
      limits:
        cpu: "1"
    volumeMounts:
    - mountPath: /redis-master-data
      name: data
  - name: sentinel
    image: kubernetes/redis:v1
    env:
    - name: SENTINEL
      value: "true"
    ports:
    - containerPort: 26379
  volumes:
  - name: data
    emptyDir: {}

contrib/demos/magnum/redis-kube/redis-proxy.yaml (14 lines changed)

@@ -1,14 +0,0 @@
apiVersion: v1beta3
kind: Pod
metadata:
  labels:
    name: redis-proxy
    role: proxy
  name: redis-proxy
spec:
  containers:
  - name: proxy
    image: kubernetes/redis-proxy:v1
    ports:
    - containerPort: 6379
      name: api

contrib/demos/magnum/redis-kube/redis-sentinel-controller.yaml (23 lines changed)

@@ -1,23 +0,0 @@
apiVersion: v1beta3
kind: ReplicationController
metadata:
  name: redis-sentinel
spec:
  replicas: 2
  selector:
    redis-sentinel: "true"
  template:
    metadata:
      labels:
        name: redis-sentinel
        redis-sentinel: "true"
        role: sentinel
    spec:
      containers:
      - name: sentinel
        image: kubernetes/redis:v1
        env:
        - name: SENTINEL
          value: "true"
        ports:
        - containerPort: 26379

contrib/demos/magnum/redis-kube/redis-sentinel-service.yaml (13 lines changed)

@@ -1,13 +0,0 @@
apiVersion: v1beta3
kind: Service
metadata:
  labels:
    name: sentinel
    role: service
  name: redis-sentinel
spec:
  ports:
  - port: 26379
    targetPort: 26379
  selector:
    redis-sentinel: "true"

contrib/demos/magnum/start (40 lines changed)

@@ -1,40 +0,0 @@
#!/bin/bash

NETWORK_MANAGER=$(grep -sri NETWORK_MANAGER ../../compose/openstack.env | cut -f2 -d "=")
if [ "$NETWORK_MANAGER" != "neutron" ]; then
    echo 'Magnum depends on the Neutron network manager to operate.'
    echo 'Exiting because the network manager is' "$NETWORK_MANAGER".
    exit 1
fi

echo Downloading glance image.
IMAGE_URL=https://fedorapeople.org/groups/magnum
IMAGE=fedora-21-atomic-3.qcow2
if ! [ -f "$IMAGE" ]; then
    curl -L -o ./$IMAGE $IMAGE_URL/$IMAGE
fi

NIC_ID=$(neutron net-show public1 | awk '/ id /{print $4}')

glance image-delete fedora-21-atomic-3 2> /dev/null

echo Loading fedora-atomic image into glance
glance image-create --name fedora-21-atomic-3 --progress --is-public true --disk-format qcow2 --container-format bare --file ./$IMAGE
GLANCE_IMAGE_ID=$(glance image-show fedora-21-atomic-3 | grep id | awk '{print $4}')

echo registering os-distro property with image
glance image-update $GLANCE_IMAGE_ID --property os_distro=fedora-atomic

echo Creating baymodel
magnum baymodel-create \
    --name testbaymodel \
    --image-id $GLANCE_IMAGE_ID \
    --keypair-id mykey \
    --fixed-network 10.0.3.0/24 \
    --external-network-id $NIC_ID \
    --dns-nameserver 8.8.8.8 --flavor-id m1.small \
    --docker-volume-size 5 --coe kubernetes

echo Creating Bay
magnum bay-create --name testbay --baymodel testbaymodel --node-count 2
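
A possible follow-up step, not in the original script: block until the bay build finishes before scheduling pods on it. This sketch assumes ``magnum bay-show`` reports a status field that reaches CREATE_COMPLETE:

.. code:: sh

    # Hypothetical wait loop; adjust the sleep interval as needed
    until magnum bay-show testbay | grep -q CREATE_COMPLETE; do
        sleep 10
    done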

contrib/demos/magnum/stop (8 lines changed)

@@ -1,8 +0,0 @@
#!/bin/bash

magnum bay-delete testbay
while magnum bay-list | grep -q testbay; do
    sleep 1
done
magnum baymodel-delete testbaymodel

tools/loc (2 lines changed)

@@ -5,7 +5,6 @@ DOC=`find doc -type f -exec cat {} \; | wc -l`
 TESTS=`find tests -type f -exec cat {} \; | wc -l`
 BUILD=`find kolla -type f -exec cat {} \; | wc -l`
-DEMOS=`find contrib/demos -type f -exec cat {} \; | wc -l`
 SPECS=`find specs -type f -exec cat {} \; | wc -l`
 ETC=`find etc -type f -exec cat {} \; | wc -l`
 TOOLS=`find tools -type f -exec cat {} \; | wc -l`
@@ -17,7 +16,6 @@ TOTAL=$(($CORE+$SUPP))
 echo "Build $BUILD"
-echo "Demos $DEMOS"
 echo "Doc $DOC"
 echo "Etc $ETC"
 echo "Docker $DOCKER"
