Browse Source

Deployment, Readiness, Safe Shutdown, & Scaling for glance

This patch switches glance over to use the k8s Deployment type.
It also puts an HAProxy instance in front of it so it can safely be
drained to perform rolling upgrades/node migrations and scale-downs.
It blocks access until the service is up, via a readiness probe.
Lastly, it removes an unneeded volume when Ceph direct access is
configured, and it makes the glance services externally available.

We need to add more HAProxy support in the CLI as a follow-up.

Partially-Implements: blueprint deployments
Partially-Implements: blueprint kolla-kubernetes-service-exposure
Partially-Implements: blueprint api-termination

Change-Id: Id95c2f0d891882a6cc91a8248156c998a204a294
changes/95/354895/20
Kevin Fox 6 years ago
parent
commit
6fa5b0273d
  1. 10
      etc/kolla-kubernetes/service_resources.yml
  2. 7
      kolla_kubernetes/service_resources.py
  3. 49
      services/common/api-haproxy-configmap.yml.j2
  4. 124
      services/glance/glance-api-pod.yml.j2
  5. 2
      services/glance/glance-api-service.yml.j2
  6. 4
      services/glance/glance-bootstrap-job.yml.j2
  7. 115
      services/glance/glance-registry-pod.yml.j2
  8. 2
      services/glance/glance-registry-service.yml.j2

10
etc/kolla-kubernetes/service_resources.yml

@@ -185,8 +185,18 @@ kolla-kubernetes:
- name: glance-bootstrap-job
template: services/glance/glance-bootstrap-job.yml.j2
pod:
- name: glance-api-haproxy-configmap
template: services/common/api-haproxy-configmap.yml.j2
vars:
configmap_name: glance-api-haproxy
port_name: glance_api_port
- name: glance-api-pod
template: services/glance/glance-api-pod.yml.j2
- name: glance-registry-haproxy-configmap
template: services/common/api-haproxy-configmap.yml.j2
vars:
configmap_name: glance-registry-haproxy
port_name: glance_registry_port
- name: glance-registry-pod
template: services/glance/glance-registry-pod.yml.j2
- name: nova

7
kolla_kubernetes/service_resources.py

@@ -76,6 +76,10 @@ class KollaKubernetesResources(object):
files.append(service_ansible_file)
files.append(os.path.join(kolla_dir,
'ansible/roles/common/defaults/main.yml'))
# FIXME probably should move this stuff into
# ansible/roles/common/defaults/main.yml instead.
files.append(os.path.join(kolla_dir,
'ansible/roles/haproxy/defaults/main.yml'))
# Create the config dict
x = JinjaUtils.merge_configs_to_dict(
@@ -84,6 +88,9 @@ class KollaKubernetesResources(object):
# Render values containing nested jinja variables
r = JinjaUtils.dict_self_render(x)
# Add a self referential link so templates can look up things by name.
r['global'] = r
# Update the cache
KollaKubernetesResources._jinja_dict_cache[cache_key] = r
return r

49
services/common/api-haproxy-configmap.yml.j2

@@ -0,0 +1,49 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ kolla_kubernetes.template.vars.configmap_name }}
data:
haproxy.cfg: |
global
chroot /var/lib/haproxy
user haproxy
group haproxy
daemon
log /var/lib/kolla/heka/log local0
maxconn 4000
stats socket /var/lib/kolla/haproxy/haproxy.sock
defaults
log global
mode http
option redispatch
option httplog
option forwardfor
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout check 10s
listen api
bind 0.0.0.0:{{ global[kolla_kubernetes.template.vars.port_name] }}
server local-api 127.0.0.1:8080 check inter 2000 rise 2 fall 5
{% if kolla_kubernetes.template.vars.configmap_name == 'nova-api' %}
listen metadata
bind 0.0.0.0:{{ nova_metadata_port }}
server local-meta 127.0.0.1:8081 check inter 2000 rise 2 fall 5
{% endif %}
config.json: |
{
"command": "/usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid",
"config_files": [{
"source": "{{ container_config_directory }}/haproxy.cfg",
"dest": "/etc/haproxy/haproxy.cfg",
"owner": "root",
"perm": "0644"
}]
}

124
services/glance/glance-api-pod.yml.j2

@@ -1,25 +1,121 @@
{%- set resourceName = kolla_kubernetes.cli.args.service_name %}
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: glance-api
spec:
replicas: {{ glance_api_replicas }}
selector:
service: glance
type: api
strategy:
{% if glance_backend_ceph == "yes" %}
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
{% else %}
rollingUpdate:
maxSurge: 0
maxUnavailable: 100%
type: RollingUpdate
{% endif %}
template:
metadata:
labels:
service: glance
type: api
annotations:
{% if glance_backend_ceph == "yes" %}
kolla_upgrade: minor_rolling_safe
{% endif %}
#FIXME Once out of alpha, this should be converted to yaml.
#FIXME once all services are converted, bind_host can be dropped here and changed to default to 127.0.0.1 instead of 0.0.0.0.
# The init container overrides the listen address and port to ensure it does not conflict with haproxy and prevent
# other containers from directly accessing the service
pod.alpha.kubernetes.io/init-containers: '[
{
"name": "update-config",
"image": "{{ kolla_toolbox_image_full }}",
"command": [
"/bin/sh",
"-c",
"cp -a /srv/configmap/..data/* /srv/pod-main-config/;
crudini --set /srv/pod-main-config/glance-api.conf DEFAULT bind_host 127.0.0.1;
crudini --set /srv/pod-main-config/glance-api.conf DEFAULT bind_port 8080;"
],
"volumeMounts": [
{
"name": "glance-api-configmap",
"mountPath": "/srv/configmap"
},
{
"name": "pod-main-config",
"mountPath": "/srv/pod-main-config"
}
]
}
]'
spec:
#You've got 2 days to drain or figure out why it won't.
terminationGracePeriodSeconds: {{ 2 * 24 * 60 * 60 }}
containers:
- name: haproxy
image: "{{ haproxy_image_full }}"
command:
- /bin/bash
- -c
- |
kolla_start;
touch /var/lib/kolla-kubernetes/event/shutdown;
readinessProbe:
httpGet:
path: /healthcheck
port: {{ glance_api_port }}
initialDelaySeconds: 5
timeoutSeconds: 5
lifecycle:
preStop:
exec:
#FIXME move script into haproxy container
#NOTE this only works if you aren't doing a haproxy reconfigure too.
#But shouldn't ever have to do that in a setup like this.
command:
- /bin/bash
- -c
- |
kill -USR1 $(</var/run/haproxy.pid);
while true; do sleep 1000; done
volumeMounts:
- mountPath: /var/lib/kolla-kubernetes/event
name: kolla-kubernetes-events
- mountPath: {{ container_config_directory }}
name: glance-haproxy-config
- mountPath: /var/log/kolla/
name: kolla-logs
- mountPath: /etc/localtime
name: etc-localtime
env:
- name: KOLLA_CONFIG_STRATEGY
value: {{ config_strategy }}
ports:
- containerPort: {{ glance_api_port }}
name: glance-api
- name: glance-api
image: "{{ glance_api_image_full }}"
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -c
- while true; do sleep 1; [ -f /var/lib/kolla-kubernetes/event/shutdown ] && break; done
volumeMounts:
- mountPath: /var/lib/kolla-kubernetes/event
name: kolla-kubernetes-events
- mountPath: {{ container_config_directory }}
name: glance-config
name: pod-main-config
{% if glance_backend_ceph != "yes" %}
- mountPath: /var/lib/glance/
name: glance-persistent-storage
{% endif %}
- mountPath: /var/log/kolla/
name: kolla-logs
- mountPath: /etc/localtime
@@ -27,20 +123,24 @@ spec:
env:
- name: KOLLA_CONFIG_STRATEGY
value: {{ config_strategy }}
ports:
- containerPort: {{ glance_api_port }}
name: glance-api
volumes:
- name: glance-config
- name: kolla-kubernetes-events
emptyDir: {}
- name: pod-main-config
emptyDir: {}
- name: glance-api-configmap
configMap:
name: glance-api-configmap
- name: glance-haproxy-config
configMap:
name: glance-api-haproxy
{% if glance_backend_ceph != "yes" %}
- name: glance-persistent-storage
persistentVolumeClaim:
claimName: {{ resourceName }}
{% endif %}
- name: etc-localtime
hostPath:
path: /etc/localtime
- name: kolla-logs
emptyDir: {}
metadata:
name: glance-api

2
services/glance/glance-api-service.yml.j2

@@ -1,6 +1,8 @@
apiVersion: v1
kind: Service
spec:
externalIPs:
- {{ kolla_kubernetes_external_vip }}
ports:
- port: {{ glance_api_port }}
name: glance-api

4
services/glance/glance-bootstrap-job.yml.j2

@@ -84,8 +84,10 @@ spec:
- mountPath: {{ container_config_directory }}
name: glance-api-config
readOnly: true
{% if glance_backend_ceph != "yes" %}
- mountPath: /var/lib/glance/
name: glance-persistent-storage
{% endif %}
- mountPath: /var/log/kolla
name: kolla-logs
- image: "{{ kolla_toolbox_image_full }}"
@@ -214,9 +216,11 @@ spec:
- name: glance-api-config
configMap:
name: glance-api-configmap
{% if glance_backend_ceph != "yes" %}
- name: glance-persistent-storage
persistentVolumeClaim:
claimName: {{ resourceName }}
{% endif %}
- name: dev
hostPath:
path: /dev

115
services/glance/glance-registry-pod.yml.j2

@@ -1,35 +1,128 @@
apiVersion: v1
kind: ReplicationController
{%- set resourceName = kolla_kubernetes.cli.args.service_name %}
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: glance-registry
spec:
replicas: {{ glance_registry_replicas }}
selector:
service: glance
type: registry
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
service: glance
type: registry
annotations:
kolla_upgrade: minor_rolling_safe
#FIXME Once out of alpha, this should be converted to yaml.
#FIXME once all services are converted, bind_host can be dropped here and changed to default to 127.0.0.1 instead of 0.0.0.0.
# The init container overrides the listen address and port to ensure it does not conflict with haproxy and prevent
# other containers from directly accessing the service
pod.alpha.kubernetes.io/init-containers: '[
{
"name": "update-config",
"image": "{{ kolla_toolbox_image_full }}",
"command": [
"/bin/sh",
"-c",
"cp -a /srv/configmap/..data/* /srv/pod-main-config/;
crudini --set /srv/pod-main-config/glance-registry.conf DEFAULT bind_host 127.0.0.1;
crudini --set /srv/pod-main-config/glance-registry.conf DEFAULT bind_port 8080;"
],
"volumeMounts": [
{
"name": "glance-registry-configmap",
"mountPath": "/srv/configmap"
},
{
"name": "pod-main-config",
"mountPath": "/srv/pod-main-config"
}
]
}
]'
spec:
#You've got 2 days to drain or figure out why it won't.
terminationGracePeriodSeconds: {{ 2 * 24 * 60 * 60 }}
containers:
- name: glance-registry
image: "{{ glance_registry_image_full }}"
- name: haproxy
image: "{{ haproxy_image_full }}"
command:
- /bin/bash
- -c
- |
kolla_start;
touch /var/lib/kolla-kubernetes/event/shutdown;
readinessProbe:
httpGet:
path: /healthcheck
port: {{ glance_registry_port }}
initialDelaySeconds: 5
timeoutSeconds: 5
lifecycle:
preStop:
exec:
#FIXME move script into haproxy container
#NOTE this only works if you aren't doing a haproxy reconfigure too.
#But shouldn't ever have to do that in a setup like this.
command:
- /bin/bash
- -c
- |
kill -USR1 $(</var/run/haproxy.pid);
while true; do sleep 1000; done
volumeMounts:
- mountPath: /var/lib/kolla-kubernetes/event
name: kolla-kubernetes-events
- mountPath: {{ container_config_directory }}
name: glance-config
name: glance-haproxy-config
- mountPath: /var/log/kolla/
name: kolla-logs
- mountPath: /etc/localtime
name: etc-localtime
env:
- name: KOLLA_CONFIG_STRATEGY
value: {{ config_strategy }}
ports:
- containerPort: {{ glance_registry_port }}
name: glance-registry
- name: glance-registry
image: "{{ glance_registry_image_full }}"
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -c
- while true; do sleep 1; [ -f /var/lib/kolla-kubernetes/event/shutdown ] && break; done
volumeMounts:
- mountPath: /var/lib/kolla-kubernetes/event
name: kolla-kubernetes-events
- mountPath: {{ container_config_directory }}
name: pod-main-config
- mountPath: /var/log/kolla/
name: kolla-logs
- mountPath: /etc/localtime
name: etc-localtime
env:
- name: KOLLA_CONFIG_STRATEGY
value: {{ config_strategy }}
volumes:
- name: glance-config
- name: kolla-kubernetes-events
emptyDir: {}
- name: pod-main-config
emptyDir: {}
- name: glance-registry-configmap
configMap:
name: glance-registry-configmap
- name: glance-haproxy-config
configMap:
name: glance-registry-haproxy
- name: etc-localtime
hostPath:
path: /etc/localtime
- name: kolla-logs
emptyDir: {}
metadata:
name: glance-registry

2
services/glance/glance-registry-service.yml.j2

@@ -1,6 +1,8 @@
apiVersion: v1
kind: Service
spec:
externalIPs:
- {{ kolla_kubernetes_external_vip }}
ports:
- port: {{ glance_registry_port }}
name: glance-registry

Loading…
Cancel
Save