Merge branch 'master' into nova_chart

Commit fc963e3090 by Alan Meadows, 2017-01-10 15:25:30 -08:00 (committed by GitHub)
70 changed files with 3115 additions and 217 deletions

@@ -1,12 +1,12 @@
.PHONY: ceph bootstrap mariadb keystone memcached rabbitmq common openstack neutron nova maas all clean
.PHONY: ceph bootstrap mariadb keystone memcached rabbitmq common openstack neutron nova cinder heat maas all clean
B64_DIRS := common/secrets
B64_EXCLUDE := $(wildcard common/secrets/*.b64)
CHARTS := ceph mariadb rabbitmq memcached keystone glance horizon neutron nova maas openstack
CHARTS := ceph mariadb rabbitmq memcached keystone glance horizon neutron nova cinder heat maas openstack
COMMON_TPL := common/templates/_globals.tpl
all: common ceph bootstrap mariadb rabbitmq memcached keystone glance horizon neutron nova maas openstack
all: common ceph bootstrap mariadb rabbitmq memcached keystone glance horizon neutron nova cinder heat maas openstack
common: build-common
@@ -19,6 +19,8 @@ mariadb: build-mariadb
keystone: build-keystone
cinder: build-cinder
horizon: build-horizon
rabbitmq: build-rabbitmq
@@ -29,6 +31,8 @@ neutron: build-neutron
nova: build-nova
heat: build-heat
maas: build-maas
memcached: build-memcached
@@ -46,4 +50,3 @@ build-%:
if [ -f $*/requirements.yaml ]; then helm dep up $*; fi
helm lint $*
helm package $*

cinder/Chart.yaml
@@ -0,0 +1,3 @@
description: A Helm chart for cinder
name: cinder
version: 0.1.0

cinder/requirements.yaml
@@ -0,0 +1,4 @@
dependencies:
- name: common
repository: http://localhost:8879/charts
version: 0.1.0

@@ -0,0 +1,45 @@
# This file is required because we use a slightly different endpoint layout in
# the values yaml, until we can make this change for all services.
# this function returns the endpoint uri for a service, it takes a tuple
# input in the form: service-type, endpoint-class, port-name. eg:
# { tuple "orchestration" "public" "api" . | include "endpoint_type_lookup_addr" }
# will return the appropriate URI. Once merged this should phase out the above.
{{- define "endpoint_type_lookup_addr" -}}
{{- $type := index . 0 -}}
{{- $endpoint := index . 1 -}}
{{- $port := index . 2 -}}
{{- $context := index . 3 -}}
{{- $endpointMap := index $context.Values.endpoints $type }}
{{- $fqdn := $context.Release.Namespace -}}
{{- if $context.Values.endpoints.fqdn -}}
{{- $fqdn := $context.Values.endpoints.fqdn -}}
{{- end -}}
{{- with $endpointMap -}}
{{- $endpointScheme := .scheme }}
{{- $endpointHost := index .hosts $endpoint | default .hosts.default}}
{{- $endpointPort := index .port $port }}
{{- $endpointPath := .path }}
{{- printf "%s://%s.%s:%1.f%s" $endpointScheme $endpointHost $fqdn $endpointPort $endpointPath | quote -}}
{{- end -}}
{{- end -}}
#-------------------------------
# endpoint name lookup
#-------------------------------
# this function is used in endpoint management templates
# it returns the service name for an openstack service type eg:
# { tuple "orchestration" . | include "endpoint_name_lookup" }
# will return "heat"
{{- define "endpoint_name_lookup" -}}
{{- $type := index . 0 -}}
{{- $context := index . 1 -}}
{{- $endpointMap := index $context.Values.endpoints $type }}
{{- $endpointName := index $endpointMap "name" }}
{{- $endpointName | quote -}}
{{- end -}}
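Taken with the `endpoints` block added to cinder/values.yaml in this commit, a sketch of what these two helpers resolve to (the release namespace is assumed to be `openstack` for illustration):

```
# {{ tuple "identity" "admin" "admin" . | include "endpoint_type_lookup_addr" }}
#   => "http://keystone-api.openstack:35357/v3"
# {{ tuple "volume" . | include "endpoint_name_lookup" }}
#   => "cinder"
```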

@@ -0,0 +1,21 @@
#!/bin/bash
set -ex
export HOME=/tmp
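# Ensure the cinder database exists (the Ansible mysql_db module is idempotent)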
ansible localhost -vvv \
-m mysql_db -a "login_host='{{ .Values.database.address }}' \
login_port='{{ .Values.database.port }}' \
login_user='{{ .Values.database.root_user }}' \
login_password='{{ .Values.database.root_password }}' \
name='{{ .Values.database.cinder_database_name }}'"
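# Ensure the cinder database user exists and grant it full privileges on that database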
ansible localhost -vvv \
-m mysql_user -a "login_host='{{ .Values.database.address }}' \
login_port='{{ .Values.database.port }}' \
login_user='{{ .Values.database.root_user }}' \
login_password='{{ .Values.database.root_password }}' \
name='{{ .Values.database.cinder_user }}' \
password='{{ .Values.database.cinder_password }}' \
host='%' \
priv='{{ .Values.database.cinder_database_name }}.*:ALL' \
append_privs='yes'"

@@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: cinder-bin
data:
db-init.sh: |+
{{ tuple "bin/_db-init.sh.tpl" . | include "template" | indent 4 }}
ks-service.sh: |+
{{- include "common_keystone_service" . | indent 4 }}
ks-endpoints.sh: |+
{{- include "common_keystone_endpoints" . | indent 4 }}
ks-user.sh: |+
{{- include "common_keystone_user" . | indent 4 }}

@@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: cinder-etc
data:
cinder.conf: |+
{{ tuple "etc/_cinder.conf.tpl" . | include "template" | indent 4 }}
api-paste.ini: |+
{{ tuple "etc/_cinder-api-paste.ini.tpl" . | include "template" | indent 4 }}
policy.json: |+
{{ tuple "etc/_policy.json.tpl" . | include "template" | indent 4 }}
ceph.conf: |+
{{ tuple "etc/_ceph.conf.tpl" . | include "template" | indent 4 }}
ceph.client.{{ .Values.ceph.cinder_user }}.keyring: |+
{{ tuple "etc/_ceph-cinder.keyring.tpl" . | include "template" | indent 4 }}

@@ -0,0 +1,93 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: cinder-api
spec:
replicas: {{ .Values.replicas.api }}
revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
strategy:
type: {{ .Values.upgrades.pod_replacement_strategy }}
{{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
{{ end }}
template:
metadata:
labels:
app: cinder-api
annotations:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }}
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.api.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.api.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: cinder-api
image: {{ .Values.images.api }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- cinder-api
- --config-dir
- /etc/cinder/conf
ports:
- containerPort: {{ .Values.service.api.port }}
readinessProbe:
tcpSocket:
port: {{ .Values.service.api.port }}
volumeMounts:
- name: pod-etc-cinder
mountPath: /etc/cinder
- name: pod-var-cache-cinder
mountPath: /var/cache/cinder
- name: cinderconf
mountPath: /etc/cinder/conf/cinder.conf
subPath: cinder.conf
readOnly: true
- name: cinderpaste
mountPath: /etc/cinder/api-paste.ini
subPath: api-paste.ini
readOnly: true
- name: cinderpolicy
mountPath: /etc/cinder/policy.json
subPath: policy.json
readOnly: true
volumes:
- name: pod-etc-cinder
emptyDir: {}
- name: pod-var-cache-cinder
emptyDir: {}
- name: cinderconf
configMap:
name: cinder-etc
- name: cinderpaste
configMap:
name: cinder-etc
- name: cinderpolicy
configMap:
name: cinder-etc
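For reference, a sketch of how the dependency gate in the annotation above resolves with the defaults in cinder/values.yaml, assuming `joinListWithColon` joins list entries with a colon as its name suggests:

```
# Rendered env for the kubernetes-entrypoint init container:
- name: DEPENDENCY_SERVICE
  value: "mariadb:keystone-api"
- name: DEPENDENCY_JOBS
  value: "cinder-db-sync:cinder-ks-user:cinder-ks-endpoints"
```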

@@ -0,0 +1,88 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: cinder-scheduler
spec:
replicas: {{ .Values.replicas.scheduler }}
revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
strategy:
type: {{ .Values.upgrades.pod_replacement_strategy }}
{{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
{{ end }}
template:
metadata:
labels:
app: cinder-scheduler
annotations:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }}
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.scheduler.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.scheduler.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: cinder-scheduler
image: {{ .Values.images.scheduler }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- cinder-scheduler
- --config-dir
- /etc/cinder/conf
volumeMounts:
- name: pod-etc-cinder
mountPath: /etc/cinder
- name: pod-var-cache-cinder
mountPath: /var/cache/cinder
- name: cinderconf
mountPath: /etc/cinder/conf/cinder.conf
subPath: cinder.conf
readOnly: true
- name: cinderpaste
mountPath: /etc/cinder/api-paste.ini
subPath: api-paste.ini
readOnly: true
- name: cinderpolicy
mountPath: /etc/cinder/policy.json
subPath: policy.json
readOnly: true
volumes:
- name: pod-etc-cinder
emptyDir: {}
- name: pod-var-cache-cinder
emptyDir: {}
- name: cinderconf
configMap:
name: cinder-etc
- name: cinderpaste
configMap:
name: cinder-etc
- name: cinderpolicy
configMap:
name: cinder-etc

@@ -0,0 +1,84 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: cinder-volume
spec:
replicas: {{ .Values.replicas.volume }}
revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
strategy:
type: {{ .Values.upgrades.pod_replacement_strategy }}
{{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
{{ end }}
template:
metadata:
labels:
app: cinder-volume
annotations:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }}
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.volume.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: cinder-volume
image: {{ .Values.images.volume }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- cinder-volume
- --config-dir
- /etc/cinder/conf
volumeMounts:
- name: pod-etc-cinder
mountPath: /etc/cinder
- name: pod-var-cache-cinder
mountPath: /var/cache/cinder
- name: cinderconf
mountPath: /etc/cinder/conf/cinder.conf
subPath: cinder.conf
readOnly: true
- name: cephconf
mountPath: /etc/ceph/ceph.conf
subPath: ceph.conf
readOnly: true
- name: cephclientcinderkeyring
mountPath: /etc/ceph/ceph.client.{{ .Values.ceph.cinder_user }}.keyring
subPath: ceph.client.{{ .Values.ceph.cinder_user }}.keyring
readOnly: true
volumes:
- name: pod-etc-cinder
emptyDir: {}
- name: pod-var-cache-cinder
emptyDir: {}
- name: cinderconf
configMap:
name: cinder-etc
- name: cephconf
configMap:
name: cinder-etc
- name: cephclientcinderkeyring
configMap:
name: cinder-etc

@@ -0,0 +1,6 @@
[client.{{ .Values.ceph.cinder_user }}]
{{- if .Values.ceph.cinder_keyring }}
key = {{ .Values.ceph.cinder_keyring }}
{{- else }}
key = {{- include "secrets/ceph-client-key" . -}}
{{- end }}
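With the default `ceph.cinder_user` of `admin` and a keyring supplied in values, this renders roughly as sketched below; when `cinder_keyring` is null the key comes from the common chart's `secrets/ceph-client-key` template instead:

```
[client.admin]
  key = <value of .Values.ceph.cinder_keyring>
```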

@@ -0,0 +1,16 @@
[global]
rgw_thread_pool_size = 1024
rgw_num_rados_handles = 100
{{- if .Values.ceph.monitors }}
[mon]
{{ range .Values.ceph.monitors }}
[mon.{{ . }}]
host = {{ . }}
mon_addr = {{ . }}
{{ end }}
{{- else }}
mon_host = ceph-mon.ceph
{{- end }}
[client]
rbd_cache_enabled = true
rbd_cache_writethrough_until_flush = true

@@ -0,0 +1,75 @@
#############
# OpenStack #
#############
[composite:osapi_volume]
use = call:cinder.api:root_app_factory
/: apiversions
/v1: openstack_volume_api_v1
/v2: openstack_volume_api_v2
/v3: openstack_volume_api_v3
[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1
keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
[composite:openstack_volume_api_v2]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2
keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
[composite:openstack_volume_api_v3]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3
keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
[filter:request_id]
paste.filter_factory = oslo_middleware.request_id:RequestId.factory
[filter:http_proxy_to_wsgi]
paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = cinder
[filter:faultwrap]
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
[filter:osprofiler]
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
[filter:noauth]
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
[filter:sizelimit]
paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory
[app:apiv1]
paste.app_factory = cinder.api.v1.router:APIRouter.factory
[app:apiv2]
paste.app_factory = cinder.api.v2.router:APIRouter.factory
[app:apiv3]
paste.app_factory = cinder.api.v3.router:APIRouter.factory
[pipeline:apiversions]
pipeline = cors http_proxy_to_wsgi faultwrap osvolumeversionapp
[app:osvolumeversionapp]
paste.app_factory = cinder.api.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory

@@ -0,0 +1,64 @@
[DEFAULT]
debug = {{ .Values.misc.debug }}
use_syslog = False
use_stderr = True
enable_v1_api = false
volume_name_template = %s
osapi_volume_workers = {{ .Values.api.workers }}
osapi_volume_listen = 0.0.0.0
osapi_volume_listen_port = {{ .Values.service.api.port }}
api_paste_config = /etc/cinder/api-paste.ini
glance_api_servers = "{{ .Values.glance.proto }}://{{ .Values.glance.host }}:{{ .Values.glance.port }}"
glance_api_version = {{ .Values.glance.version }}
enabled_backends = {{ include "joinListWithColon" .Values.backends.enabled }}
auth_strategy = keystone
os_region_name = {{ .Values.keystone.cinder_region_name }}
# ensures that our volume worker service-list doesn't
# explode with dead agents from terminated containers
# by pinning the agent identifier
host=cinder-volume-worker
[database]
connection = mysql+pymysql://{{ .Values.database.cinder_user }}:{{ .Values.database.cinder_password }}@{{ .Values.database.address }}:{{ .Values.database.port }}/{{ .Values.database.cinder_database_name }}
max_retries = -1
[keystone_authtoken]
auth_url = {{ .Values.keystone.auth_url }}
auth_type = password
project_domain_name = {{ .Values.keystone.cinder_project_domain }}
user_domain_name = {{ .Values.keystone.cinder_user_domain }}
project_name = {{ .Values.keystone.cinder_project_name }}
username = {{ .Values.keystone.cinder_user }}
password = {{ .Values.keystone.cinder_password }}
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_rabbit]
rabbit_userid = {{ .Values.messaging.user }}
rabbit_password = {{ .Values.messaging.password }}
rabbit_ha_queues = true
rabbit_hosts = {{ .Values.messaging.hosts }}
[rbd1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = {{ .Values.backends.rbd1.pool }}
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = {{ .Values.backends.rbd1.user }}
{{- if .Values.backends.rbd1.secret }}
rbd_secret_uuid = {{ .Values.backends.rbd1.secret }}
{{- else }}
rbd_secret_uuid = {{- include "secrets/ceph-client-key" . -}}
{{- end }}
report_discard_supported = True

@@ -0,0 +1,138 @@
{
"context_is_admin": "role:admin",
"admin_or_owner": "is_admin:True or project_id:%(project_id)s",
"default": "rule:admin_or_owner",
"admin_api": "is_admin:True",
"volume:create": "",
"volume:delete": "rule:admin_or_owner",
"volume:get": "rule:admin_or_owner",
"volume:get_all": "rule:admin_or_owner",
"volume:get_volume_metadata": "rule:admin_or_owner",
"volume:create_volume_metadata": "rule:admin_or_owner",
"volume:delete_volume_metadata": "rule:admin_or_owner",
"volume:update_volume_metadata": "rule:admin_or_owner",
"volume:get_volume_admin_metadata": "rule:admin_api",
"volume:update_volume_admin_metadata": "rule:admin_api",
"volume:get_snapshot": "rule:admin_or_owner",
"volume:get_all_snapshots": "rule:admin_or_owner",
"volume:create_snapshot": "rule:admin_or_owner",
"volume:delete_snapshot": "rule:admin_or_owner",
"volume:update_snapshot": "rule:admin_or_owner",
"volume:get_snapshot_metadata": "rule:admin_or_owner",
"volume:delete_snapshot_metadata": "rule:admin_or_owner",
"volume:update_snapshot_metadata": "rule:admin_or_owner",
"volume:extend": "rule:admin_or_owner",
"volume:update_readonly_flag": "rule:admin_or_owner",
"volume:retype": "rule:admin_or_owner",
"volume:update": "rule:admin_or_owner",
"volume_extension:types_manage": "rule:admin_api",
"volume_extension:types_extra_specs": "rule:admin_api",
"volume_extension:access_types_qos_specs_id": "rule:admin_api",
"volume_extension:access_types_extra_specs": "rule:admin_api",
"volume_extension:volume_type_access": "rule:admin_or_owner",
"volume_extension:volume_type_access:addProjectAccess": "rule:admin_api",
"volume_extension:volume_type_access:removeProjectAccess": "rule:admin_api",
"volume_extension:volume_type_encryption": "rule:admin_api",
"volume_extension:volume_encryption_metadata": "rule:admin_or_owner",
"volume_extension:extended_snapshot_attributes": "rule:admin_or_owner",
"volume_extension:volume_image_metadata": "rule:admin_or_owner",
"volume_extension:quotas:show": "",
"volume_extension:quotas:update": "rule:admin_api",
"volume_extension:quotas:delete": "rule:admin_api",
"volume_extension:quota_classes": "rule:admin_api",
"volume_extension:quota_classes:validate_setup_for_nested_quota_use": "rule:admin_api",
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
"volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_admin_actions:force_detach": "rule:admin_api",
"volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",
"volume_extension:backup_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api",
"volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api",
"volume_extension:volume_actions:upload_public": "rule:admin_api",
"volume_extension:volume_actions:upload_image": "rule:admin_or_owner",
"volume_extension:volume_host_attribute": "rule:admin_api",
"volume_extension:volume_tenant_attribute": "rule:admin_or_owner",
"volume_extension:volume_mig_status_attribute": "rule:admin_api",
"volume_extension:hosts": "rule:admin_api",
"volume_extension:services:index": "rule:admin_api",
"volume_extension:services:update" : "rule:admin_api",
"volume_extension:volume_manage": "rule:admin_api",
"volume_extension:volume_unmanage": "rule:admin_api",
"volume_extension:list_manageable": "rule:admin_api",
"volume_extension:capabilities": "rule:admin_api",
"volume:create_transfer": "rule:admin_or_owner",
"volume:accept_transfer": "",
"volume:delete_transfer": "rule:admin_or_owner",
"volume:get_transfer": "rule:admin_or_owner",
"volume:get_all_transfers": "rule:admin_or_owner",
"volume_extension:replication:promote": "rule:admin_api",
"volume_extension:replication:reenable": "rule:admin_api",
"volume:failover_host": "rule:admin_api",
"volume:freeze_host": "rule:admin_api",
"volume:thaw_host": "rule:admin_api",
"backup:create" : "",
"backup:delete": "rule:admin_or_owner",
"backup:get": "rule:admin_or_owner",
"backup:get_all": "rule:admin_or_owner",
"backup:restore": "rule:admin_or_owner",
"backup:backup-import": "rule:admin_api",
"backup:backup-export": "rule:admin_api",
"backup:update": "rule:admin_or_owner",
"snapshot_extension:snapshot_actions:update_snapshot_status": "",
"snapshot_extension:snapshot_manage": "rule:admin_api",
"snapshot_extension:snapshot_unmanage": "rule:admin_api",
"snapshot_extension:list_manageable": "rule:admin_api",
"consistencygroup:create" : "group:nobody",
"consistencygroup:delete": "group:nobody",
"consistencygroup:update": "group:nobody",
"consistencygroup:get": "group:nobody",
"consistencygroup:get_all": "group:nobody",
"consistencygroup:create_cgsnapshot" : "group:nobody",
"consistencygroup:delete_cgsnapshot": "group:nobody",
"consistencygroup:get_cgsnapshot": "group:nobody",
"consistencygroup:get_all_cgsnapshots": "group:nobody",
"group:group_types_manage": "rule:admin_api",
"group:group_types_specs": "rule:admin_api",
"group:access_group_types_specs": "rule:admin_api",
"group:group_type_access": "rule:admin_or_owner",
"group:create" : "",
"group:delete": "rule:admin_or_owner",
"group:update": "rule:admin_or_owner",
"group:get": "rule:admin_or_owner",
"group:get_all": "rule:admin_or_owner",
"group:create_group_snapshot": "",
"group:delete_group_snapshot": "rule:admin_or_owner",
"group:update_group_snapshot": "rule:admin_or_owner",
"group:get_group_snapshot": "rule:admin_or_owner",
"group:get_all_group_snapshots": "rule:admin_or_owner",
"scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api",
"message:delete": "rule:admin_or_owner",
"message:get": "rule:admin_or_owner",
"message:get_all": "rule:admin_or_owner",
"clusters:get": "rule:admin_api",
"clusters:get_all": "rule:admin_api",
"clusters:update": "rule:admin_api"
}

@@ -0,0 +1,54 @@
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-db-init
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_init.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_init.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
- name: cinder-db-init
image: {{ .Values.images.db_init | quote }}
imagePullPolicy: {{ .Values.images.pull_policy | quote }}
env:
- name: ANSIBLE_LIBRARY
value: /usr/share/ansible/
command:
- bash
- /tmp/db-init.sh
volumeMounts:
- name: dbinitsh
mountPath: /tmp/db-init.sh
subPath: db-init.sh
readOnly: true
volumes:
- name: dbinitsh
configMap:
name: cinder-bin

@@ -0,0 +1,59 @@
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-db-sync
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
- name: cinder-db-sync
image: {{ .Values.images.db_sync }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- cinder-manage
args:
- --config-dir
- /etc/cinder/conf
- db
- sync
volumeMounts:
- name: pod-etc-cinder
mountPath: /etc/cinder
- name: cinderconf
mountPath: /etc/cinder/conf/cinder.conf
subPath: cinder.conf
readOnly: true
volumes:
- name: pod-etc-cinder
emptyDir: {}
- name: cinderconf
configMap:
name: cinder-etc

@@ -0,0 +1,65 @@
{{- $envAll := . }}
{{- $ksAdminSecret := $envAll.Values.keystone.admin_secret | default "cinder-env-keystone-admin" }}
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-ks-endpoints
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
{{- range $key1, $osServiceType := tuple "volume" "volumev2" "volumev3" }}
{{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }}
- name: {{ $osServiceType }}-ks-endpoints-{{ $osServiceEndPoint }}
image: {{ $envAll.Values.images.ks_endpoints }}
imagePullPolicy: {{ $envAll.Values.images.pull_policy }}
command:
- bash
- /tmp/ks-endpoints.sh
volumeMounts:
- name: ks-endpoints-sh
mountPath: /tmp/ks-endpoints.sh
subPath: ks-endpoints.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: OS_SVC_ENDPOINT
value: {{ $osServiceEndPoint }}
- name: OS_SERVICE_NAME
value: {{ tuple $osServiceType $envAll | include "endpoint_name_lookup" }}
- name: OS_SERVICE_TYPE
value: {{ $osServiceType }}
- name: OS_SERVICE_ENDPOINT
value: {{ tuple $osServiceType $osServiceEndPoint "api" $envAll | include "endpoint_type_lookup_addr" }}
{{- end }}
{{- end }}
volumes:
- name: ks-endpoints-sh
configMap:
name: cinder-bin

@@ -0,0 +1,59 @@
{{- $envAll := . }}
{{- $ksAdminSecret := .Values.keystone.admin_secret | default "cinder-env-keystone-admin" }}
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-ks-service
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
{{- range $key1, $osServiceType := tuple "volume" "volumev2" "volumev3" }}
- name: {{ $osServiceType }}-ks-service-registration
image: {{ $envAll.Values.images.ks_service }}
imagePullPolicy: {{ $envAll.Values.images.pull_policy }}
command:
- bash
- /tmp/ks-service.sh
volumeMounts:
- name: ks-service-sh
mountPath: /tmp/ks-service.sh
subPath: ks-service.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: OS_SERVICE_NAME
value: {{ tuple $osServiceType $envAll | include "endpoint_name_lookup" }}
- name: OS_SERVICE_TYPE
value: {{ $osServiceType }}
{{- end }}
volumes:
- name: ks-service-sh
configMap:
name: cinder-bin

@@ -0,0 +1,60 @@
{{- $ksAdminSecret := .Values.keystone.admin_secret | default "cinder-env-keystone-admin" }}
{{- $ksUserSecret := .Values.keystone.user_secret | default "cinder-env-keystone-user" }}
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-ks-user
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_user.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
- name: cinder-ks-user
image: {{ .Values.images.ks_user }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- bash
- /tmp/ks-user.sh
volumeMounts:
- name: ks-user-sh
mountPath: /tmp/ks-user.sh
subPath: ks-user.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_SERVICE_NAME
value: "cinder"
{{- with $env := dict "ksUserSecret" $ksUserSecret }}
{{- include "env_ks_user_create_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_ROLE
value: {{ .Values.keystone.cinder_user_role | quote }}
volumes:
- name: ks-user-sh
configMap:
name: cinder-bin

@@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
name: cinder-env-keystone-admin
type: Opaque
data:
OS_AUTH_URL: |
{{ .Values.keystone.auth_url | b64enc | indent 4 }}
OS_REGION_NAME: |
{{ .Values.keystone.admin_region_name | b64enc | indent 4 }}
OS_PROJECT_DOMAIN_NAME: |
{{ .Values.keystone.admin_project_domain | b64enc | indent 4 }}
OS_PROJECT_NAME: |
{{ .Values.keystone.admin_project_name | b64enc | indent 4 }}
OS_USER_DOMAIN_NAME: |
{{ .Values.keystone.admin_user_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.admin_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.admin_password | b64enc | indent 4 }}

@@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
name: cinder-env-keystone-user
type: Opaque
data:
OS_AUTH_URL: |
{{ .Values.keystone.auth_url | b64enc | indent 4 }}
OS_REGION_NAME: |
{{ .Values.keystone.cinder_region_name | b64enc | indent 4 }}
OS_PROJECT_DOMAIN_NAME: |
{{ .Values.keystone.cinder_project_domain | b64enc | indent 4 }}
OS_PROJECT_NAME: |
{{ .Values.keystone.cinder_project_name | b64enc | indent 4 }}
OS_USER_DOMAIN_NAME: |
{{ .Values.keystone.cinder_user_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.cinder_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.cinder_password | b64enc | indent 4 }}

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.service.api.name }}
spec:
ports:
- port: {{ .Values.service.api.port }}
selector:
app: cinder-api

cinder/values.yaml
@@ -0,0 +1,177 @@
# Default values for cinder.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
replicas:
api: 1
volume: 1
scheduler: 1
labels:
node_selector_key: openstack-control-plane
node_selector_value: enabled
images:
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0
db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
db_sync: quay.io/stackanetes/stackanetes-cinder-api:newton
ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
ks_service: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
ks_endpoints: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
api: quay.io/stackanetes/stackanetes-cinder-api:newton
scheduler: quay.io/stackanetes/stackanetes-cinder-scheduler:newton
volume: quay.io/stackanetes/stackanetes-cinder-volume:newton
pull_policy: "IfNotPresent"
upgrades:
revision_history: 3
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 1
max_surge: 3
keystone:
auth_uri: "http://keystone-api:5000"
auth_url: "http://keystone-api:35357"
admin_user: "admin"
admin_user_domain: "default"
admin_password: "password"
admin_project_name: "admin"
admin_project_domain: "default"
admin_region_name: "RegionOne"
cinder_user: "cinder"
cinder_user_domain: "default"
cinder_user_role: "admin"
cinder_password: "password"
cinder_project_name: "service"
cinder_project_domain: "default"
cinder_region_name: "RegionOne"
service:
api:
name: "cinder-api"
port: 8776
proto: "http"
database:
address: mariadb
port: 3306
root_user: root
root_password: password
cinder_database_name: cinder
cinder_password: password
cinder_user: cinder
ceph:
enabled: true
monitors: []
cinder_user: "admin"
# a null value for the keyring will
# attempt to use the key from
# common/secrets/ceph-client-key
cinder_keyring: null
backends:
enabled:
- rbd1
rbd1:
secret: null
user: "admin"
pool: "volumes"
glance:
proto: "http"
host: "glance-api"
port: 9292
version: 2
messaging:
hosts: rabbitmq
user: rabbitmq
password: password
api:
workers: 8
misc:
debug: false
dependencies:
db_init:
jobs:
- mariadb-seed
service:
- mariadb
db_sync:
jobs:
- cinder-db-init
service:
- mariadb
ks_user:
service:
- keystone-api
ks_service:
service:
- keystone-api
ks_endpoints:
jobs:
- cinder-ks-service
service:
- keystone-api
api:
jobs:
- cinder-db-sync
- cinder-ks-user
- cinder-ks-endpoints
service:
- mariadb
- keystone-api
volume:
service:
- keystone-api
- cinder-api
scheduler:
service:
- keystone-api
- cinder-api
# We use a different layout of the endpoints here to account for versioning
# this swaps the service name and type, and should be rolled out to other
# services.
endpoints:
identity:
name: keystone
hosts:
default: keystone-api
path: /v3
scheme: 'http'
port:
admin: 35357
public: 5000
volume:
name: cinder
hosts:
default: cinder-api
path: '/v1/%(tenant_id)s'
scheme: 'http'
port:
api: 8776
volumev2:
name: cinder
hosts:
default: cinder-api
path: '/v2/%(tenant_id)s'
scheme: 'http'
port:
api: 8776
volumev3:
name: cinder
hosts:
default: cinder-api
path: '/v3/%(tenant_id)s'
scheme: 'http'
port:
api: 8776
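As an illustration of this versioned layout, each `volume*` entry resolves independently through the `endpoint_type_lookup_addr` helper added above; a sketch, again assuming namespace `openstack`:

```
# {{ tuple "volumev2" "public" "api" . | include "endpoint_type_lookup_addr" }}
#   => "http://cinder-api.openstack:8776/v2/%(tenant_id)s"
```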

@@ -87,8 +87,51 @@
{{- end -}}
{{- end -}}
# this function returns the endpoint uri for a service, it takes a tuple
# input in the form: service-name, endpoint-class, port-name. eg:
# { tuple "heat" "public" "api" . | include "endpoint_addr_lookup" }
# will return the appropriate URI. Once merged this should phase out the above.
{{- define "endpoint_addr_lookup" -}}
{{- $name := index . 0 -}}
{{- $endpoint := index . 1 -}}
{{- $port := index . 2 -}}
{{- $context := index . 3 -}}
{{- $nameNorm := $name | replace "-" "_" }}
{{- $endpointMap := index $context.Values.endpoints $nameNorm }}
{{- $fqdn := $context.Release.Namespace -}}
{{- if $context.Values.endpoints.fqdn -}}
{{- $fqdn := $context.Values.endpoints.fqdn -}}
{{- end -}}
{{- with $endpointMap -}}
{{- $endpointScheme := .scheme }}
{{- $endpointHost := index .hosts $endpoint | default .hosts.default}}
{{- $endpointPort := index .port $port }}
{{- $endpointPath := .path }}
{{- printf "%s://%s.%s:%1.f%s" $endpointScheme $endpointHost $fqdn $endpointPort $endpointPath | quote -}}
{{- end -}}
{{- end -}}
#-------------------------------
# endpoint type lookup
#-------------------------------
# this function is used in endpoint management templates
# it returns the service type for an openstack service eg:
# { tuple "heat" . | include "endpoint_type_lookup" }
# will return "orchestration"
{{- define "endpoint_type_lookup" -}}
{{- $name := index . 0 -}}
{{- $context := index . 1 -}}
{{- $nameNorm := $name | replace "-" "_" }}
{{- $endpointMap := index $context.Values.endpoints $nameNorm }}
{{- $endpointType := index $endpointMap "type" }}
{{- $endpointType | quote -}}
{{- end -}}
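A sketch of how these name-keyed helpers differ from the type-keyed ones earlier in this commit, assuming a hypothetical `endpoints.glance_registry` entry shaped like the ones in cinder/values.yaml (the `replace "-" "_"` step maps the service name onto that key):

```
# {{ tuple "glance-registry" "internal" "api" . | include "endpoint_addr_lookup" }}
#   looks up .Values.endpoints.glance_registry and would render, e.g.:
#   => "http://glance-registry.openstack:9191/"
```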
#-------------------------------
# kolla helpers
#-------------------------------
{{ define "keystone_auth" }}{'auth_url':'{{ include "endpoint_keystone_internal" . }}', 'username':'{{ .Values.keystone.admin_user }}','password':'{{ .Values.keystone.admin_password }}','project_name':'{{ .Values.keystone.admin_project_name }}','domain_name':'default'}{{end}}

@@ -21,4 +21,3 @@
{{- $wtf := $context.Template.Name | replace $last $name -}}
{{- include $wtf $context | sha256sum | quote -}}
{{- end -}}

@@ -0,0 +1,57 @@
{{- define "common_keystone_domain_user" }}
#!/bin/bash
# Copyright 2017 Pete Birley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# Manage domain
SERVICE_OS_DOMAIN_ID=$(openstack domain create --or-show --enable -f value -c id \
--description="Service Domain for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_DOMAIN_NAME}" \
"${SERVICE_OS_DOMAIN_NAME}")
# Display domain
openstack domain show "${SERVICE_OS_DOMAIN_ID}"
# Manage user
SERVICE_OS_USERID=$(openstack user create --or-show --enable -f value -c id \
--domain="${SERVICE_OS_DOMAIN_ID}" \
--description "Service User for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_DOMAIN_NAME}" \
--password="${SERVICE_OS_PASSWORD}" \
"${SERVICE_OS_USERNAME}")
# Display user
openstack user show "${SERVICE_OS_USERID}"
# Manage role
SERVICE_OS_ROLE_ID=$(openstack role show -f value -c id \
--domain="${SERVICE_OS_DOMAIN_ID}" \
"${SERVICE_OS_ROLE}" || openstack role create -f value -c id \
--domain="${SERVICE_OS_DOMAIN_ID}" \
"${SERVICE_OS_ROLE}" )
# Manage user role assignment
openstack role add \
--domain="${SERVICE_OS_DOMAIN_ID}" \
--user="${SERVICE_OS_USERID}" \
--user-domain="${SERVICE_OS_DOMAIN_ID}" \
"${SERVICE_OS_ROLE_ID}"
# Display user role assignment
openstack role assignment list \
--role="${SERVICE_OS_ROLE_ID}" \
--user-domain="${SERVICE_OS_DOMAIN_ID}" \
--user="${SERVICE_OS_USERID}"
{{- end }}

@@ -0,0 +1,65 @@
{{- define "common_keystone_endpoints" }}
#!/bin/bash
# Copyright 2017 Pete Birley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# Get Service ID
OS_SERVICE_ID=$( openstack service list -f csv --quote none | \
grep ",${OS_SERVICE_NAME},${OS_SERVICE_TYPE}$" | \
sed -e "s/,${OS_SERVICE_NAME},${OS_SERVICE_TYPE}//g" )
# Get Endpoint ID if it exists
OS_ENDPOINT_ID=$( openstack endpoint list -f csv --quote none | \
grep "^[a-z0-9]*,${OS_REGION_NAME},${OS_SERVICE_NAME},${OS_SERVICE_TYPE},True,${OS_SVC_ENDPOINT}," | \
awk -F ',' '{ print $1 }' )
# Making sure only a single endpoint exists for a service within a region
if [ "$(echo $OS_ENDPOINT_ID | wc -w)" -gt "1" ]; then
echo "More than one endpoint found, cleaning up"
for ENDPOINT_ID in $OS_ENDPOINT_ID; do
openstack endpoint delete ${ENDPOINT_ID}
done
unset OS_ENDPOINT_ID
fi
# Determine if the endpoint needs updating
if [[ ${OS_ENDPOINT_ID} ]]; then
OS_ENDPOINT_URL_CURRENT=$(openstack endpoint show ${OS_ENDPOINT_ID} -f value -c url)
if [ "${OS_ENDPOINT_URL_CURRENT}" == "${OS_SERVICE_ENDPOINT}" ]; then
echo "Endpoints Match: no action required"
OS_ENDPOINT_UPDATE="False"
else
echo "Endpoints Dont Match: removing existing entries"
openstack endpoint delete ${OS_ENDPOINT_ID}
OS_ENDPOINT_UPDATE="True"
fi
else
OS_ENDPOINT_UPDATE="True"
fi
# Update Endpoint if required
if [[ "${OS_ENDPOINT_UPDATE}" == "True" ]]; then
OS_ENDPOINT_ID=$( openstack endpoint create -f value -c id \
--region="${OS_REGION_NAME}" \
"${OS_SERVICE_ID}" \
${OS_SVC_ENDPOINT} \
"${OS_SERVICE_ENDPOINT}" )
fi
# Display the Endpoint
openstack endpoint show ${OS_ENDPOINT_ID}
{{- end }}

@@ -0,0 +1,37 @@
{{- define "common_keystone_service" }}
#!/bin/bash
# Copyright 2017 Pete Birley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# Service boilerplate description
OS_SERVICE_DESC="${OS_REGION_NAME}: ${OS_SERVICE_NAME} (${OS_SERVICE_TYPE}) service"
# Get Service ID if it exists
unset OS_SERVICE_ID
OS_SERVICE_ID=$( openstack service list -f csv --quote none | \
grep ",${OS_SERVICE_NAME},${OS_SERVICE_TYPE}$" | \
sed -e "s/,${OS_SERVICE_NAME},${OS_SERVICE_TYPE}//g" )
# If a Service ID was not found, then create the service
if [[ -z ${OS_SERVICE_ID} ]]; then
OS_SERVICE_ID=$(openstack service create -f value -c id \
--name="${OS_SERVICE_NAME}" \
--description "${OS_SERVICE_DESC}" \
--enable \
"${OS_SERVICE_TYPE}")
fi
{{- end }}

@@ -0,0 +1,60 @@
{{- define "common_keystone_user" }}
#!/bin/bash
# Copyright 2017 Pete Birley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# Manage user project
USER_PROJECT_DESC="Service Project for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_PROJECT_DOMAIN_NAME}"
USER_PROJECT_ID=$(openstack project create --or-show --enable -f value -c id \
--domain="${SERVICE_OS_PROJECT_DOMAIN_NAME}" \
--description="${USER_PROJECT_DESC}" \
"${SERVICE_OS_PROJECT_NAME}");
# Display project
openstack project show "${USER_PROJECT_ID}"
# Manage user
USER_DESC="Service User for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_USER_DOMAIN_NAME}/${SERVICE_OS_SERVICE_NAME}"
USER_ID=$(openstack user create --or-show --enable -f value -c id \
--domain="${SERVICE_OS_USER_DOMAIN_NAME}" \
--project-domain="${SERVICE_OS_PROJECT_DOMAIN_NAME}" \
--project="${USER_PROJECT_ID}" \
--description="${USER_DESC}" \
--password="${SERVICE_OS_PASSWORD}" \
"${SERVICE_OS_USERNAME}");
# Display user
openstack user show "${USER_ID}"
# Manage user role
USER_ROLE_ID=$(openstack role create --or-show -f value -c id \
"${SERVICE_OS_ROLE}");
# Manage user role assignment
openstack role add \
--user="${USER_ID}" \
--user-domain="${SERVICE_OS_USER_DOMAIN_NAME}" \
--project-domain="${SERVICE_OS_PROJECT_DOMAIN_NAME}" \
--project="${USER_PROJECT_ID}" \
"${USER_ROLE_ID}"
# Display user role assignment
openstack role assignment list \
--role="${SERVICE_OS_ROLE}" \
--user-domain="${SERVICE_OS_USER_DOMAIN_NAME}" \
--user="${USER_ID}"
{{- end }}

@@ -0,0 +1,40 @@
{{- define "env_ks_openrc_tpl" }}
{{- $ksUserSecret := .ksUserSecret }}
- name: OS_IDENTITY_API_VERSION
value: "3"
- name: OS_AUTH_URL
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_AUTH_URL
- name: OS_REGION_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_REGION_NAME
- name: OS_PROJECT_DOMAIN_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_PROJECT_DOMAIN_NAME
- name: OS_PROJECT_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_PROJECT_NAME
- name: OS_USER_DOMAIN_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_USER_DOMAIN_NAME
- name: OS_USERNAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_USERNAME
- name: OS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_PASSWORD
{{- end }}

@@ -0,0 +1,33 @@
{{- define "env_ks_user_create_openrc_tpl" }}
{{- $ksUserSecret := .ksUserSecret }}
- name: SERVICE_OS_REGION_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_REGION_NAME
- name: SERVICE_OS_PROJECT_DOMAIN_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_PROJECT_DOMAIN_NAME
- name: SERVICE_OS_PROJECT_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_PROJECT_NAME
- name: SERVICE_OS_USER_DOMAIN_NAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_USER_DOMAIN_NAME
- name: SERVICE_OS_USERNAME
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_USERNAME
- name: SERVICE_OS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ $ksUserSecret }}
key: OS_PASSWORD
{{- end }}

@@ -1,9 +1,9 @@
# Development of Openstack-Helm
Community development is extremely important to us. As an open source development team, we want the development of Openstack-Helm to be an easy experience. Please evaluate, and make recommendations. We want developers to feel welcomed to contribute to this project. Below are some instructions and suggestions to help you get started.
Community development is extremely important to us. As an open source development team, we want the development of Openstack-Helm to be an easy experience. Please evaluate, and make recommendations. We want developers to feel welcome to contribute to this project. Below are some instructions and suggestions to help you get started.
# Requirements
We've tried to minimize the amount of prerequisites required in order to get started. The main prerequisite is to install the most recent versions of Minikube and Helm.
We've tried to minimize the number of prerequisites required in order to get started. The main prerequisite is to install the most recent versions of Minikube and Helm.
**Kubernetes Minikube:**
Ensure that you have installed a recent version of [Kubernetes/Minikube](http://kubernetes.io/docs/getting-started-guides/minikube/).
@@ -75,7 +75,7 @@ kube-system tiller-deploy-3299276078-n98ct 1/1 Running 0
With Helm installed, you will need to start a local [Helm server](https://github.com/kubernetes/helm/blob/7a15ad381eae794a36494084972e350306e498fd/docs/helm/helm_serve.md#helm-serve) (in the background), and point to a locally configured Helm [repository](https://github.com/kubernetes/helm/blob/7a15ad381eae794a36494084972e350306e498fd/docs/helm/helm_repo_index.md#helm-repo-index):
```
$ helm serve . &
$ helm serve &
$ helm repo add local http://localhost:8879/charts
"local" has been added to your repositories
```
@@ -107,13 +107,13 @@ Perfect! You're ready to install, develop, deploy, destroy, and repeat (when necessary)!
# Installation and Testing
After following the instructions above you're environment is in a state where you can enhance the current charts, or develop new charts for the project. If you need to make changes to a chart, simply re-run `make` against the project in the top-tier directory. The charts will be updated and automatically re-pushed to your local repository.
After following the instructions above your environment is in a state where you can enhance the current charts, or develop new charts for the project. If you need to make changes to a chart, simply re-run `make` against the project in the top-tier directory. The charts will be updated and automatically re-pushed to your local repository.
Consider the following when using Minikube and development mode:
* Persistent Storage used for Minikube development mode is `hostPath`. The Ceph PVC's included with this project are not intended to work with Minikube.
* There is *no need* to install the `common` `ceph` or `bootstrap` charts. These charts are required for deploying Ceph PVC's.
* Familiarize yourself wtih `values.yaml` included wtih the MariaDB chart. You will will want to have the `hostPath` directory created prior to deploying MariaDB.
* Familiarize yourself with `values.yaml` included with the MariaDB chart. You will want to have the `hostPath` directory created prior to deploying MariaDB.
* If Ceph development is required, you will need to follow the [getting started guide](https://github.com/att-comdev/openstack-helm/blob/master/docs/installation/getting-started.md) rather than this development mode documentation.
To deploy Openstack-Helm in development mode, ensure you've created a minikube-approved `hostPath` volume. Minikube is very specific about what is expected for `hostPath` volumes. The following volumes are acceptable for minikube deployments:
@@ -160,14 +160,16 @@ $ helm install --name=memcached local/memcached --namespace=openstack
$ helm install --name=rabbitmq local/rabbitmq --namespace=openstack
$ helm install --name=keystone local/keystone --namespace=openstack
$ helm install --name=horizon local/horizon --namespace=openstack
$ helm install --name=cinder local/cinder --namespace=openstack
$ helm install --name=glance local/glance --namespace=openstack
$ helm install --name=nova local/nova --namespace=openstack
$ helm install --name=neutron local/neutron --namespace=openstack
$ helm install --name=heat local/heat --namespace=openstack
```
# Horizon Management
After each of the chart is deployed, you may wish to change the typical service endpoint for Horizon to a `nodePort` service endpoint (this is unique to Minikube deployments). Use the `kubectl edit` command to edit this service manually.
After each chart is deployed, you may wish to change the typical service endpoint for Horizon to a `nodePort` service endpoint (this is unique to Minikube deployments). Use the `kubectl edit` command to edit this service manually.
```
$ sudo kubectl edit svc horizon -n openstack
@@ -201,7 +203,7 @@ status:
```
**Accessing Horizon:**<br>
*Now you're ready to manage Openstack! Point your browser to the following:*<br>
*Now you're ready to manage OpenStack! Point your browser to the following:*<br>
***URL:*** *http://192.168.99.100:31537/* <br>
***User:*** *admin* <br>
***Pass:*** *password* <br>
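For reference, the `kubectl edit` step above amounts to changing the service type; a minimal sketch of the resulting spec fields, assuming Horizon's default service port of 80 (the node port shown matches the URL above):

```
spec:
  type: NodePort
  ports:
  - port: 80
    nodePort: 31537
```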
@@ -210,7 +212,7 @@ If you have any questions, comments, or find any bugs, please submit an issue so
# Troubleshooting
In order to protect your general sanity, we've included a currated list of verification and troubleshooting steps that may help you avoid some potential issues while developing Openstack-Helm.
In order to protect your general sanity, we've included a curated list of verification and troubleshooting steps that may help you avoid some potential issues while developing Openstack-Helm.
**MariaDB**<br>
To verify the state of MariaDB, use the following command:

heat/Chart.yaml
@@ -0,0 +1,3 @@
description: A Helm chart for heat
name: heat
version: 0.1.0

heat/requirements.yaml
@@ -0,0 +1,4 @@
dependencies:
- name: common
repository: http://localhost:8879/charts
version: 0.1.0

@@ -0,0 +1,21 @@
#!/bin/bash
set -ex
export HOME=/tmp
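# Ensure the heat database exists (the Ansible mysql_db module is idempotent)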
ansible localhost -vvv \
-m mysql_db -a "login_host='{{ .Values.database.address }}' \
login_port='{{ .Values.database.port }}' \
login_user='{{ .Values.database.root_user }}' \
login_password='{{ .Values.database.root_password }}' \
name='{{ .Values.database.heat_database_name }}'"
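# Ensure the heat database user exists and grant it full privileges on that database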
ansible localhost -vvv \
-m mysql_user -a "login_host='{{ .Values.database.address }}' \
login_port='{{ .Values.database.port }}' \
login_user='{{ .Values.database.root_user }}' \
login_password='{{ .Values.database.root_password }}' \
name='{{ .Values.database.heat_user }}' \
password='{{ .Values.database.heat_password }}' \
host='%' \
priv='{{ .Values.database.heat_database_name }}.*:ALL' \
append_privs='yes'"

@@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: heat-bin
data:
db-init.sh: |+
{{ tuple "bin/_db-init.sh.tpl" . | include "template" | indent 4 }}
ks-service.sh: |+
{{- include "common_keystone_service" . | indent 4 }}
ks-endpoints.sh: |+
{{- include "common_keystone_endpoints" . | indent 4 }}
ks-user.sh: |+
{{- include "common_keystone_user" . | indent 4 }}
ks-domain-user.sh: |+
{{- include "common_keystone_domain_user" . | indent 4 }}

@@ -0,0 +1,11 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: heat-etc
data:
heat.conf: |+
{{ tuple "etc/_heat.conf.tpl" . | include "template" | indent 4 }}
api-paste.ini: |+
{{ tuple "etc/_heat-api-paste.ini.tpl" . | include "template" | indent 4 }}
policy.json: |+
{{ tuple "etc/_heat-policy.json.tpl" . | include "template" | indent 4 }}

@@ -0,0 +1,83 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heat-api
spec:
replicas: {{ .Values.replicas.api }}
template:
metadata:
labels:
app: heat-api
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.api.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.api.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: heat-api
image: {{ .Values.images.api }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- heat-api
- --config-dir
- /etc/heat/conf
ports:
- containerPort: {{ .Values.service.api.port }}
readinessProbe:
tcpSocket:
port: {{ .Values.service.api.port }}
volumeMounts:
- name: pod-etc-heat
mountPath: /etc/heat
- name: pod-var-cache-heat
mountPath: /var/cache/heat
- name: heatconf
mountPath: /etc/heat/conf/heat.conf
subPath: heat.conf
readOnly: true
- name: heatpaste
mountPath: /etc/heat/api-paste.ini
subPath: api-paste.ini
readOnly: true
- name: heatpolicy
mountPath: /etc/heat/policy.json
subPath: policy.json
readOnly: true
volumes:
- name: pod-etc-heat
emptyDir: {}
- name: pod-var-cache-heat
emptyDir: {}
- name: heatconf
configMap:
name: heat-etc
- name: heatpaste
configMap:
name: heat-etc
- name: heatpolicy
configMap:
name: heat-etc

@@ -0,0 +1,83 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heat-cfn
spec:
replicas: {{ .Values.replicas.cfn }}
template:
metadata:
labels:
app: heat-cfn
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.cfn.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.cfn.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: heat-cfn
image: {{ .Values.images.cfn }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- heat-api-cfn
- --config-dir
- /etc/heat/conf
ports:
- containerPort: {{ .Values.service.cfn.port }}
readinessProbe:
tcpSocket:
port: {{ .Values.service.cfn.port }}
volumeMounts:
- name: pod-etc-heat
mountPath: /etc/heat
- name: pod-var-cache-heat
mountPath: /var/cache/heat
- name: heatconf
mountPath: /etc/heat/conf/heat.conf
subPath: heat.conf
readOnly: true
- name: heatpaste
mountPath: /etc/heat/api-paste.ini
subPath: api-paste.ini
readOnly: true
- name: heatpolicy
mountPath: /etc/heat/policy.json
subPath: policy.json
readOnly: true
volumes:
- name: pod-etc-heat
emptyDir: {}
- name: pod-var-cache-heat
emptyDir: {}
- name: heatconf
configMap:
name: heat-etc
- name: heatpaste
configMap:
name: heat-etc
- name: heatpolicy
configMap:
name: heat-etc

@@ -0,0 +1,83 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heat-cloudwatch
spec:
replicas: {{ .Values.replicas.cloudwatch }}
template:
metadata:
labels:
app: heat-cloudwatch
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.cloudwatch.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.cloudwatch.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: heat-cloudwatch
image: {{ .Values.images.cloudwatch }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- heat-api-cloudwatch
- --config-dir
- /etc/heat/conf
ports:
- containerPort: {{ .Values.service.cloudwatch.port }}
readinessProbe:
tcpSocket:
port: {{ .Values.service.cloudwatch.port }}
volumeMounts:
- name: pod-etc-heat
mountPath: /etc/heat
- name: pod-var-cache-heat
mountPath: /var/cache/heat
- name: heatconf
mountPath: /etc/heat/conf/heat.conf
subPath: heat.conf
readOnly: true
- name: heatpaste
mountPath: /etc/heat/api-paste.ini
subPath: api-paste.ini
readOnly: true
- name: heatpolicy
mountPath: /etc/heat/policy.json
subPath: policy.json
readOnly: true
volumes:
- name: pod-etc-heat
emptyDir: {}
- name: pod-var-cache-heat
emptyDir: {}
- name: heatconf
configMap:
name: heat-etc
- name: heatpaste
configMap:
name: heat-etc
- name: heatpolicy
configMap:
name: heat-etc


@ -0,0 +1,104 @@
# heat-api pipeline
[pipeline:heat-api]
pipeline = cors request_id faultwrap http_proxy_to_wsgi versionnegotiation osprofiler authurl authtoken context apiv1app
# heat-api pipeline for standalone heat
# i.e. uses an alternative auth backend that authenticates users against keystone
# using username and password instead of validating token (which requires
# an admin/service token).
# To enable, in heat.conf:
# [paste_deploy]
# flavor = standalone
#
[pipeline:heat-api-standalone]
pipeline = cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authpassword context apiv1app
# heat-api pipeline for custom cloud backends
# i.e. in heat.conf:
# [paste_deploy]
# flavor = custombackend
#
[pipeline:heat-api-custombackend]
pipeline = cors request_id faultwrap versionnegotiation context custombackendauth apiv1app
# heat-api-cfn pipeline
[pipeline:heat-api-cfn]
pipeline = cors cfnversionnegotiation osprofiler ec2authtoken authtoken context apicfnv1app
# heat-api-cfn pipeline for standalone heat
# relies exclusively on authenticating with ec2 signed requests
[pipeline:heat-api-cfn-standalone]
pipeline = cors cfnversionnegotiation ec2authtoken context apicfnv1app
# heat-api-cloudwatch pipeline
[pipeline:heat-api-cloudwatch]
pipeline = cors versionnegotiation osprofiler ec2authtoken authtoken context apicwapp
# heat-api-cloudwatch pipeline for standalone heat
# relies exclusively on authenticating with ec2 signed requests
[pipeline:heat-api-cloudwatch-standalone]
pipeline = cors versionnegotiation ec2authtoken context apicwapp
[app:apiv1app]
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.openstack.v1:API
[app:apicfnv1app]
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.cfn.v1:API
[app:apicwapp]
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.cloudwatch:API
[filter:versionnegotiation]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.openstack:version_negotiation_filter
[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = heat
[filter:faultwrap]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.openstack:faultwrap_filter
[filter:cfnversionnegotiation]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.cfn:version_negotiation_filter
[filter:cwversionnegotiation]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.cloudwatch:version_negotiation_filter
[filter:context]
paste.filter_factory = heat.common.context:ContextMiddleware_filter_factory
[filter:ec2authtoken]
paste.filter_factory = heat.api.aws.ec2token:EC2Token_filter_factory
[filter:http_proxy_to_wsgi]
paste.filter_factory = oslo_middleware:HTTPProxyToWSGI.factory
# Middleware to set auth_url header appropriately
[filter:authurl]
paste.filter_factory = heat.common.auth_url:filter_factory
# Auth middleware that validates token against keystone
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
# Auth middleware that validates username/password against keystone
[filter:authpassword]
paste.filter_factory = heat.common.auth_password:filter_factory
# Auth middleware that validates against custom backend
[filter:custombackendauth]
paste.filter_factory = heat.common.custom_backend_auth:filter_factory
# Middleware to set x-openstack-request-id in http response header
[filter:request_id]
paste.filter_factory = oslo_middleware.request_id:RequestId.factory
[filter:osprofiler]
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
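
As the comments above note, pipeline selection happens in heat.conf rather than in this file: the paste_deploy flavor is appended to the app name, so the following (a hypothetical operator override; this chart's heat.conf only sets config_file) would select [pipeline:heat-api-standalone]:

[paste_deploy]
flavor = standalone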


@ -0,0 +1,96 @@
{
"context_is_admin": "role:admin and is_admin_project:True",
"project_admin": "role:admin",
"deny_stack_user": "not role:heat_stack_user",
"deny_everybody": "!",
"cloudformation:ListStacks": "rule:deny_stack_user",
"cloudformation:CreateStack": "rule:deny_stack_user",
"cloudformation:DescribeStacks": "rule:deny_stack_user",
"cloudformation:DeleteStack": "rule:deny_stack_user",
"cloudformation:UpdateStack": "rule:deny_stack_user",
"cloudformation:CancelUpdateStack": "rule:deny_stack_user",
"cloudformation:DescribeStackEvents": "rule:deny_stack_user",
"cloudformation:ValidateTemplate": "rule:deny_stack_user",
"cloudformation:GetTemplate": "rule:deny_stack_user",
"cloudformation:EstimateTemplateCost": "rule:deny_stack_user",
"cloudformation:DescribeStackResource": "",
"cloudformation:DescribeStackResources": "rule:deny_stack_user",
"cloudformation:ListStackResources": "rule:deny_stack_user",
"cloudwatch:DeleteAlarms": "rule:deny_stack_user",
"cloudwatch:DescribeAlarmHistory": "rule:deny_stack_user",
"cloudwatch:DescribeAlarms": "rule:deny_stack_user",
"cloudwatch:DescribeAlarmsForMetric": "rule:deny_stack_user",
"cloudwatch:DisableAlarmActions": "rule:deny_stack_user",
"cloudwatch:EnableAlarmActions": "rule:deny_stack_user",
"cloudwatch:GetMetricStatistics": "rule:deny_stack_user",
"cloudwatch:ListMetrics": "rule:deny_stack_user",
"cloudwatch:PutMetricAlarm": "rule:deny_stack_user",
"cloudwatch:PutMetricData": "",
"cloudwatch:SetAlarmState": "rule:deny_stack_user",
"actions:action": "rule:deny_stack_user",
"build_info:build_info": "rule:deny_stack_user",
"events:index": "rule:deny_stack_user",
"events:show": "rule:deny_stack_user",
"resource:index": "rule:deny_stack_user",
"resource:metadata": "",
"resource:signal": "",
"resource:mark_unhealthy": "rule:deny_stack_user",
"resource:show": "rule:deny_stack_user",
"stacks:abandon": "rule:deny_stack_user",
"stacks:create": "rule:deny_stack_user",
"stacks:delete": "rule:deny_stack_user",
"stacks:detail": "rule:deny_stack_user",
"stacks:export": "rule:deny_stack_user",
"stacks:generate_template": "rule:deny_stack_user",
"stacks:global_index": "rule:deny_everybody",
"stacks:index": "rule:deny_stack_user",
"stacks:list_resource_types": "rule:deny_stack_user",
"stacks:list_template_versions": "rule:deny_stack_user",
"stacks:list_template_functions": "rule:deny_stack_user",
"stacks:lookup": "",
"stacks:preview": "rule:deny_stack_user",
"stacks:resource_schema": "rule:deny_stack_user",
"stacks:show": "rule:deny_stack_user",
"stacks:template": "rule:deny_stack_user",
"stacks:environment": "rule:deny_stack_user",
"stacks:files": "rule:deny_stack_user",
"stacks:update": "rule:deny_stack_user",
"stacks:update_patch": "rule:deny_stack_user",
"stacks:preview_update": "rule:deny_stack_user",
"stacks:preview_update_patch": "rule:deny_stack_user",
"stacks:validate_template": "rule:deny_stack_user",
"stacks:snapshot": "rule:deny_stack_user",
"stacks:show_snapshot": "rule:deny_stack_user",
"stacks:delete_snapshot": "rule:deny_stack_user",
"stacks:list_snapshots": "rule:deny_stack_user",
"stacks:restore_snapshot": "rule:deny_stack_user",
"stacks:list_outputs": "rule:deny_stack_user",
"stacks:show_output": "rule:deny_stack_user",
"software_configs:global_index": "rule:deny_everybody",
"software_configs:index": "rule:deny_stack_user",
"software_configs:create": "rule:deny_stack_user",
"software_configs:show": "rule:deny_stack_user",
"software_configs:delete": "rule:deny_stack_user",
"software_deployments:index": "rule:deny_stack_user",
"software_deployments:create": "rule:deny_stack_user",
"software_deployments:show": "rule:deny_stack_user",
"software_deployments:update": "rule:deny_stack_user",
"software_deployments:delete": "rule:deny_stack_user",
"software_deployments:metadata": "",
"service:index": "rule:context_is_admin",
"resource_types:OS::Nova::Flavor": "rule:project_admin",
"resource_types:OS::Cinder::EncryptedVolumeType": "rule:project_admin",
"resource_types:OS::Cinder::VolumeType": "rule:project_admin",
"resource_types:OS::Cinder::Quota": "rule:project_admin",
"resource_types:OS::Manila::ShareType": "rule:project_admin",
"resource_types:OS::Neutron::QoSPolicy": "rule:project_admin",
"resource_types:OS::Neutron::QoSBandwidthLimitRule": "rule:project_admin",
"resource_types:OS::Nova::HostAggregate": "rule:project_admin",
"resource_types:OS::Cinder::QoSSpecs": "rule:project_admin"
}
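
The rules above compose by reference, so local policy changes stay small. For example, a hypothetical override letting admins list stacks across all tenants would rebind a single key:

"stacks:global_index": "rule:context_is_admin"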


@ -0,0 +1,82 @@
[DEFAULT]
debug = {{ .Values.misc.debug }}
use_syslog = False
use_stderr = True
deferred_auth_method = "trusts"
enable_stack_adopt = "True"
enable_stack_abandon = "True"
heat_metadata_server_url = {{ .Values.service.cfn.proto }}://{{ .Values.service.cfn.name }}:{{ .Values.service.cfn.port }}
heat_waitcondition_server_url = {{ .Values.service.cfn.proto }}://{{ .Values.service.cfn.name }}:{{ .Values.service.cfn.port }}/v1/waitcondition
heat_watch_server_url = {{ .Values.service.cloudwatch.proto }}://{{ .Values.service.cloudwatch.name }}:{{ .Values.service.cloudwatch.port }}
num_engine_workers = {{ .Values.resources.engine.workers }}
stack_user_domain_name = {{ .Values.keystone.heat_stack_user_domain }}
stack_domain_admin = {{ .Values.keystone.heat_stack_user }}
stack_domain_admin_password = {{ .Values.keystone.heat_stack_password }}
trusts_delegated_roles = "Member"
[cache]
enabled = "True"
backend = oslo_cache.memcache_pool
memcache_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}"
[database]
connection = mysql+pymysql://{{ .Values.database.heat_user }}:{{ .Values.database.heat_password }}@{{ .Values.database.address }}:{{ .Values.database.port }}/{{ .Values.database.heat_database_name }}
max_retries = -1
[keystone_authtoken]
signing_dir = "/var/cache/heat"
memcached_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}"
auth_version = v3
auth_url = {{ include "endpoint_keystone_internal" . }}
auth_type = password
region_name = {{ .Values.keystone.heat_region_name }}
project_domain_name = {{ .Values.keystone.heat_project_domain }}
project_name = {{ .Values.keystone.heat_project_name }}
user_domain_name = {{ .Values.keystone.heat_user_domain }}
username = {{ .Values.keystone.heat_user }}
password = {{ .Values.keystone.heat_password }}
[heat_api]
bind_port = {{ .Values.service.api.port }}
bind_host = 0.0.0.0
workers = {{ .Values.resources.api.workers }}
[heat_api_cloudwatch]
bind_port = {{ .Values.service.cloudwatch.port }}
bind_host = 0.0.0.0
workers = {{ .Values.resources.cloudwatch.workers }}
[heat_api_cfn]
bind_port = {{ .Values.service.cfn.port }}
bind_host = 0.0.0.0
workers = {{ .Values.resources.cfn.workers }}
[oslo_messaging_rabbit]
rabbit_userid = {{ .Values.messaging.user }}
rabbit_password = {{ .Values.messaging.password }}
rabbit_ha_queues = true
rabbit_hosts = {{ .Values.messaging.hosts }}
[paste_deploy]
config_file = /etc/heat/api-paste.ini
[trustee]
auth_type = "password"
auth_section = "trustee_keystone"
[trustee_keystone]
signing_dir = "/var/cache/heat"
memcached_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}"
auth_version = v3
auth_url = {{ include "endpoint_keystone_internal" . }}
auth_type = password
region_name = {{ .Values.keystone.heat_trustee_region_name }}
user_domain_name = {{ .Values.keystone.heat_trustee_user_domain }}
username = {{ .Values.keystone.heat_trustee_user }}
password = {{ .Values.keystone.heat_trustee_password }}
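
With the database defaults from values.yaml below, the [database] connection line renders to:

connection = mysql+pymysql://heat:password@mariadb:3306/heat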


@ -0,0 +1,54 @@
apiVersion: batch/v1
kind: Job
metadata:
name: heat-db-init
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_init.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_init.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
- name: heat-db-init
image: {{ .Values.images.db_init | quote }}
imagePullPolicy: {{ .Values.images.pull_policy | quote }}
env:
- name: ANSIBLE_LIBRARY
value: /usr/share/ansible/
command:
- bash
- /tmp/db-init.sh
volumeMounts:
- name: dbinitsh
mountPath: /tmp/db-init.sh
subPath: db-init.sh
readOnly: true
volumes:
- name: dbinitsh
configMap:
name: heat-bin


@ -0,0 +1,58 @@
apiVersion: batch/v1
kind: Job
metadata:
name: heat-db-sync
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
- name: heat-db-sync
image: {{ .Values.images.db_sync }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- heat-manage
args:
- --config-dir
- /etc/heat/conf
- db_sync
volumeMounts:
- name: pod-etc-heat
mountPath: /etc/heat
- name: heatconf
mountPath: /etc/heat/conf/heat.conf
subPath: heat.conf
readOnly: true
volumes:
- name: pod-etc-heat
emptyDir: {}
- name: heatconf
configMap:
name: heat-etc
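
Both database jobs can be verified with plain kubectl once scheduled (the openstack namespace is an assumption; substitute the release namespace):

kubectl -n openstack get jobs heat-db-init heat-db-sync
kubectl -n openstack logs job/heat-db-sync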


@ -0,0 +1,65 @@
{{- $envAll := . }}
{{- $ksAdminSecret := .Values.keystone_secrets.admin }}
apiVersion: batch/v1
kind: Job
metadata:
name: heat-ks-endpoints
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
{{- range $key1, $osServiceName := tuple "heat" "heat-cfn" }}
{{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }}
- name: {{ $osServiceName }}-ks-endpoints-{{ $osServiceEndPoint }}
image: {{ $envAll.Values.images.ks_endpoints }}
imagePullPolicy: {{ $envAll.Values.images.pull_policy }}
command:
- bash
- /tmp/ks-endpoints.sh
volumeMounts:
- name: ks-endpoints-sh
mountPath: /tmp/ks-endpoints.sh
subPath: ks-endpoints.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: OS_SVC_ENDPOINT
value: {{ $osServiceEndPoint }}
- name: OS_SERVICE_NAME
value: {{ $osServiceName }}
- name: OS_SERVICE_TYPE
value: {{ tuple $osServiceName $envAll | include "endpoint_type_lookup" }}
- name: OS_SERVICE_ENDPOINT
value: {{ tuple $osServiceName $osServiceEndPoint "api" $envAll | include "endpoint_addr_lookup" }}
{{- end }}
{{- end }}
volumes:
- name: ks-endpoints-sh
configMap:
name: heat-bin
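
The nested range above fans out into one container per service/endpoint pair, six in total:

heat-ks-endpoints-admin
heat-ks-endpoints-internal
heat-ks-endpoints-public
heat-cfn-ks-endpoints-admin
heat-cfn-ks-endpoints-internal
heat-cfn-ks-endpoints-public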


@ -0,0 +1,59 @@
{{- $envAll := . }}
{{- $ksAdminSecret := .Values.keystone_secrets.admin }}
apiVersion: batch/v1
kind: Job
metadata:
name: heat-ks-service
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
{{- range $key1, $osServiceName := tuple "heat" "heat-cfn" }}
- name: {{ $osServiceName }}-ks-service-registration
image: {{ $envAll.Values.images.ks_service }}
imagePullPolicy: {{ $envAll.Values.images.pull_policy }}
command:
- bash
- /tmp/ks-service.sh
volumeMounts:
- name: ks-service-sh
mountPath: /tmp/ks-service.sh
subPath: ks-service.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: OS_SERVICE_NAME
value: {{ $osServiceName }}
- name: OS_SERVICE_TYPE
value: {{ tuple $osServiceName $envAll | include "endpoint_type_lookup" }}
{{- end }}
volumes:
- name: ks-service-sh
configMap:
name: heat-bin


@ -0,0 +1,124 @@
{{- $ksAdminSecret := .Values.keystone_secrets.admin }}
{{- $ksUserSecret := .Values.keystone_secrets.user }}
# The heat user management job is a bit different from other services, as it also needs to create a stack user domain and a trustee user
{{- $ksTrusteeUserSecret := .Values.keystone_secrets.trustee }}
{{- $ksStackUserSecret := .Values.keystone_secrets.stack }}
apiVersion: batch/v1
kind: Job
metadata:
name: heat-ks-user
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_user.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
- name: heat-ks-user
image: {{ .Values.images.ks_user }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- bash
- /tmp/ks-user.sh
volumeMounts:
- name: ks-user-sh
mountPath: /tmp/ks-user.sh
subPath: ks-user.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_SERVICE_NAME
value: "heat"
{{- with $env := dict "ksUserSecret" $ksUserSecret }}
{{- include "env_ks_user_create_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_ROLE
value: {{ .Values.keystone.heat_user_role | quote }}
- name: heat-ks-trustee-user
image: {{ .Values.images.ks_user }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- bash
- /tmp/ks-user.sh
volumeMounts:
- name: ks-user-sh
mountPath: /tmp/ks-user.sh
subPath: ks-user.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_SERVICE_NAME
value: "heat"
{{- with $env := dict "ksUserSecret" $ksTrusteeUserSecret }}
{{- include "env_ks_user_create_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_ROLE
value: {{ .Values.keystone.heat_trustee_role | quote }}
- name: heat-ks-domain-user
image: {{ .Values.images.ks_user }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- bash
- /tmp/ks-domain-user.sh
volumeMounts:
- name: ks-user-sh
mountPath: /tmp/ks-domain-user.sh
subPath: ks-domain-user.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_SERVICE_NAME
value: "heat"
- name: SERVICE_OS_REGION_NAME
valueFrom:
secretKeyRef:
name: {{ $ksStackUserSecret }}
key: OS_REGION_NAME
- name: SERVICE_OS_DOMAIN_NAME
valueFrom:
secretKeyRef:
name: {{ $ksStackUserSecret }}
key: OS_DOMAIN_NAME
- name: SERVICE_OS_USERNAME
valueFrom:
secretKeyRef:
name: {{ $ksStackUserSecret }}
key: OS_USERNAME
- name: SERVICE_OS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ $ksStackUserSecret }}
key: OS_PASSWORD
- name: SERVICE_OS_ROLE
value: {{ .Values.keystone.heat_stack_user_role | quote }}
volumes:
- name: ks-user-sh
configMap:
name: heat-bin
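
In keystone CLI terms, the heat-ks-domain-user container performs roughly the following (a sketch using the chart defaults; the actual logic lives in ks-domain-user.sh in the heat-bin configmap):

openstack domain create heat
openstack user create --domain heat --password password heat-domain
openstack role add --user heat-domain --user-domain heat --domain heat admin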


@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.keystone_secrets.admin }}
type: Opaque
data:
OS_AUTH_URL: |
{{ .Values.keystone.auth_url | b64enc | indent 4 }}
OS_REGION_NAME: |
{{ .Values.keystone.admin_region_name | b64enc | indent 4 }}
OS_PROJECT_DOMAIN_NAME: |
{{ .Values.keystone.admin_project_domain | b64enc | indent 4 }}
OS_PROJECT_NAME: |
{{ .Values.keystone.admin_project_name | b64enc | indent 4 }}
OS_USER_DOMAIN_NAME: |
{{ .Values.keystone.admin_user_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.admin_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.admin_password | b64enc | indent 4 }}
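
To inspect one of the rendered credentials after deployment, decode it straight from the secret:

kubectl get secret heat-env-keystone-admin -o jsonpath='{.data.OS_USERNAME}' | base64 -d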


@ -0,0 +1,14 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.keystone_secrets.stack }}
type: Opaque
data:
OS_REGION_NAME: |
{{ .Values.keystone.heat_stack_region_name | b64enc | indent 4 }}
OS_DOMAIN_NAME: |
{{ .Values.keystone.heat_stack_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.heat_stack_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.heat_stack_password | b64enc | indent 4 }}


@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.keystone_secrets.trustee }}
type: Opaque
data:
OS_AUTH_URL: |
{{ .Values.keystone.auth_url | b64enc | indent 4 }}
OS_REGION_NAME: |
{{ .Values.keystone.heat_trustee_region_name | b64enc | indent 4 }}
OS_PROJECT_DOMAIN_NAME: |
{{ .Values.keystone.heat_trustee_project_domain | b64enc | indent 4 }}
OS_PROJECT_NAME: |
{{ .Values.keystone.heat_trustee_project_name | b64enc | indent 4 }}
OS_USER_DOMAIN_NAME: |
{{ .Values.keystone.heat_trustee_user_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.heat_trustee_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.heat_trustee_password | b64enc | indent 4 }}


@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.keystone_secrets.user }}
type: Opaque
data:
OS_AUTH_URL: |
{{ .Values.keystone.auth_url | b64enc | indent 4 }}
OS_REGION_NAME: |
{{ .Values.keystone.heat_region_name | b64enc | indent 4 }}
OS_PROJECT_DOMAIN_NAME: |
{{ .Values.keystone.heat_project_domain | b64enc | indent 4 }}
OS_PROJECT_NAME: |
{{ .Values.keystone.heat_project_name | b64enc | indent 4 }}
OS_USER_DOMAIN_NAME: |
{{ .Values.keystone.heat_user_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.heat_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.heat_password | b64enc | indent 4 }}


@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.service.api.name }}
spec:
ports:
- port: {{ .Values.service.api.port }}
selector:
app: heat-api


@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.service.cfn.name }}
spec:
ports:
- port: {{ .Values.service.cfn.port }}
selector:
app: heat-cfn


@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.service.cloudwatch.name }}
spec:
ports:
- port: {{ .Values.service.cloudwatch.port }}
selector:
app: heat-cloudwatch


@ -0,0 +1,65 @@
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: heat-engine
spec:
serviceName: heat-engine
replicas: {{ .Values.replicas.engine }}
template:
metadata:
labels:
app: heat-engine
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.engine.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.engine.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: heat-engine
image: {{ .Values.images.engine }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- heat-engine
- --config-dir
- /etc/heat/conf
volumeMounts:
- name: pod-etc-heat
mountPath: /etc/heat
- name: pod-var-cache-heat
mountPath: /var/cache/heat
- name: heatconf
mountPath: /etc/heat/conf/heat.conf
subPath: heat.conf
readOnly: true
volumes:
- name: pod-etc-heat
emptyDir: {}
- name: pod-var-cache-heat
emptyDir: {}
- name: heatconf
configMap:
name: heat-etc

208 heat/values.yaml Normal file

@ -0,0 +1,208 @@
# Default values for heat.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
replicas:
api: 1
cfn: 1
cloudwatch: 1
engine: 1
labels:
node_selector_key: openstack-control-plane
node_selector_value: enabled
images:
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0
db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
db_sync: docker.io/kolla/ubuntu-source-heat-api:3.0.1
ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
ks_service: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
ks_endpoints: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
api: docker.io/kolla/ubuntu-source-heat-api:3.0.1
cfn: docker.io/kolla/ubuntu-source-heat-api:3.0.1
cloudwatch: docker.io/kolla/ubuntu-source-heat-api:3.0.1
engine: docker.io/kolla/ubuntu-source-heat-engine:3.0.1
pull_policy: "IfNotPresent"
keystone_secrets:
admin: "heat-env-keystone-admin"
user: "heat-env-keystone-user"
trustee: "heat-env-keystone-trustee"
stack: "heat-env-keystone-stack-user"
keystone:
auth_uri: "http://keystone-api:5000"
auth_url: "http://keystone-api:35357"
admin_user: "admin"
admin_user_domain: "default"
admin_password: "password"
admin_project_name: "admin"
admin_project_domain: "default"
admin_region_name: "RegionOne"
heat_user: "heat"
heat_user_domain: "default"
heat_user_role: "admin"
heat_password: "password"
heat_project_name: "service"
heat_project_domain: "default"
heat_region_name: "RegionOne"
heat_trustee_user: "heat-trust"
heat_trustee_user_domain: "default"
heat_trustee_role: "admin"
heat_trustee_password: "password"
heat_trustee_project_name: "service"
heat_trustee_project_domain: "default"
heat_trustee_region_name: "RegionOne"
heat_stack_user: "heat-domain"
heat_stack_domain: "heat"
heat_stack_user_role: "admin"
heat_stack_password: "password"
heat_stack_region_name: "RegionOne"
service:
api:
name: "heat-api"
port: 8004
proto: "http"
cfn:
name: "heat-cfn"
port: 8000
proto: "http"
cloudwatch:
name: "heat-cloudwatch"
port: 8003
proto: "http"
database:
address: mariadb
port: 3306
root_user: root
root_password: password
heat_database_name: heat
heat_password: password
heat_user: heat
messaging:
hosts: rabbitmq
user: rabbitmq
password: password
memcached:
host: memcached
port: 11211
resources:
api:
workers: 8
cfn:
workers: 8
cloudwatch:
workers: 8
engine:
workers: 8
misc:
debug: false
secrets:
keystone_admin:
dependencies:
db_init:
jobs:
- mariadb-seed
service:
- mariadb
db_sync:
jobs:
- heat-db-init
service:
- mariadb
ks_user:
service:
- keystone-api
ks_service:
service:
- keystone-api
ks_endpoints:
jobs:
- heat-ks-service
service:
- keystone-api
api:
jobs:
- heat-db-sync
- heat-ks-user
- heat-ks-endpoints
service:
- keystone-api
- mariadb
cfn:
jobs:
- heat-db-sync
- heat-ks-user
- heat-ks-endpoints
service:
- keystone-api
- mariadb
cloudwatch:
jobs:
- heat-db-sync
- heat-ks-user
- heat-ks-endpoints
service:
- keystone-api
- mariadb
engine:
jobs:
- heat-db-sync
- heat-ks-user
- heat-ks-endpoints
service:
- keystone-api
- mariadb
# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
endpoints:
keystone:
hosts:
default: keystone-api
path: /v3
type: identity
scheme: 'http'
port:
admin: 35357
public: 5000
heat:
hosts:
default: heat-api
path: '/v1/%(project_id)s'
type: orchestration
scheme: 'http'
port:
api: 8004
heat_cfn:
hosts:
default: heat-cfn
path: /v1
type: cloudformation
scheme: 'http'
port:
api: 8000
# Cloudwatch does not get an entry in the keystone service catalog
heat_cloudwatch:
hosts:
default: heat-cloudwatch
path: null
type: null
scheme: 'http'
port:
api: 8003
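
As the comment above notes, the endpoints block is meant to be overridden per environment rather than edited in place; a hypothetical override file and helm 2 install:

# overrides.yaml (hypothetical values)
endpoints:
  keystone:
    hosts:
      default: keystone.svc.cluster.local

helm install --name heat --namespace openstack -f overrides.yaml ./heat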


@ -2,6 +2,16 @@
set -x
chown neutron: /run/openvswitch/db.sock
# ensure we can talk to openvswitch or bail early
# this is until we can set up a proper dependency
# on daemonsets - note that a show is not sufficient
# here, we need to communicate with both the db and vswitchd
# which means we need to do a create action
#
# see https://github.com/att-comdev/openstack-helm/issues/88
timeout 3m neutron-sanity-check --config-file /etc/neutron/neutron.conf --ovsdb_native --nokeepalived_ipv6_support
# determine local-ip dynamically based on interface provided but only if tunnel_types is not null
{{- if .Values.ml2.agent.tunnel_types }}
IP=$(ip a s {{ .Values.network.interface.tunnel | default .Values.network.interface.default}} | grep 'inet ' | awk '{print $2}' | awk -F "/" '{print $1}')
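
Given typical ip output, the pipeline above reduces to the bare IPv4 address; for an assumed interface eth0:

# ip a s eth0 | grep 'inet '   ->   inet 10.0.0.5/24 brd 10.0.0.255 scope global eth0
# awk '{print $2}'             ->   10.0.0.5/24
# awk -F "/" '{print $1}'      ->   10.0.0.5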


@ -1,166 +0,0 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: neutron-openvswitch
spec:
template:
metadata:
labels:
app: neutron-openvswitch
spec:
nodeSelector:
{{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }}
securityContext:
runAsUser: 0
dnsPolicy: ClusterFirst
hostNetwork: true
containers:
- name: neutron-openvswitch-agent
image: {{ .Values.images.neutron_openvswitch_agent }}
imagePullPolicy: {{ .Values.images.pull_policy }}
securityContext:
privileged: true
# ensures this container can see a br-int
# bridge before it's marked as ready
readinessProbe:
exec:
command:
- bash
- -c
- 'ovs-vsctl list-br | grep -q br-int'
env:
- name: INTERFACE_NAME
value: {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "bash /tmp/neutron-openvswitch-agent.sh"
- name: DEPENDENCY_JOBS
value: "{{ include "joinListWithColon" .Values.dependencies.openvswitchagent.jobs }}"
- name: DEPENDENCY_SERVICE
value: "{{ include "joinListWithColon" .Values.dependencies.openvswitchagent.service }}"
- name: DEPENDENCY_CONTAINER
value: "{{ include "joinListWithColon" .Values.dependencies.openvswitchagent.container }}"
volumeMounts:
- name: neutronopenvswitchagentsh
mountPath: /tmp/neutron-openvswitch-agent.sh
subPath: neutron-openvswitch-agent.sh
- name: neutronconf
mountPath: /etc/neutron/neutron.conf
subPath: neutron.conf
- name: ml2confini
mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini
subPath: ml2-conf.ini
- name: libmodules
mountPath: /lib/modules
readOnly: true
- name: run
mountPath: /run
- mountPath: /etc/resolv.conf
name: resolvconf
subPath: resolv.conf
- name: openvswitch-db-server
image: {{ .Values.images.openvswitch_db_server }}
imagePullPolicy: {{ .Values.images.pull_policy }}
securityContext:
privileged: true
env:
- name: INTERFACE_NAME
value: {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "bash /tmp/openvswitch-db-server.sh"
volumeMounts:
- name: openvswitchdbserversh
mountPath: /tmp/openvswitch-db-server.sh
subPath: openvswitch-db-server.sh
- mountPath: /etc/resolv.conf
name: resolvconf
subPath: resolv.conf
- name: varlibopenvswitch
mountPath: /var/lib/openvswitch/
- name: run
mountPath: /run
- name: openvswitch-vswitchd
image: {{ .Values.images.openvswitch_vswitchd }}
imagePullPolicy: {{ .Values.images.pull_policy }}
securityContext:
privileged: true
# ensures this container can speak to the ovs database
# successfully before it's marked as ready
readinessProbe:
exec:
command:
- /usr/bin/ovs-vsctl
- show
env:
- name: INTERFACE_NAME
value: {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "bash /tmp/openvswitch-vswitchd.sh"
- name: DEPENDENCY_CONTAINER
value: "openvswitch-db-server"
volumeMounts:
- name: openvswitchvswitchdsh
mountPath: /tmp/openvswitch-vswitchd.sh
subPath: openvswitch-vswitchd.sh
- name: openvswitchensureconfiguredsh
mountPath: /tmp/openvswitch-ensure-configured.sh
subPath: openvswitch-ensure-configured.sh
- name: libmodules
mountPath: /lib/modules
readOnly: true
- name: run
mountPath: /run
volumes:
- name: openvswitchdbserversh
configMap:
name: neutron-bin
- name: openvswitchvswitchdsh
configMap:
name: neutron-bin
- name: openvswitchensureconfiguredsh
configMap:
name: neutron-bin
- name: varlibopenvswitch
emptyDir: {}
- name: neutronopenvswitchagentsh
configMap:
name: neutron-bin
- name: neutronconf
configMap:
name: neutron-etc
- name: ml2confini
configMap:
name: neutron-etc
- name: resolvconf
configMap:
name: neutron-etc
- name: libmodules
hostPath:
path: /lib/modules
- name: run
hostPath:
path: /run


@ -0,0 +1,86 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: ovs-agent
spec:
template:
metadata:
labels:
app: ovs-agent
spec:
nodeSelector:
{{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }}
securityContext:
runAsUser: 0
dnsPolicy: ClusterFirst
hostNetwork: true
containers:
- name: ovs-agent
image: {{ .Values.images.neutron_openvswitch_agent }}
imagePullPolicy: {{ .Values.images.pull_policy }}
securityContext:
privileged: true
# ensures this container can see a br-int
# bridge before it's marked as ready
readinessProbe:
exec:
command:
- bash
- -c
- 'ovs-vsctl list-br | grep -q br-int'
env:
- name: INTERFACE_NAME
value: {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "bash /tmp/neutron-openvswitch-agent.sh"
- name: DEPENDENCY_JOBS
value: "{{ include "joinListWithColon" .Values.dependencies.ovs_agent.jobs }}"
- name: DEPENDENCY_SERVICE
value: "{{ include "joinListWithColon" .Values.dependencies.ovs_agent.service }}"
volumeMounts:
- name: neutronopenvswitchagentsh
mountPath: /tmp/neutron-openvswitch-agent.sh
subPath: neutron-openvswitch-agent.sh
- name: neutronconf
mountPath: /etc/neutron/neutron.conf
subPath: neutron.conf
- name: ml2confini
mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini
subPath: ml2-conf.ini
- name: libmodules
mountPath: /lib/modules
readOnly: true
- name: run
mountPath: /run
- mountPath: /etc/resolv.conf
name: resolvconf
subPath: resolv.conf
volumes:
- name: varlibopenvswitch
emptyDir: {}
- name: neutronopenvswitchagentsh
configMap:
name: neutron-bin
- name: neutronconf
configMap:
name: neutron-etc
- name: ml2confini
configMap:
name: neutron-etc
- name: resolvconf
configMap:
name: neutron-etc
- name: libmodules
hostPath:
path: /lib/modules
- name: run
hostPath:
path: /run


@ -0,0 +1,62 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: ovs-db
spec:
template:
metadata:
labels:
app: ovs-db
spec:
nodeSelector:
{{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }}
securityContext:
runAsUser: 0
dnsPolicy: ClusterFirst
hostNetwork: true
containers:
- name: ovs-db
image: {{ .Values.images.openvswitch_db_server }}
imagePullPolicy: {{ .Values.images.pull_policy }}
securityContext:
privileged: true
env:
- name: INTERFACE_NAME
value: {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "bash /tmp/openvswitch-db-server.sh"
volumeMounts:
- name: openvswitchdbserversh
mountPath: /tmp/openvswitch-db-server.sh
subPath: openvswitch-db-server.sh
- mountPath: /etc/resolv.conf
name: resolvconf
subPath: resolv.conf
- name: varlibopenvswitch
mountPath: /var/lib/openvswitch/
- name: run
mountPath: /run
volumes:
- name: openvswitchdbserversh
configMap:
name: neutron-bin
- name: varlibopenvswitch
emptyDir: {}
- name: resolvconf
configMap:
name: neutron-etc
- name: libmodules
hostPath:
path: /lib/modules
- name: run
hostPath:
path: /run


@ -0,0 +1,67 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: ovs-vswitchd
spec:
template:
metadata:
labels:
app: ovs-vswitchd
spec:
nodeSelector:
{{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }}
securityContext:
runAsUser: 0
dnsPolicy: ClusterFirst
hostNetwork: true
containers:
- name: ovs-vswitchd
image: {{ .Values.images.openvswitch_vswitchd }}
imagePullPolicy: {{ .Values.images.pull_policy }}
securityContext:
privileged: true
# ensures this container can speak to the ovs database
# successfully before it's marked as ready
readinessProbe:
exec:
command:
- /usr/bin/ovs-vsctl
- show
env:
- name: INTERFACE_NAME
value: {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "bash /tmp/openvswitch-vswitchd.sh"
volumeMounts:
- name: openvswitchvswitchdsh
mountPath: /tmp/openvswitch-vswitchd.sh
subPath: openvswitch-vswitchd.sh
- name: openvswitchensureconfiguredsh
mountPath: /tmp/openvswitch-ensure-configured.sh
subPath: openvswitch-ensure-configured.sh
- name: libmodules
mountPath: /lib/modules
readOnly: true
- name: run
mountPath: /run
volumes:
- name: openvswitchvswitchdsh
configMap:
name: neutron-bin
- name: openvswitchensureconfiguredsh
configMap:
name: neutron-bin
- name: libmodules
hostPath:
path: /lib/modules
- name: run
hostPath:
path: /run


@ -7,16 +7,16 @@ replicas:
server: 1
images:
init: quay.io/stackanetes/stackanetes-kolla-toolbox:barcelona
db_sync: quay.io/stackanetes/stackanetes-neutron-server:barcelona
server: quay.io/stackanetes/stackanetes-neutron-server:barcelona
dhcp: quay.io/stackanetes/stackanetes-neutron-dhcp-agent:barcelona
metadata: quay.io/stackanetes/stackanetes-neutron-metadata-agent:barcelona
l3: quay.io/stackanetes/stackanetes-neutron-l3-agent:barcelona
neutron_openvswitch_agent: quay.io/stackanetes/stackanetes-neutron-openvswitch-agent:barcelona
init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
db_sync: quay.io/stackanetes/stackanetes-neutron-server:newton
server: quay.io/stackanetes/stackanetes-neutron-server:newton
dhcp: quay.io/stackanetes/stackanetes-neutron-dhcp-agent:newton
metadata: quay.io/stackanetes/stackanetes-neutron-metadata-agent:newton
l3: quay.io/stackanetes/stackanetes-neutron-l3-agent:newton
neutron_openvswitch_agent: quay.io/stackanetes/stackanetes-neutron-openvswitch-agent:newton
openvswitch_db_server: quay.io/attcomdev/openvswitch-vswitchd:latest
openvswitch_vswitchd: quay.io/attcomdev/openvswitch-vswitchd:latest
post: quay.io/stackanetes/stackanetes-kolla-toolbox:barcelona
post: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
entrypoint: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0
pull_policy: "IfNotPresent"
@ -133,7 +133,6 @@ dependencies:
server:
jobs:
- neutron-db-sync
- mariadb-seed
service:
- rabbitmq
- mariadb
@ -148,18 +147,17 @@ dependencies:
- neutron-init
- nova-post
daemonset:
- neutron-openvswitch
- ovs-agent
metadata:
service:
- rabbitmq
- nova-api
jobs:
- neutron-init
- nova-post
service:
- neutron-server
- rabbitmq
- nova-api
daemonset:
- neutron-openvswitch
openvswitchagent:
- ovs-agent
ovs_agent:
jobs:
- neutron-post
- nova-post
@ -167,24 +165,20 @@ dependencies:
- keystone-api
- rabbitmq
- neutron-server
container:
- openvswitch-db-server
- openvswitch-vswitchd
l3:
jobs:
- nova-init
- neutron-init
- nova-post
service:
- neutron-server
- rabbitmq
- nova-api
jobs:
- nova-init
- neutron-init
- nova-post
daemonset:
- neutron-openvswitch
- ovs-agent
db_sync:
jobs:
- neutron-init
- mariadb-seed
service:
- mariadb
init:
@ -193,11 +187,10 @@ dependencies:
service:
- mariadb
post:
jobs:
- neutron-db-sync
service:
- keystone-api
- neutron-server
jobs:
- neutron-db-sync
# typically overridden by environmental
# values, but should include all endpoints