Merge pull request #78 from portdirect/cinder

OpenStack Cinder Initial Commit
Alan Meadows 2017-01-10 11:18:25 -08:00 committed by GitHub
commit bab88b829e
25 changed files with 1195 additions and 3 deletions


@ -1,12 +1,13 @@
.PHONY: ceph bootstrap mariadb keystone memcached rabbitmq common openstack neutron heat maas all clean
.PHONY: ceph bootstrap mariadb keystone memcached rabbitmq common openstack neutron cinder heat maas all clean
B64_DIRS := common/secrets
B64_EXCLUDE := $(wildcard common/secrets/*.b64)
CHARTS := ceph mariadb rabbitmq memcached keystone glance horizon neutron heat maas openstack
CHARTS := ceph mariadb rabbitmq memcached keystone glance horizon neutron cinder heat maas openstack
COMMON_TPL := common/templates/_globals.tpl
all: common ceph bootstrap mariadb rabbitmq memcached keystone glance horizon neutron heat maas openstack
all: common ceph bootstrap mariadb rabbitmq memcached keystone glance horizon neutron cinder heat maas openstack
common: build-common
@ -19,6 +20,8 @@ mariadb: build-mariadb
keystone: build-keystone
cinder: build-cinder
horizon: build-horizon
rabbitmq: build-rabbitmq

cinder/Chart.yaml Normal file

@ -0,0 +1,3 @@
description: A Helm chart for cinder
name: cinder
version: 0.1.0

cinder/requirements.yaml Normal file

@ -0,0 +1,4 @@
dependencies:
- name: common
repository: http://localhost:8879/charts
version: 0.1.0


@ -0,0 +1,45 @@
# This file is required because we use a slightly different endpoint layout in
# the values yaml, until we can make this change for all services.
# this function returns the endpoint URI for a service; it takes a tuple
# input in the form: service-type, endpoint-class, port-name, e.g.:
# { tuple "orchestration" "public" "api" . | include "endpoint_type_lookup_addr" }
# will return the appropriate URI. Once merged this should phase out the above.
{{- define "endpoint_type_lookup_addr" -}}
{{- $type := index . 0 -}}
{{- $endpoint := index . 1 -}}
{{- $port := index . 2 -}}
{{- $context := index . 3 -}}
{{- $endpointMap := index $context.Values.endpoints $type }}
{{- $fqdn := $context.Release.Namespace -}}
{{- if $context.Values.endpoints.fqdn -}}
{{- $fqdn := $context.Values.endpoints.fqdn -}}
{{- end -}}
{{- with $endpointMap -}}
{{- $endpointScheme := .scheme }}
{{- $endpointHost := index .hosts $endpoint | default .hosts.default}}
{{- $endpointPort := index .port $port }}
{{- $endpointPath := .path }}
{{- printf "%s://%s.%s:%1.f%s" $endpointScheme $endpointHost $fqdn $endpointPort $endpointPath | quote -}}
{{- end -}}
{{- end -}}
#-------------------------------
# endpoint name lookup
#-------------------------------
# this function is used in endpoint management templates;
# it returns the service name for an openstack service, e.g.:
# { tuple "orchestration" . | include "endpoint_name_lookup" }
# will return "heat"
{{- define "endpoint_name_lookup" -}}
{{- $type := index . 0 -}}
{{- $context := index . 1 -}}
{{- $endpointMap := index $context.Values.endpoints $type }}
{{- $endpointName := index $endpointMap "name" }}
{{- $endpointName | quote -}}
{{- end -}}
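
To make the tuple convention concrete: with the endpoint values introduced later in this commit (cinder/values.yaml) and a release into, say, the openstack namespace (the namespace is an assumption for illustration only), the two helpers render as follows:

# a sketch, assuming .Release.Namespace is "openstack":
# { tuple "volume" "public" "api" . | include "endpoint_type_lookup_addr" }
#   renders "http://cinder-api.openstack:8776/v1/%(tenant_id)s"
# { tuple "volume" . | include "endpoint_name_lookup" }
#   renders "cinder"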


@ -0,0 +1,21 @@
#!/bin/bash
set -ex
export HOME=/tmp
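# create the cinder database if it does not already exist; the ansible
# mysql_db module defaults to state=present, so re-running this job is safe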
ansible localhost -vvv \
-m mysql_db -a "login_host='{{ .Values.database.address }}' \
login_port='{{ .Values.database.port }}' \
login_user='{{ .Values.database.root_user }}' \
login_password='{{ .Values.database.root_password }}' \
name='{{ .Values.database.cinder_database_name }}'"
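# create the cinder user and grant it full privileges on that database from
# any host; append_privs adds to any grants the user already holds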
ansible localhost -vvv \
-m mysql_user -a "login_host='{{ .Values.database.address }}' \
login_port='{{ .Values.database.port }}' \
login_user='{{ .Values.database.root_user }}' \
login_password='{{ .Values.database.root_password }}' \
name='{{ .Values.database.cinder_user }}' \
password='{{ .Values.database.cinder_password }}' \
host='%' \
priv='{{ .Values.database.cinder_database_name }}.*:ALL' \
append_privs='yes'"


@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: cinder-bin
data:
db-init.sh: |+
{{ tuple "bin/_db-init.sh.tpl" . | include "template" | indent 4 }}
ks-service.sh: |+
{{- include "common_keystone_service" . | indent 4 }}
ks-endpoints.sh: |+
{{- include "common_keystone_endpoints" . | indent 4 }}
ks-user.sh: |+
{{- include "common_keystone_user" . | indent 4 }}


@ -0,0 +1,15 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: cinder-etc
data:
cinder.conf: |+
{{ tuple "etc/_cinder.conf.tpl" . | include "template" | indent 4 }}
api-paste.ini: |+
{{ tuple "etc/_cinder-api-paste.ini.tpl" . | include "template" | indent 4 }}
policy.json: |+
{{ tuple "etc/_policy.json.tpl" . | include "template" | indent 4 }}
ceph.conf: |+
{{ tuple "etc/_ceph.conf.tpl" . | include "template" | indent 4 }}
ceph.client.{{ .Values.ceph.cinder_user }}.keyring: |+
{{ tuple "etc/_ceph-cinder.keyring.tpl" . | include "template" | indent 4 }}
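Note that the final data key is itself templated: with the default ceph.cinder_user of "admin" from cinder/values.yaml, the rendered ConfigMap carries a key literally named ceph.client.admin.keyring, which the volume deployment below mounts via the same computed subPath:

  ceph.client.admin.keyring: |+
    [client.admin]
    key = ...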


@ -0,0 +1,93 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: cinder-api
spec:
replicas: {{ .Values.replicas.api }}
revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
strategy:
type: {{ .Values.upgrades.pod_replacement_strategy }}
{{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
{{ end }}
template:
metadata:
labels:
app: cinder-api
annotations:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }}
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.api.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.api.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: cinder-api
image: {{ .Values.images.api }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- cinder-api
- --config-dir
- /etc/cinder/conf
ports:
- containerPort: {{ .Values.service.api.port }}
readinessProbe:
tcpSocket:
port: {{ .Values.service.api.port }}
volumeMounts:
- name: pod-etc-cinder
mountPath: /etc/cinder
- name: pod-var-cache-cinder
mountPath: /var/cache/cinder
- name: cinderconf
mountPath: /etc/cinder/conf/cinder.conf
subPath: cinder.conf
readOnly: true
- name: cinderpaste
mountPath: /etc/cinder/api-paste.ini
subPath: api-paste.ini
readOnly: true
- name: cinderpolicy
mountPath: /etc/cinder/policy.json
subPath: policy.json
readOnly: true
volumes:
- name: pod-etc-cinder
emptyDir: {}
- name: pod-var-cache-cinder
emptyDir: {}
- name: cinderconf
configMap:
name: cinder-etc
- name: cinderpaste
configMap:
name: cinder-etc
- name: cinderpolicy
configMap:
name: cinder-etc
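
The configmap-bin-hash and configmap-etc-hash annotations are what tie configuration changes to pod replacement: the rendered ConfigMaps are fingerprinted into the pod template, so any change to the scripts or to cinder.conf alters the template and triggers the RollingUpdate strategy above. The "hash" helper itself ships in the common chart; a minimal sketch of the pattern it follows (the exact definition there may differ):

{{- define "hash" -}}
{{- $name := index . 0 -}}
{{- $context := index . 1 -}}
{{- /* render the named sibling template and fingerprint the result */ -}}
{{- $path := $context.Template.Name | replace (base $context.Template.Name) $name -}}
{{- include $path $context | sha256sum | quote -}}
{{- end -}}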


@ -0,0 +1,88 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: cinder-scheduler
spec:
replicas: {{ .Values.replicas.scheduler }}
revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
strategy:
type: {{ .Values.upgrades.pod_replacement_strategy }}
{{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
{{ end }}
template:
metadata:
labels:
app: cinder-scheduler
annotations:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }}
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.scheduler.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.scheduler.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: cinder-scheduler
image: {{ .Values.images.scheduler }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- cinder-scheduler
- --config-dir
- /etc/cinder/conf
volumeMounts:
- name: pod-etc-cinder
mountPath: /etc/cinder
- name: pod-var-cache-cinder
mountPath: /var/cache/cinder
- name: cinderconf
mountPath: /etc/cinder/conf/cinder.conf
subPath: cinder.conf
readOnly: true
- name: cinderpaste
mountPath: /etc/cinder/api-paste.ini
subPath: api-paste.ini
readOnly: true
- name: cinderpolicy
mountPath: /etc/cinder/policy.json
subPath: policy.json
readOnly: true
volumes:
- name: pod-etc-cinder
emptyDir: {}
- name: pod-var-cache-cinder
emptyDir: {}
- name: cinderconf
configMap:
name: cinder-etc
- name: cinderpaste
configMap:
name: cinder-etc
- name: cinderpolicy
configMap:
name: cinder-etc


@ -0,0 +1,84 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: cinder-volume
spec:
replicas: {{ .Values.replicas.volume }}
revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
strategy:
type: {{ .Values.upgrades.pod_replacement_strategy }}
{{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
rollingUpdate:
maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
{{ end }}
template:
metadata:
labels:
app: cinder-volume
annotations:
configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "hash" }}
configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "hash" }}
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.volume.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
containers:
- name: cinder-volume
image: {{ .Values.images.volume }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- cinder-volume
- --config-dir
- /etc/cinder/conf
volumeMounts:
- name: pod-etc-cinder
mountPath: /etc/cinder
- name: pod-var-cache-cinder
mountPath: /var/cache/cinder
- name: cinderconf
mountPath: /etc/cinder/conf/cinder.conf
subPath: cinder.conf
readOnly: true
- name: cephconf
mountPath: /etc/ceph/ceph.conf
subPath: ceph.conf
readOnly: true
- name: cephclientcinderkeyring
mountPath: /etc/ceph/ceph.client.{{ .Values.ceph.cinder_user }}.keyring
subPath: ceph.client.{{ .Values.ceph.cinder_user }}.keyring
readOnly: true
volumes:
- name: pod-etc-cinder
emptyDir: {}
- name: pod-var-cache-cinder
emptyDir: {}
- name: cinderconf
configMap:
name: cinder-etc
- name: cephconf
configMap:
name: cinder-etc
- name: cephclientcinderkeyring
configMap:
name: cinder-etc


@ -0,0 +1,6 @@
[client.{{ .Values.ceph.cinder_user }}]
{{- if .Values.ceph.cinder_keyring }}
key = {{ .Values.ceph.cinder_keyring }}
{{- else }}
key = {{- include "secrets/ceph-client-key" . -}}
{{- end }}


@ -0,0 +1,16 @@
[global]
rgw_thread_pool_size = 1024
rgw_num_rados_handles = 100
{{- if .Values.ceph.monitors }}
[mon]
{{ range .Values.ceph.monitors }}
[mon.{{ . }}]
host = {{ . }}
mon_addr = {{ . }}
{{ end }}
{{- else }}
mon_host = ceph-mon.ceph
{{- end }}
[client]
rbd_cache_enabled = true
rbd_cache_writethrough_until_flush = true
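
With the chart defaults (ceph.monitors is an empty list in cinder/values.yaml), the if-branch is skipped and the rendered file is simply:

[global]
rgw_thread_pool_size = 1024
rgw_num_rados_handles = 100
mon_host = ceph-mon.ceph
[client]
rbd_cache_enabled = true
rbd_cache_writethrough_until_flush = true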


@ -0,0 +1,75 @@
#############
# OpenStack #
#############
[composite:osapi_volume]
use = call:cinder.api:root_app_factory
/: apiversions
/v1: openstack_volume_api_v1
/v2: openstack_volume_api_v2
/v3: openstack_volume_api_v3
[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1
keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
[composite:openstack_volume_api_v2]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2
keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
[composite:openstack_volume_api_v3]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3
keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3
[filter:request_id]
paste.filter_factory = oslo_middleware.request_id:RequestId.factory
[filter:http_proxy_to_wsgi]
paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = cinder
[filter:faultwrap]
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
[filter:osprofiler]
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
[filter:noauth]
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
[filter:sizelimit]
paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory
[app:apiv1]
paste.app_factory = cinder.api.v1.router:APIRouter.factory
[app:apiv2]
paste.app_factory = cinder.api.v2.router:APIRouter.factory
[app:apiv3]
paste.app_factory = cinder.api.v3.router:APIRouter.factory
[pipeline:apiversions]
pipeline = cors http_proxy_to_wsgi faultwrap osvolumeversionapp
[app:osvolumeversionapp]
paste.app_factory = cinder.api.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory


@ -0,0 +1,64 @@
[DEFAULT]
debug = {{ .Values.misc.debug }}
use_syslog = False
use_stderr = True
enable_v1_api = false
volume_name_template = %s
osapi_volume_workers = {{ .Values.api.workers }}
osapi_volume_listen = 0.0.0.0
osapi_volume_listen_port = {{ .Values.service.api.port }}
api_paste_config = /etc/cinder/api-paste.ini
glance_api_servers = "{{ .Values.glance.proto }}://{{ .Values.glance.host }}:{{ .Values.glance.port }}"
glance_api_version = {{ .Values.glance.version }}
enabled_backends = {{ include "joinListWithColon" .Values.backends.enabled }}
auth_strategy = keystone
os_region_name = {{ .Values.keystone.cinder_region_name }}
# ensures that our volume worker service-list doesn't
# explode with dead agents from terminated containers
# by pinning the agent identifier
host=cinder-volume-worker
[database]
connection = mysql+pymysql://{{ .Values.database.cinder_user }}:{{ .Values.database.cinder_password }}@{{ .Values.database.address }}:{{ .Values.database.port }}/{{ .Values.database.cinder_database_name }}
max_retries = -1
[keystone_authtoken]
auth_url = {{ .Values.keystone.auth_url }}
auth_type = password
project_domain_name = {{ .Values.keystone.cinder_project_domain }}
user_domain_name = {{ .Values.keystone.cinder_user_domain }}
project_name = {{ .Values.keystone.cinder_project_name }}
username = {{ .Values.keystone.cinder_user }}
password = {{ .Values.keystone.cinder_password }}
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_rabbit]
rabbit_userid = {{ .Values.messaging.user }}
rabbit_password = {{ .Values.messaging.password }}
rabbit_ha_queues = true
rabbit_hosts = {{ .Values.messaging.hosts }}
[rbd1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = {{ .Values.backends.rbd1.pool }}
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = {{ .Values.backends.rbd1.user }}
{{- if .Values.backends.rbd1.secret }}
rbd_secret_uuid = {{ .Values.backends.rbd1.secret }}
{{- else }}
rbd_secret_uuid = {{- include "secrets/ceph-client-key" . -}}
{{- end }}
report_discard_supported = True
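
For reference, with the defaults in cinder/values.yaml (user "admin", pool "volumes", secret unset) the [rbd1] stanza renders roughly as below; the placeholder stands in for whatever common/secrets/ceph-client-key resolves to:

[rbd1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = admin
rbd_secret_uuid = <value of common/secrets/ceph-client-key>
report_discard_supported = True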


@ -0,0 +1,138 @@
{
"context_is_admin": "role:admin",
"admin_or_owner": "is_admin:True or project_id:%(project_id)s",
"default": "rule:admin_or_owner",
"admin_api": "is_admin:True",
"volume:create": "",
"volume:delete": "rule:admin_or_owner",
"volume:get": "rule:admin_or_owner",
"volume:get_all": "rule:admin_or_owner",
"volume:get_volume_metadata": "rule:admin_or_owner",
"volume:create_volume_metadata": "rule:admin_or_owner",
"volume:delete_volume_metadata": "rule:admin_or_owner",
"volume:update_volume_metadata": "rule:admin_or_owner",
"volume:get_volume_admin_metadata": "rule:admin_api",
"volume:update_volume_admin_metadata": "rule:admin_api",
"volume:get_snapshot": "rule:admin_or_owner",
"volume:get_all_snapshots": "rule:admin_or_owner",
"volume:create_snapshot": "rule:admin_or_owner",
"volume:delete_snapshot": "rule:admin_or_owner",
"volume:update_snapshot": "rule:admin_or_owner",
"volume:get_snapshot_metadata": "rule:admin_or_owner",
"volume:delete_snapshot_metadata": "rule:admin_or_owner",
"volume:update_snapshot_metadata": "rule:admin_or_owner",
"volume:extend": "rule:admin_or_owner",
"volume:update_readonly_flag": "rule:admin_or_owner",
"volume:retype": "rule:admin_or_owner",
"volume:update": "rule:admin_or_owner",
"volume_extension:types_manage": "rule:admin_api",
"volume_extension:types_extra_specs": "rule:admin_api",
"volume_extension:access_types_qos_specs_id": "rule:admin_api",
"volume_extension:access_types_extra_specs": "rule:admin_api",
"volume_extension:volume_type_access": "rule:admin_or_owner",
"volume_extension:volume_type_access:addProjectAccess": "rule:admin_api",
"volume_extension:volume_type_access:removeProjectAccess": "rule:admin_api",
"volume_extension:volume_type_encryption": "rule:admin_api",
"volume_extension:volume_encryption_metadata": "rule:admin_or_owner",
"volume_extension:extended_snapshot_attributes": "rule:admin_or_owner",
"volume_extension:volume_image_metadata": "rule:admin_or_owner",
"volume_extension:quotas:show": "",
"volume_extension:quotas:update": "rule:admin_api",
"volume_extension:quotas:delete": "rule:admin_api",
"volume_extension:quota_classes": "rule:admin_api",
"volume_extension:quota_classes:validate_setup_for_nested_quota_use": "rule:admin_api",
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
"volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_admin_actions:force_detach": "rule:admin_api",
"volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",
"volume_extension:backup_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api",
"volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api",
"volume_extension:volume_actions:upload_public": "rule:admin_api",
"volume_extension:volume_actions:upload_image": "rule:admin_or_owner",
"volume_extension:volume_host_attribute": "rule:admin_api",
"volume_extension:volume_tenant_attribute": "rule:admin_or_owner",
"volume_extension:volume_mig_status_attribute": "rule:admin_api",
"volume_extension:hosts": "rule:admin_api",
"volume_extension:services:index": "rule:admin_api",
"volume_extension:services:update" : "rule:admin_api",
"volume_extension:volume_manage": "rule:admin_api",
"volume_extension:volume_unmanage": "rule:admin_api",
"volume_extension:list_manageable": "rule:admin_api",
"volume_extension:capabilities": "rule:admin_api",
"volume:create_transfer": "rule:admin_or_owner",
"volume:accept_transfer": "",
"volume:delete_transfer": "rule:admin_or_owner",
"volume:get_transfer": "rule:admin_or_owner",
"volume:get_all_transfers": "rule:admin_or_owner",
"volume_extension:replication:promote": "rule:admin_api",
"volume_extension:replication:reenable": "rule:admin_api",
"volume:failover_host": "rule:admin_api",
"volume:freeze_host": "rule:admin_api",
"volume:thaw_host": "rule:admin_api",
"backup:create" : "",
"backup:delete": "rule:admin_or_owner",
"backup:get": "rule:admin_or_owner",
"backup:get_all": "rule:admin_or_owner",
"backup:restore": "rule:admin_or_owner",
"backup:backup-import": "rule:admin_api",
"backup:backup-export": "rule:admin_api",
"backup:update": "rule:admin_or_owner",
"snapshot_extension:snapshot_actions:update_snapshot_status": "",
"snapshot_extension:snapshot_manage": "rule:admin_api",
"snapshot_extension:snapshot_unmanage": "rule:admin_api",
"snapshot_extension:list_manageable": "rule:admin_api",
"consistencygroup:create" : "group:nobody",
"consistencygroup:delete": "group:nobody",
"consistencygroup:update": "group:nobody",
"consistencygroup:get": "group:nobody",
"consistencygroup:get_all": "group:nobody",
"consistencygroup:create_cgsnapshot" : "group:nobody",
"consistencygroup:delete_cgsnapshot": "group:nobody",
"consistencygroup:get_cgsnapshot": "group:nobody",
"consistencygroup:get_all_cgsnapshots": "group:nobody",
"group:group_types_manage": "rule:admin_api",
"group:group_types_specs": "rule:admin_api",
"group:access_group_types_specs": "rule:admin_api",
"group:group_type_access": "rule:admin_or_owner",
"group:create" : "",
"group:delete": "rule:admin_or_owner",
"group:update": "rule:admin_or_owner",
"group:get": "rule:admin_or_owner",
"group:get_all": "rule:admin_or_owner",
"group:create_group_snapshot": "",
"group:delete_group_snapshot": "rule:admin_or_owner",
"group:update_group_snapshot": "rule:admin_or_owner",
"group:get_group_snapshot": "rule:admin_or_owner",
"group:get_all_group_snapshots": "rule:admin_or_owner",
"scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api",
"message:delete": "rule:admin_or_owner",
"message:get": "rule:admin_or_owner",
"message:get_all": "rule:admin_or_owner",
"clusters:get": "rule:admin_api",
"clusters:get_all": "rule:admin_api",
"clusters:update": "rule:admin_api"
}


@ -0,0 +1,54 @@
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-db-init
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_init.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_init.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
- name: cinder-db-init
image: {{ .Values.images.db_init | quote }}
imagePullPolicy: {{ .Values.images.pull_policy | quote }}
env:
- name: ANSIBLE_LIBRARY
value: /usr/share/ansible/
command:
- bash
- /tmp/db-init.sh
volumeMounts:
- name: dbinitsh
mountPath: /tmp/db-init.sh
subPath: db-init.sh
readOnly: true
volumes:
- name: dbinitsh
configMap:
name: cinder-bin


@ -0,0 +1,59 @@
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-db-sync
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" .Values.dependencies.db_sync.jobs }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
- name: cinder-db-sync
image: {{ .Values.images.db_sync }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- cinder-manage
args:
- --config-dir
- /etc/cinder/conf
- db
- sync
volumeMounts:
- name: pod-etc-cinder
mountPath: /etc/cinder
- name: cinderconf
mountPath: /etc/cinder/conf/cinder.conf
subPath: cinder.conf
readOnly: true
volumes:
- name: pod-etc-cinder
emptyDir: {}
- name: cinderconf
configMap:
name: cinder-etc


@ -0,0 +1,65 @@
{{- $envAll := . }}
{{- $ksAdminSecret := $envAll.Values.keystone.admin_secret | default "cinder-env-keystone-admin" }}
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-ks-endpoints
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
{{- range $key1, $osServiceType := tuple "volume" "volumev2" "volumev3" }}
{{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }}
- name: {{ $osServiceType }}-ks-endpoints-{{ $osServiceEndPoint }}
image: {{ $envAll.Values.images.ks_endpoints }}
imagePullPolicy: {{ $envAll.Values.images.pull_policy }}
command:
- bash
- /tmp/ks-endpoints.sh
volumeMounts:
- name: ks-endpoints-sh
mountPath: /tmp/ks-endpoints.sh
subPath: ks-endpoints.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: OS_SVC_ENDPOINT
value: {{ $osServiceEndPoint }}
- name: OS_SERVICE_NAME
value: {{ tuple $osServiceType $envAll | include "endpoint_name_lookup" }}
- name: OS_SERVICE_TYPE
value: {{ $osServiceType }}
- name: OS_SERVICE_ENDPOINT
value: {{ tuple $osServiceType $osServiceEndPoint "api" $envAll | include "endpoint_type_lookup_addr" }}
{{- end }}
{{- end }}
volumes:
- name: ks-endpoints-sh
configMap:
name: cinder-bin
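
The two nested ranges fan this job out into nine short-lived containers, one per combination of service type and endpoint interface:

# volume-ks-endpoints-admin     volume-ks-endpoints-internal     volume-ks-endpoints-public
# volumev2-ks-endpoints-admin   volumev2-ks-endpoints-internal   volumev2-ks-endpoints-public
# volumev3-ks-endpoints-admin   volumev3-ks-endpoints-internal   volumev3-ks-endpoints-public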


@ -0,0 +1,59 @@
{{- $envAll := . }}
{{- $ksAdminSecret := .Values.keystone.admin_secret | default "cinder-env-keystone-admin" }}
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-ks-service
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_service.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
{{- range $key1, $osServiceType := tuple "volume" "volumev2" "volumev3" }}
- name: {{ $osServiceType }}-ks-service-registration
image: {{ $envAll.Values.images.ks_service }}
imagePullPolicy: {{ $envAll.Values.images.pull_policy }}
command:
- bash
- /tmp/ks-service.sh
volumeMounts:
- name: ks-service-sh
mountPath: /tmp/ks-service.sh
subPath: ks-service.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: OS_SERVICE_NAME
value: {{ tuple $osServiceType $envAll | include "endpoint_name_lookup" }}
- name: OS_SERVICE_TYPE
value: {{ $osServiceType }}
{{- end }}
volumes:
- name: ks-service-sh
configMap:
name: cinder-bin


@ -0,0 +1,60 @@
{{- $ksAdminSecret := .Values.keystone.admin_secret | default "cinder-env-keystone-admin" }}
{{- $ksUserSecret := .Values.keystone.user_secret | default "cinder-env-keystone-user" }}
apiVersion: batch/v1
kind: Job
metadata:
name: cinder-ks-user
spec:
template:
metadata:
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": {{ .Values.images.dep_check | quote }},
"imagePullPolicy": {{ .Values.images.pull_policy | quote }},
"env": [
{
"name": "NAMESPACE",
"value": "{{ .Release.Namespace }}"
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" .Values.dependencies.ks_user.service }}"
},
{
"name": "COMMAND",
"value": "echo done"
}
]
}
]'
spec:
restartPolicy: OnFailure
containers:
- name: cinder-ks-user
image: {{ .Values.images.ks_user }}
imagePullPolicy: {{ .Values.images.pull_policy }}
command:
- bash
- /tmp/ks-user.sh
volumeMounts:
- name: ks-user-sh
mountPath: /tmp/ks-user.sh
subPath: ks-user.sh
readOnly: true
env:
{{- with $env := dict "ksUserSecret" $ksAdminSecret }}
{{- include "env_ks_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_SERVICE_NAME
value: "cinder"
{{- with $env := dict "ksUserSecret" $ksUserSecret }}
{{- include "env_ks_user_create_openrc_tpl" $env | indent 12 }}
{{- end }}
- name: SERVICE_OS_ROLE
value: {{ .Values.keystone.cinder_user_role | quote }}
volumes:
- name: ks-user-sh
configMap:
name: cinder-bin


@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
name: cinder-env-keystone-admin
type: Opaque
data:
OS_AUTH_URL: |
{{ .Values.keystone.auth_url | b64enc | indent 4 }}
OS_REGION_NAME: |
{{ .Values.keystone.admin_region_name | b64enc | indent 4 }}
OS_PROJECT_DOMAIN_NAME: |
{{ .Values.keystone.admin_project_domain | b64enc | indent 4 }}
OS_PROJECT_NAME: |
{{ .Values.keystone.admin_project_name | b64enc | indent 4 }}
OS_USER_DOMAIN_NAME: |
{{ .Values.keystone.admin_user_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.admin_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.admin_password | b64enc | indent 4 }}


@ -0,0 +1,20 @@
apiVersion: v1
kind: Secret
metadata:
name: cinder-env-keystone-user
type: Opaque
data:
OS_AUTH_URL: |
{{ .Values.keystone.auth_url | b64enc | indent 4 }}
OS_REGION_NAME: |
{{ .Values.keystone.cinder_region_name | b64enc | indent 4 }}
OS_PROJECT_DOMAIN_NAME: |
{{ .Values.keystone.cinder_project_domain | b64enc | indent 4 }}
OS_PROJECT_NAME: |
{{ .Values.keystone.cinder_project_name | b64enc | indent 4 }}
OS_USER_DOMAIN_NAME: |
{{ .Values.keystone.cinder_user_domain | b64enc | indent 4 }}
OS_USERNAME: |
{{ .Values.keystone.cinder_user | b64enc | indent 4 }}
OS_PASSWORD: |
{{ .Values.keystone.cinder_password | b64enc | indent 4 }}
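
Kubernetes Secret data must be base64-encoded, which the b64enc filter handles at render time. As a quick sanity check against the defaults in cinder/values.yaml, the password entry renders as:

OS_PASSWORD: |
    cGFzc3dvcmQ=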


@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.service.api.name }}
spec:
ports:
- port: {{ .Values.service.api.port }}
selector:
app: cinder-api

cinder/values.yaml Normal file

@ -0,0 +1,177 @@
# Default values for cinder.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
replicas:
api: 1
volume: 1
scheduler: 1
labels:
node_selector_key: openstack-control-plane
node_selector_value: enabled
images:
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0
db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
db_sync: quay.io/stackanetes/stackanetes-cinder-api:newton
ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
ks_service: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
ks_endpoints: quay.io/stackanetes/stackanetes-kolla-toolbox:newton
api: quay.io/stackanetes/stackanetes-cinder-api:newton
scheduler: quay.io/stackanetes/stackanetes-cinder-scheduler:newton
volume: quay.io/stackanetes/stackanetes-cinder-volume:newton
pull_policy: "IfNotPresent"
upgrades:
revision_history: 3
pod_replacement_strategy: RollingUpdate
rolling_update:
max_unavailable: 1
max_surge: 3
keystone:
auth_uri: "http://keystone-api:5000"
auth_url: "http://keystone-api:35357"
admin_user: "admin"
admin_user_domain: "default"
admin_password: "password"
admin_project_name: "admin"
admin_project_domain: "default"
admin_region_name: "RegionOne"
cinder_user: "cinder"
cinder_user_domain: "default"
cinder_user_role: "admin"
cinder_password: "password"
cinder_project_name: "service"
cinder_project_domain: "default"
cinder_region_name: "RegionOne"
service:
api:
name: "cinder-api"
port: 8776
proto: "http"
database:
address: mariadb
port: 3306
root_user: root
root_password: password
cinder_database_name: cinder
cinder_password: password
cinder_user: cinder
ceph:
enabled: true
monitors: []
cinder_user: "admin"
# a null value for the keyring will
# attempt to use the key from
# common/secrets/ceph-client-key
cinder_keyring: null
backends:
enabled:
- rbd1
rbd1:
secret: null
user: "admin"
pool: "volumes"
glance:
proto: "http"
host: "glance-api"
port: 9292
version: 2
messaging:
hosts: rabbitmq
user: rabbitmq
password: password
api:
workers: 8
misc:
debug: false
dependencies:
db_init:
jobs:
- mariadb-seed
service:
- mariadb
db_sync:
jobs:
- cinder-db-init
service:
- mariadb
ks_user:
service:
- keystone-api
ks_service:
service:
- keystone-api
ks_endpoints:
jobs:
- cinder-ks-service
service:
- keystone-api
api:
jobs:
- cinder-db-sync
- cinder-ks-user
- cinder-ks-endpoints
service:
- mariadb
- keystone-api
volume:
service:
- keystone-api
- cinder-api
scheduler:
service:
- keystone-api
- cinder-api
# We use a different layout of the endpoints here to account for versioning.
# This swaps the service name and type, and should be rolled out to the other
# services; see the rendered examples after this block.
endpoints:
identity:
name: keystone
hosts:
default: keystone-api
path: /v3
scheme: 'http'
port:
admin: 35357
public: 5000
volume:
name: cinder
hosts:
default: cinder-api
path: '/v1/%(tenant_id)s'
scheme: 'http'
port:
api: 8776
volumev2:
name: cinder
hosts:
default: cinder-api
path: '/v2/%(tenant_id)s'
scheme: 'http'
port:
api: 8776
volumev3:
name: cinder
hosts:
default: cinder-api
path: '/v3/%(tenant_id)s'
scheme: 'http'
port:
api: 8776
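
Concretely, the three volume* entries differ only in path; that is how the cinder-ks-endpoints job registers one catalog entry per API version from the same host and port. Fed through endpoint_type_lookup_addr (again assuming an openstack namespace), they yield:

# volume   -> "http://cinder-api.openstack:8776/v1/%(tenant_id)s"
# volumev2 -> "http://cinder-api.openstack:8776/v2/%(tenant_id)s"
# volumev3 -> "http://cinder-api.openstack:8776/v3/%(tenant_id)s"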


@ -160,6 +160,7 @@ $ helm install --name=memcached local/memcached --namespace=openstack
$ helm install --name=rabbitmq local/rabbitmq --namespace=openstack
$ helm install --name=keystone local/keystone --namespace=openstack
$ helm install --name=horizon local/horizon --namespace=openstack
$ helm install --name=cinder local/cinder --namespace=openstack
$ helm install --name=glance local/glance --namespace=openstack
$ helm install --name=nova local/nova --namespace=openstack
$ helm install --name=neutron local/neutron --namespace=openstack