976cab856c
Currently each service uses the same name for its Helm test user, "test". While this works when services are run sequentially, when multiple services are deployed and tested at the same time it can lead to a race condition where one service deletes the user before another has finished testing, causing a failure. This change makes each service define its own test user in the form [service]-test.

Change-Id: Idd7ad3bef78a039f23fb0dd79391e3588e94b73c
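For this chart the renamed user appears in the identity endpoint auth block (endpoints.identity.auth.test.username: cinder-test) and in the cinder-keystone-test secret name further down. As a minimal sketch, assuming the same values layout, another service's chart would carry the equivalent override — the glance names here are hypothetical, purely to illustrate the [service]-test convention:

    endpoints:
      identity:
        auth:
          test:
            username: glance-test  # hypothetical: [service]-test applied to glance
    secrets:
      identity:
        test: glance-keystone-test  # hypothetical secret name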
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for cinder.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

storage: ceph

labels:
  api:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  backup:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  scheduler:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  volume:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled

release_group: null

images:
  tags:
    test: docker.io/xrally/xrally-openstack:1.3.0
    db_init: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    cinder_db_sync: docker.io/openstackhelm/cinder:ocata-ubuntu_xenial
    db_drop: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    ks_service: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    ks_endpoints: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    cinder_api: docker.io/openstackhelm/cinder:ocata-ubuntu_xenial
    bootstrap: docker.io/openstackhelm/heat:ocata-ubuntu_xenial
    cinder_scheduler: docker.io/openstackhelm/cinder:ocata-ubuntu_xenial
    cinder_volume: docker.io/openstackhelm/cinder:ocata-ubuntu_xenial
    cinder_volume_usage_audit: docker.io/openstackhelm/cinder:ocata-ubuntu_xenial
    cinder_storage_init: docker.io/port/ceph-config-helper:v1.10.3
    cinder_backup: docker.io/openstackhelm/cinder:ocata-ubuntu_xenial
    cinder_backup_storage_init: docker.io/port/ceph-config-helper:v1.10.3
    dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

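# NOTE: volume_usage_audit runs as a Kubernetes CronJob. "cron" uses the
# standard five-field cron syntax ("*/5 * * * *" = every five minutes),
# starting_deadline is the CronJob's startingDeadlineSeconds, and history
# maps to the successful/failed job history limits.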
jobs:
  volume_usage_audit:
    cron: "*/5 * * * *"
    starting_deadline: 600
    history:
      success: 3
      failed: 1

pod:
  security_context:
    cinder:
      pod:
        runAsUser: 42424
      container:
        cinder_api:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
        cinder_backup:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
        cinder_scheduler:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
        cinder_volume:
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
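  # NOTE: this expresses soft pod anti-affinity: the scheduler prefers
  # (weight 10) to spread replicas across hosts via the
  # kubernetes.io/hostname topology key, but will still co-schedule pods
  # on one node when it has no alternative.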
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
      weight:
        default: 10

  mounts:
    cinder_api:
      init_container: null
      cinder_api:
        volumeMounts:
        volumes:
    cinder_scheduler:
      init_container: null
      cinder_scheduler:
        volumeMounts:
        volumes:
    cinder_volume:
      init_container: null
      cinder_volume:
        volumeMounts:
        volumes:
    cinder_volume_usage_audit:
      init_container: null
      cinder_volume_usage_audit:
        volumeMounts:
        volumes:
    cinder_backup:
      init_container: null
      cinder_backup:
        volumeMounts:
        volumes:
    cinder_tests:
      init_container: null
      cinder_tests:
        volumeMounts:
        volumes:
  replicas:
    api: 1
    volume: 1
    scheduler: 1
    backup: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
    disruption_budget:
      api:
        min_available: 0
    termination_grace_period:
      api:
        timeout: 30
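  # NOTE: the requests/limits below are only rendered into the pod specs
  # when resources.enabled is set to true; while it is false the containers
  # run without resource constraints.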
  resources:
    enabled: false
    api:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    scheduler:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    volume:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      volume_usage_audit:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rabbit_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      clean:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      backup_storage_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      storage_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"

bootstrap:
  enabled: true
  ks_user: admin
  bootstrap_conf_backends: true
  volume_types:
    name:
      group:
        volume_backend_name:

network:
  api:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30877

ceph_client:
  configmap: ceph-etc
  user_secret_name: pvc-ceph-client-key

conf:
  software:
    rbd:
      rbd_pool_app_name_backup: cinder-backup
      rbd_pool_app_name: cinder-volume
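  # NOTE: this tree is rendered into cinder's api-paste.ini; each composite
  # maps a URL prefix to one of the middleware pipelines defined beneath it.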
  paste:
    composite:osapi_volume:
      use: call:cinder.api:root_app_factory
      /: apiversions
      /v1: openstack_volume_api_v1
      /v2: openstack_volume_api_v2
      /v3: openstack_volume_api_v3
    composite:openstack_volume_api_v1:
      use: call:cinder.api.middleware.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1
      keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv1
      keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv1
    composite:openstack_volume_api_v2:
      use: call:cinder.api.middleware.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2
      keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv2
      keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv2
    composite:openstack_volume_api_v3:
      use: call:cinder.api.middleware.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3
      keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv3
      keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv3
    filter:request_id:
      paste.filter_factory: oslo_middleware.request_id:RequestId.factory
    filter:http_proxy_to_wsgi:
      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
    filter:cors:
      paste.filter_factory: oslo_middleware.cors:filter_factory
      oslo_config_project: cinder
    filter:faultwrap:
      paste.filter_factory: cinder.api.middleware.fault:FaultWrapper.factory
    filter:osprofiler:
      paste.filter_factory: osprofiler.web:WsgiMiddleware.factory
    filter:noauth:
      paste.filter_factory: cinder.api.middleware.auth:NoAuthMiddleware.factory
    filter:sizelimit:
      paste.filter_factory: oslo_middleware.sizelimit:RequestBodySizeLimiter.factory
    app:apiv1:
      paste.app_factory: cinder.api.v1.router:APIRouter.factory
    app:apiv2:
      paste.app_factory: cinder.api.v2.router:APIRouter.factory
    app:apiv3:
      paste.app_factory: cinder.api.v3.router:APIRouter.factory
    pipeline:apiversions:
      pipeline: cors http_proxy_to_wsgi faultwrap osvolumeversionapp
    app:osvolumeversionapp:
      paste.app_factory: cinder.api.versions:Versions.factory
    filter:keystonecontext:
      paste.filter_factory: cinder.api.middleware.auth:CinderKeystoneContext.factory
    filter:authtoken:
      paste.filter_factory: keystonemiddleware.auth_token:filter_factory
    filter:audit:
      paste.filter_factory: keystonemiddleware.audit:filter_factory
      audit_map_file: /etc/cinder/api_audit_map.conf
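  # NOTE: oslo.policy syntax: "rule:x" references another named rule,
  # "role:admin" matches a keystone role on the request context, and an
  # empty string ('') allows any authenticated user.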
  policy:
    context_is_admin: role:admin
    admin_or_owner: is_admin:True or project_id:%(project_id)s
    default: rule:admin_or_owner
    admin_api: is_admin:True
    volume:create: ''
    volume:delete: rule:admin_or_owner
    volume:get: rule:admin_or_owner
    volume:get_all: rule:admin_or_owner
    volume:get_volume_metadata: rule:admin_or_owner
    volume:create_volume_metadata: rule:admin_or_owner
    volume:delete_volume_metadata: rule:admin_or_owner
    volume:update_volume_metadata: rule:admin_or_owner
    volume:get_volume_admin_metadata: rule:admin_api
    volume:update_volume_admin_metadata: rule:admin_api
    volume:get_snapshot: rule:admin_or_owner
    volume:get_all_snapshots: rule:admin_or_owner
    volume:create_snapshot: rule:admin_or_owner
    volume:delete_snapshot: rule:admin_or_owner
    volume:update_snapshot: rule:admin_or_owner
    volume:get_snapshot_metadata: rule:admin_or_owner
    volume:delete_snapshot_metadata: rule:admin_or_owner
    volume:update_snapshot_metadata: rule:admin_or_owner
    volume:extend: rule:admin_or_owner
    volume:update_readonly_flag: rule:admin_or_owner
    volume:retype: rule:admin_or_owner
    volume:update: rule:admin_or_owner
    volume_extension:types_manage: rule:admin_api
    volume_extension:types_extra_specs: rule:admin_api
    volume_extension:access_types_qos_specs_id: rule:admin_api
    volume_extension:access_types_extra_specs: rule:admin_api
    volume_extension:volume_type_access: rule:admin_or_owner
    volume_extension:volume_type_access:addProjectAccess: rule:admin_api
    volume_extension:volume_type_access:removeProjectAccess: rule:admin_api
    volume_extension:volume_type_encryption: rule:admin_api
    volume_extension:volume_encryption_metadata: rule:admin_or_owner
    volume_extension:extended_snapshot_attributes: rule:admin_or_owner
    volume_extension:volume_image_metadata: rule:admin_or_owner
    volume_extension:quotas:show: ''
    volume_extension:quotas:update: rule:admin_api
    volume_extension:quotas:delete: rule:admin_api
    volume_extension:quota_classes: rule:admin_api
    volume_extension:quota_classes:validate_setup_for_nested_quota_use: rule:admin_api
    volume_extension:volume_admin_actions:reset_status: rule:admin_api
    volume_extension:snapshot_admin_actions:reset_status: rule:admin_api
    volume_extension:backup_admin_actions:reset_status: rule:admin_api
    volume_extension:volume_admin_actions:force_delete: rule:admin_api
    volume_extension:volume_admin_actions:force_detach: rule:admin_api
    volume_extension:snapshot_admin_actions:force_delete: rule:admin_api
    volume_extension:backup_admin_actions:force_delete: rule:admin_api
    volume_extension:volume_admin_actions:migrate_volume: rule:admin_api
    volume_extension:volume_admin_actions:migrate_volume_completion: rule:admin_api
    volume_extension:volume_actions:upload_public: rule:admin_api
    volume_extension:volume_actions:upload_image: rule:admin_or_owner
    volume_extension:volume_host_attribute: rule:admin_api
    volume_extension:volume_tenant_attribute: rule:admin_or_owner
    volume_extension:volume_mig_status_attribute: rule:admin_api
    volume_extension:hosts: rule:admin_api
    volume_extension:services:index: rule:admin_api
    volume_extension:services:update: rule:admin_api
    volume_extension:volume_manage: rule:admin_api
    volume_extension:volume_unmanage: rule:admin_api
    volume_extension:list_manageable: rule:admin_api
    volume_extension:capabilities: rule:admin_api
    volume:create_transfer: rule:admin_or_owner
    volume:accept_transfer: ''
    volume:delete_transfer: rule:admin_or_owner
    volume:get_transfer: rule:admin_or_owner
    volume:get_all_transfers: rule:admin_or_owner
    volume_extension:replication:promote: rule:admin_api
    volume_extension:replication:reenable: rule:admin_api
    volume:failover_host: rule:admin_api
    volume:freeze_host: rule:admin_api
    volume:thaw_host: rule:admin_api
    backup:create: ''
    backup:delete: rule:admin_or_owner
    backup:get: rule:admin_or_owner
    backup:get_all: rule:admin_or_owner
    backup:restore: rule:admin_or_owner
    backup:backup-import: rule:admin_api
    backup:backup-export: rule:admin_api
    backup:update: rule:admin_or_owner
    snapshot_extension:snapshot_actions:update_snapshot_status: ''
    snapshot_extension:snapshot_manage: rule:admin_api
    snapshot_extension:snapshot_unmanage: rule:admin_api
    snapshot_extension:list_manageable: rule:admin_api
    consistencygroup:create: group:nobody
    consistencygroup:delete: group:nobody
    consistencygroup:update: group:nobody
    consistencygroup:get: group:nobody
    consistencygroup:get_all: group:nobody
    consistencygroup:create_cgsnapshot: group:nobody
    consistencygroup:delete_cgsnapshot: group:nobody
    consistencygroup:get_cgsnapshot: group:nobody
    consistencygroup:get_all_cgsnapshots: group:nobody
    group:group_types_manage: rule:admin_api
    group:group_types_specs: rule:admin_api
    group:access_group_types_specs: rule:admin_api
    group:group_type_access: rule:admin_or_owner
    group:create: ''
    group:delete: rule:admin_or_owner
    group:update: rule:admin_or_owner
    group:get: rule:admin_or_owner
    group:get_all: rule:admin_or_owner
    group:create_group_snapshot: ''
    group:delete_group_snapshot: rule:admin_or_owner
    group:update_group_snapshot: rule:admin_or_owner
    group:get_group_snapshot: rule:admin_or_owner
    group:get_all_group_snapshots: rule:admin_or_owner
    scheduler_extension:scheduler_stats:get_pools: rule:admin_api
    message:delete: rule:admin_or_owner
    message:get: rule:admin_or_owner
    message:get_all: rule:admin_or_owner
    clusters:get: rule:admin_api
    clusters:get_all: rule:admin_api
    clusters:update: rule:admin_api
  api_audit_map:
    DEFAULT:
      target_endpoint_type: None
    custom_actions:
      associate: update/associate
      disassociate: update/disassociate_all
      disassociate_all: update/disassociate_all
      associations: read/list/associations
    path_keywords:
      defaults: None
      detail: None
      limits: None
      os-quota-specs: project
      qos-specs: qos-spec
      snapshots: snapshot
      types: type
      volumes: volume
    service_endpoints:
      volume: service/storage/block
      volumev2: service/storage/block
  cinder_sudoers: |
    # This sudoers file supports rootwrap for both Kolla and LOCI Images.
    Defaults !requiretty
    Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
    cinder ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *, /var/lib/openstack/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *
  rootwrap: |
    # Configuration for cinder-rootwrap
    # This file should be owned by (and only-writeable by) the root user

    [DEFAULT]
    # List of directories to load filter definitions from (separated by ',').
    # These directories MUST all be only writeable by root !
    filters_path=/etc/cinder/rootwrap.d

    # List of directories to search executables in, in case filters do not
    # explicitly specify a full path (separated by ',')
    # If not specified, defaults to system PATH environment variable.
    # These directories MUST all be only writeable by root !
    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin

    # Enable logging to syslog
    # Default value is False
    use_syslog=False

    # Which syslog facility to use.
    # Valid values include auth, authpriv, syslog, local0, local1...
    # Default value is 'syslog'
    syslog_log_facility=syslog

    # Which messages to log.
    # INFO means log all usage
    # ERROR means only log unsuccessful attempts
    syslog_log_level=ERROR
  rootwrap_filters:
    volume:
      pods:
        - volume
      content: |
        # cinder-rootwrap command filters for volume nodes
        # This file should be owned by (and only-writeable by) the root user

        [Filters]
        # cinder/volume/iscsi.py: iscsi_helper '--op' ...
        ietadm: CommandFilter, ietadm, root
        tgtadm: CommandFilter, tgtadm, root
        iscsictl: CommandFilter, iscsictl, root
        tgt-admin: CommandFilter, tgt-admin, root
        cinder-rtstool: CommandFilter, cinder-rtstool, root
        scstadmin: CommandFilter, scstadmin, root

        # LVM related show commands
        pvs: EnvFilter, env, root, LC_ALL=C, pvs
        vgs: EnvFilter, env, root, LC_ALL=C, vgs
        lvs: EnvFilter, env, root, LC_ALL=C, lvs
        lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay

        # -LVM related show commands with suppress fd warnings
        pvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs
        vgs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs
        lvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs
        lvdisplay_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay

        # -LVM related show commands conf var
        pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, pvs
        vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, vgs
        lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvs
        lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvdisplay

        # -LVM conf var with suppress fd_warnings
        pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs
        vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs
        lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs
        lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay

        # os-brick library commands
        # os_brick.privileged.run_as_root oslo.privsep context
        # This line ties the superuser privs with the config files, context name,
        # and (implicitly) the actual python code invoked.
        privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*
        # The following and any cinder/brick/* entries should all be obsoleted
        # by privsep, and may be removed once the os-brick version requirement
        # is updated appropriately.
        scsi_id: CommandFilter, /lib/udev/scsi_id, root
        drbdadm: CommandFilter, drbdadm, root

        # cinder/brick/local_dev/lvm.py: 'vgcreate', vg_name, pv_list
        vgcreate: CommandFilter, vgcreate, root

        # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', sizestr, '-n', volume_name,..
        # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', ...
        lvcreate: EnvFilter, env, root, LC_ALL=C, lvcreate
        lvcreate_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvcreate
        lvcreate_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvcreate
        lvcreate_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, LC_ALL=C, lvcreate

        # cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,...
        dd: CommandFilter, dd, root

        # cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ...
        lvremove: CommandFilter, lvremove, root

        # cinder/volume/driver.py: 'lvrename', '%(vg)s', '%(orig)s' '(new)s'...
        lvrename: CommandFilter, lvrename, root

        # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ...
        # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(thin_pool)s' ...
        lvextend: EnvFilter, env, root, LC_ALL=C, lvextend
        lvextend_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvextend
        lvextend_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend
        lvextend_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend

        # cinder/brick/local_dev/lvm.py: 'lvchange -a y -K <lv>'
        lvchange: CommandFilter, lvchange, root

        # cinder/brick/local_dev/lvm.py: 'lvconvert', '--merge', snapshot_name
        lvconvert: CommandFilter, lvconvert, root

        # cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',...
        # cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ...
        iscsiadm: CommandFilter, iscsiadm, root

        # cinder/volume/utils.py: utils.temporary_chown(path, 0)
        chown: CommandFilter, chown, root

        # cinder/volume/utils.py: copy_volume(..., ionice='...')
        ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7]
        ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3]

        # cinder/volume/utils.py: setup_blkio_cgroup()
        cgcreate: CommandFilter, cgcreate, root
        cgset: CommandFilter, cgset, root
        cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\S+

        # cinder/volume/driver.py
        dmsetup: CommandFilter, dmsetup, root
        ln: CommandFilter, ln, root

        # cinder/image/image_utils.py
        qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img
        qemu-img_convert: CommandFilter, qemu-img, root

        udevadm: CommandFilter, udevadm, root

        # cinder/volume/driver.py: utils.read_file_as_root()
        cat: CommandFilter, cat, root

        # cinder/volume/nfs.py
        stat: CommandFilter, stat, root
        mount: CommandFilter, mount, root
        df: CommandFilter, df, root
        du: CommandFilter, du, root
        truncate: CommandFilter, truncate, root
        chmod: CommandFilter, chmod, root
        rm: CommandFilter, rm, root

        # cinder/volume/drivers/remotefs.py
        mkdir: CommandFilter, mkdir, root

        # cinder/volume/drivers/netapp/nfs.py:
        netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -name, img-cache.*, -amin, \+\d+

        # cinder/volume/drivers/glusterfs.py
        chgrp: CommandFilter, chgrp, root
        umount: CommandFilter, umount, root
        fallocate: CommandFilter, fallocate, root

        # cinder/volumes/drivers/hds/hds.py:
        hus-cmd: CommandFilter, hus-cmd, root
        hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root

        # cinder/volumes/drivers/hds/hnas_backend.py
        ssc: CommandFilter, ssc, root

        # cinder/brick/initiator/connector.py:
        ls: CommandFilter, ls, root
        tee: CommandFilter, tee, root
        multipath: CommandFilter, multipath, root
        multipathd: CommandFilter, multipathd, root
        systool: CommandFilter, systool, root

        # cinder/volume/drivers/block_device.py
        blockdev: CommandFilter, blockdev, root

        # cinder/volume/drivers/ibm/gpfs.py
        # cinder/volume/drivers/tintri.py
        mv: CommandFilter, mv, root

        # cinder/volume/drivers/ibm/gpfs.py
        cp: CommandFilter, cp, root
        mmgetstate: CommandFilter, /usr/lpp/mmfs/bin/mmgetstate, root
        mmclone: CommandFilter, /usr/lpp/mmfs/bin/mmclone, root
        mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root
        mmchattr: CommandFilter, /usr/lpp/mmfs/bin/mmchattr, root
        mmlsconfig: CommandFilter, /usr/lpp/mmfs/bin/mmlsconfig, root
        mmlsfs: CommandFilter, /usr/lpp/mmfs/bin/mmlsfs, root
        mmlspool: CommandFilter, /usr/lpp/mmfs/bin/mmlspool, root
        mkfs: CommandFilter, mkfs, root
        mmcrfileset: CommandFilter, /usr/lpp/mmfs/bin/mmcrfileset, root
        mmlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmlinkfileset, root
        mmunlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmunlinkfileset, root
        mmdelfileset: CommandFilter, /usr/lpp/mmfs/bin/mmdelfileset, root
        mmcrsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmcrsnapshot, root
        mmdelsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmdelsnapshot, root

        # cinder/volume/drivers/ibm/gpfs.py
        # cinder/volume/drivers/ibm/ibmnas.py
        find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -ignore_readdir_race, -inum, \d+, -print0, -quit

        # cinder/brick/initiator/connector.py:
        aoe-revalidate: CommandFilter, aoe-revalidate, root
        aoe-discover: CommandFilter, aoe-discover, root
        aoe-flush: CommandFilter, aoe-flush, root

        # cinder/brick/initiator/linuxscsi.py:
        sg_scan: CommandFilter, sg_scan, root

        # cinder/backup/services/tsm.py
        dsmc: CommandFilter, /usr/bin/dsmc, root

        # cinder/volume/drivers/hitachi/hbsd_horcm.py
        raidqry: CommandFilter, raidqry, root
        raidcom: CommandFilter, raidcom, root
        pairsplit: CommandFilter, pairsplit, root
        paircreate: CommandFilter, paircreate, root
        pairdisplay: CommandFilter, pairdisplay, root
        pairevtwait: CommandFilter, pairevtwait, root
        horcmstart.sh: CommandFilter, horcmstart.sh, root
        horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root
        horcmgr: EnvFilter, env, root, HORCMINST=, /etc/horcmgr

        # cinder/volume/drivers/hitachi/hbsd_snm2.py
        auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman
        auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref
        auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef
        aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1
        auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn
        auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap
        autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap
        aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol
        auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd
        auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel
        auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize
        auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser
        autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef
        autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt
        autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini
        auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi
        audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool
        aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal
        aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon

        # cinder/volume/drivers/hgst.py
        vgc-cluster: CommandFilter, vgc-cluster, root

        # cinder/volume/drivers/vzstorage.py
        pstorage-mount: CommandFilter, pstorage-mount, root
        pstorage: CommandFilter, pstorage, root
        ploop: CommandFilter, ploop, root

        # initiator/connector.py:
        drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid
  ceph:
    override:
    append:
    monitors: []
    admin_keyring: null
    pools:
      backup:
        replication: 3
        crush_rule: replicated_rule
        chunk_size: 8
      volume:
        replication: 3
        crush_rule: replicated_rule
        chunk_size: 8
  cinder:
    DEFAULT:
      resource_query_filters_file: /etc/cinder/resource_filters.json
      log_config_append: /etc/cinder/logging.conf
      use_syslog: false
      use_stderr: true
      enable_v1_api: false
      volume_name_template: "%s"
      osapi_volume_workers: 1
      glance_api_version: 2
      os_region_name: RegionOne
      host: cinder-volume-worker
      # NOTE(portdirect): the bind port should not be defined, and is manipulated
      # via the endpoints section.
      osapi_volume_listen_port: null
      enabled_backends: "rbd1"
      default_volume_type: "rbd1"
      # NOTE(portdirect): "cinder.backup.drivers.ceph" and
      # "cinder.backup.drivers.posix" also supported
      backup_driver: "cinder.backup.drivers.swift"
      # Backup: Ceph RBD options
      backup_ceph_conf: "/etc/ceph/ceph.conf"
      backup_ceph_user: cinderbackup
      backup_ceph_pool: cinder.backups
      # Backup: Posix options
      backup_posix_path: /var/lib/cinder/backup
      auth_strategy: keystone
      # Internal tenant id
      internal_project_name: internal_cinder
      internal_user_name: internal_cinder
    database:
      max_retries: -1
    keystone_authtoken:
      auth_version: v3
      auth_type: password
      memcache_security_strategy: ENCRYPT
    oslo_concurrency:
      lock_path: "/var/lib/cinder/tmp"
    oslo_messaging_notifications:
      driver: messagingv2
    oslo_middleware:
      enable_proxy_headers_parsing: true
    oslo_messaging_rabbit:
      rabbit_ha_queues: true
    coordination:
      backend_url: file:///var/lib/cinder/coordination
  logging:
    loggers:
      keys:
        - root
        - cinder
    handlers:
      keys:
        - stdout
        - stderr
        - "null"
    formatters:
      keys:
        - context
        - default
    logger_root:
      level: WARNING
      handlers: stdout
    logger_cinder:
      level: INFO
      handlers:
        - stdout
      qualname: cinder
    logger_amqp:
      level: WARNING
      handlers: stderr
      qualname: amqp
    logger_amqplib:
      level: WARNING
      handlers: stderr
      qualname: amqplib
    logger_eventletwsgi:
      level: WARNING
      handlers: stderr
      qualname: eventlet.wsgi.server
    logger_sqlalchemy:
      level: WARNING
      handlers: stderr
      qualname: sqlalchemy
    logger_boto:
      level: WARNING
      handlers: stderr
      qualname: boto
    handler_null:
      class: logging.NullHandler
      formatter: default
      args: ()
    handler_stdout:
      class: StreamHandler
      args: (sys.stdout,)
      formatter: context
    handler_stderr:
      class: StreamHandler
      args: (sys.stderr,)
      formatter: context
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
      datefmt: "%Y-%m-%d %H:%M:%S"
    formatter_default:
      format: "%(message)s"
      datefmt: "%Y-%m-%d %H:%M:%S"
  rabbitmq:
    # NOTE(rk760n): add an rmq policy to mirror messages from notification queues and set an expiration time for them
    policies:
      - vhost: "cinder"
        name: "ha_ttl_cinder"
        definition:
          # mirror messages to other nodes in the rmq cluster
          ha-mode: "all"
          ha-sync-mode: "automatic"
          # 70s
          message-ttl: 70000
        priority: 0
        apply-to: all
        pattern: '(notifications)\.'

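  # NOTE: rbd_secret_uuid should match the libvirt secret holding the Ceph
  # client key on the compute hosts; otherwise nova cannot attach volumes
  # from this backend.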
  backends:
    # These options will be written to backends.conf as-is.
    rbd1:
      volume_driver: cinder.volume.drivers.rbd.RBDDriver
      volume_backend_name: rbd1
      rbd_pool: cinder.volumes
      rbd_ceph_conf: "/etc/ceph/ceph.conf"
      rbd_flatten_volume_from_snapshot: false
      report_discard_supported: true
      rbd_max_clone_depth: 5
      rbd_store_chunk_size: 4
      rados_connect_timeout: -1
      rbd_user: cinder
      rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
  rally_tests:
    run_tempest: false
    tests:
      CinderVolumes.create_and_delete_volume:
        - args:
            size: 1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
        - args:
            size:
              max: 5
              min: 1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
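  # NOTE: this map is rendered to the resource_filters.json referenced by
  # resource_query_filters_file above; it lists the query parameters the
  # API accepts when filtering each resource type.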
  resource_filters:
    volume:
      - name
      - status
      - metadata
      - bootable
      - migration_status
      - availability_zone
      - group_id
    backup:
      - name
      - status
      - volume_id
    snapshot:
      - name
      - status
      - volume_id
      - metadata
      - availability_zone
    group: []
    group_snapshot:
      - status
      - group_id
    attachment:
      - volume_id
      - status
      - instance_id
      - attach_status
    message:
      - resource_uuid
      - resource_type
      - event_id
      - request_id
      - message_level
    pool:
      - name
      - volume_type
    volume_type: []

backup:
  external_ceph_rbd:
    enabled: false
    admin_keyring: null
    conf:
      global:
        mon_host: null
      osd:
  posix:
    volume:
      class_name: general
      size: 10Gi

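# NOTE: these dependencies drive the kubernetes-entrypoint init containers
# (the dep_check image above): a pod or job is held back until the listed
# jobs have completed and the listed service endpoints are resolvable.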
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - cinder-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
  static:
    api:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
    backup:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
        - cinder-backup-storage-init
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volume
    backup_storage_init:
      jobs: null
    bootstrap:
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volume
    clean:
      jobs: null
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - cinder-db-init
      services:
        - endpoint: internal
          service: oslo_db
    ks_endpoints:
      jobs:
        - cinder-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - service: oslo_messaging
          endpoint: internal
    scheduler:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volume
    storage_init:
      jobs: null
    tests:
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volume
    volume:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volume
    volume_usage_audit:
      jobs:
        - cinder-db-sync
        - cinder-ks-user
        - cinder-ks-endpoints
        - cinder-rabbit-init
        - cinder-storage-init
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: volume
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry

# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: cinder-keystone-admin
    cinder: cinder-keystone-user
    test: cinder-keystone-test
  oslo_db:
    admin: cinder-db-admin
    cinder: cinder-db-user
  rbd:
    backup: cinder-backup-rbd-keyring
    volume: cinder-volume-rbd-keyring
  oslo_messaging:
    admin: cinder-rabbitmq-admin
    cinder: cinder-rabbitmq-user
  tls:
    volume:
      api:
        public: cinder-tls-public

# We use a different layout of the endpoints here to account for versioning;
# this swaps the service name and type, and should be rolled out to other
# services.
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      cinder:
        role: admin
        region_name: RegionOne
        username: cinder
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
      test:
        role: admin
        region_name: RegionOne
        username: cinder-test
        password: password
        project_name: test
        user_domain_name: service
        project_domain_name: service
    hosts:
      default: keystone
      internal: keystone-api
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      api:
        default: 80
        internal: 5000
  image:
    name: glance
    hosts:
      default: glance-api
      public: glance
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: http
    port:
      api:
        default: 9292
        public: 80
  image_registry:
    name: glance-registry
    hosts:
      default: glance-registry
      public: glance-reg
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme:
      default: 'http'
    port:
      api:
        default: 9191
        public: 80
  volume:
    name: cinder
    hosts:
      default: cinder-api
      public: cinder
    host_fqdn_override:
      default: null
      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
    path:
      default: '/v1/%(tenant_id)s'
    scheme:
      default: 'http'
    port:
      api:
        default: 8776
        public: 80
  volumev2:
    name: cinderv2
    hosts:
      default: cinder-api
      public: cinder
    host_fqdn_override:
      default: null
      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
    path:
      default: '/v2/%(tenant_id)s'
    scheme:
      default: 'http'
    port:
      api:
        default: 8776
        public: 80
  volumev3:
    name: cinderv3
    hosts:
      default: cinder-api
      public: cinder
    host_fqdn_override:
      default: null
      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
    path:
      default: '/v3/%(tenant_id)s'
    scheme:
      default: 'http'
    port:
      api:
        default: 8776
        public: 80
  oslo_db:
    auth:
      admin:
        username: root
        password: password
      cinder:
        username: cinder
        password: password
    hosts:
      default: mariadb
    host_fqdn_override:
      default: null
    path: /cinder
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_messaging:
    auth:
      admin:
        username: rabbitmq
        password: password
      cinder:
        username: cinder
        password: password
    hosts:
      default: rabbitmq
    host_fqdn_override:
      default: null
    path: /cinder
    scheme: rabbit
    port:
      amqp:
        default: 5672
      http:
        default: 15672
  oslo_cache:
    auth:
      # NOTE(portdirect): this is used to define the value for keystone
      # authtoken cache encryption key, if not set it will be populated
      # automatically with a random value, but to take advantage of
      # this feature all services should be set to use the same key,
      # and memcache service.
      memcache_secret_key: null
    hosts:
      default: memcached
    host_fqdn_override:
      default: null
    port:
      memcache:
        default: 11211
  fluentd:
    namespace: null
    name: fluentd
    hosts:
      default: fluentd-logging
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: 'http'
    port:
      service:
        default: 24224
      metrics:
        default: 24220
  # NOTE(tp6510): these endpoints allow for things like DNS lookups and apiserver access.
  # They are used to enable the Egress K8s network policy.
  k8s:
    port:
      api:
        default: 6443
        internal: 5000
  default:
    namespace: default
  kube_system:
    namespace: kube-system
  kube_public:
    namespace: kube-public

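# NOTE: when the network_policy manifest is enabled, an empty rule ({})
# matches all traffic, so these defaults permit all ingress and egress;
# tighten them per deployment.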
network_policy:
  cinder:
    ingress:
      - {}
    egress:
      - {}

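# NOTE: each flag below toggles whether the corresponding template in this
# chart is rendered; set an entry to false to skip creating that object.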
manifests:
  configmap_bin: true
  configmap_etc: true
  cron_volume_usage_audit: true
  deployment_api: true
  deployment_backup: true
  deployment_scheduler: true
  deployment_volume: true
  ingress_api: true
  job_backup_storage_init: true
  job_bootstrap: true
  job_clean: true
  job_create_internal_tenant: true
  job_db_init: true
  job_image_repo_sync: true
  job_rabbit_init: true
  job_db_sync: true
  job_db_drop: false
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  job_storage_init: true
  pdb_api: true
  pod_rally_test: true
  pvc_backup: true
  network_policy: false
  secret_db: true
  secret_ingress_tls: true
  secret_keystone: true
  secret_rabbitmq: true
  service_api: true
  service_ingress_api: true